Diffstat (limited to 'rust')
-rw-r--r--  rust/.kunitconfig | 3
-rw-r--r--  rust/Makefile | 379
-rw-r--r--  rust/alloc/README.md | 36
-rw-r--r--  rust/alloc/alloc.rs | 452
-rw-r--r--  rust/alloc/boxed.rs | 2463
-rw-r--r--  rust/alloc/collections/mod.rs | 160
-rw-r--r--  rust/alloc/lib.rs | 288
-rw-r--r--  rust/alloc/raw_vec.rs | 611
-rw-r--r--  rust/alloc/slice.rs | 890
-rw-r--r--  rust/alloc/vec/drain.rs | 255
-rw-r--r--  rust/alloc/vec/extract_if.rs | 115
-rw-r--r--  rust/alloc/vec/into_iter.rs | 454
-rw-r--r--  rust/alloc/vec/is_zero.rs | 204
-rw-r--r--  rust/alloc/vec/mod.rs | 3683
-rw-r--r--  rust/alloc/vec/partial_eq.rs | 49
-rw-r--r--  rust/alloc/vec/set_len_on_drop.rs | 35
-rw-r--r--  rust/alloc/vec/spec_extend.rs | 119
-rw-r--r--  rust/bindgen_parameters | 10
-rw-r--r--  rust/bindings/bindings_helper.h | 45
-rw-r--r--  rust/bindings/lib.rs | 8
-rw-r--r--  rust/compiler_builtins.rs | 27
-rw-r--r--  rust/exports.c | 8
-rw-r--r--  rust/ffi.rs | 48
-rw-r--r--  rust/helpers.c | 180
-rw-r--r--  rust/helpers/auxiliary.c | 23
-rw-r--r--  rust/helpers/blk.c | 14
-rw-r--r--  rust/helpers/bug.c | 8
-rw-r--r--  rust/helpers/build_assert.c | 25
-rw-r--r--  rust/helpers/build_bug.c | 8
-rw-r--r--  rust/helpers/clk.c | 66
-rw-r--r--  rust/helpers/cpufreq.c | 10
-rw-r--r--  rust/helpers/cpumask.c | 70
-rw-r--r--  rust/helpers/cred.c | 13
-rw-r--r--  rust/helpers/device.c | 10
-rw-r--r--  rust/helpers/dma.c | 16
-rw-r--r--  rust/helpers/drm.c | 23
-rw-r--r--  rust/helpers/err.c | 18
-rw-r--r--  rust/helpers/fs.c | 12
-rw-r--r--  rust/helpers/helpers.c | 45
-rw-r--r--  rust/helpers/io.c | 101
-rw-r--r--  rust/helpers/jump_label.c | 14
-rw-r--r--  rust/helpers/kunit.c | 8
-rw-r--r--  rust/helpers/mm.c | 50
-rw-r--r--  rust/helpers/mutex.c | 24
-rw-r--r--  rust/helpers/page.c | 19
-rw-r--r--  rust/helpers/pci.c | 23
-rw-r--r--  rust/helpers/pid_namespace.c | 26
-rw-r--r--  rust/helpers/platform.c | 18
-rw-r--r--  rust/helpers/rbtree.c | 9
-rw-r--r--  rust/helpers/rcu.c | 13
-rw-r--r--  rust/helpers/refcount.c | 18
-rw-r--r--  rust/helpers/security.c | 20
-rw-r--r--  rust/helpers/signal.c | 8
-rw-r--r--  rust/helpers/slab.c | 15
-rw-r--r--  rust/helpers/spinlock.c | 37
-rw-r--r--  rust/helpers/sync.c | 13
-rw-r--r--  rust/helpers/task.c | 56
-rw-r--r--  rust/helpers/uaccess.c | 15
-rw-r--r--  rust/helpers/vmalloc.c | 9
-rw-r--r--  rust/helpers/wait.c | 8
-rw-r--r--  rust/helpers/workqueue.c | 14
-rw-r--r--  rust/kernel/.gitignore | 3
-rw-r--r--  rust/kernel/alloc.rs | 224
-rw-r--r--  rust/kernel/alloc/allocator.rs | 189
-rw-r--r--  rust/kernel/alloc/allocator_test.rs | 113
-rw-r--r--  rust/kernel/alloc/kbox.rs | 498
-rw-r--r--  rust/kernel/alloc/kvec.rs | 916
-rw-r--r--  rust/kernel/alloc/layout.rs | 110
-rw-r--r--  rust/kernel/allocator.rs | 88
-rw-r--r--  rust/kernel/auxiliary.rs | 360
-rw-r--r--  rust/kernel/block.rs | 5
-rw-r--r--  rust/kernel/block/mq.rs | 98
-rw-r--r--  rust/kernel/block/mq/gen_disk.rs | 196
-rw-r--r--  rust/kernel/block/mq/operations.rs | 246
-rw-r--r--  rust/kernel/block/mq/raw_writer.rs | 55
-rw-r--r--  rust/kernel/block/mq/request.rs | 262
-rw-r--r--  rust/kernel/block/mq/tag_set.rs | 85
-rw-r--r--  rust/kernel/build_assert.rs | 12
-rw-r--r--  rust/kernel/clk.rs | 334
-rw-r--r--  rust/kernel/configfs.rs | 1049
-rw-r--r--  rust/kernel/cpu.rs | 30
-rw-r--r--  rust/kernel/cpufreq.rs | 1321
-rw-r--r--  rust/kernel/cpumask.rs | 330
-rw-r--r--  rust/kernel/cred.rs | 90
-rw-r--r--  rust/kernel/device.rs | 551
-rw-r--r--  rust/kernel/device_id.rs | 165
-rw-r--r--  rust/kernel/devres.rs | 237
-rw-r--r--  rust/kernel/dma.rs | 391
-rw-r--r--  rust/kernel/driver.rs | 188
-rw-r--r--  rust/kernel/drm/device.rs | 200
-rw-r--r--  rust/kernel/drm/driver.rs | 166
-rw-r--r--  rust/kernel/drm/file.rs | 99
-rw-r--r--  rust/kernel/drm/gem/mod.rs | 328
-rw-r--r--  rust/kernel/drm/ioctl.rs | 162
-rw-r--r--  rust/kernel/drm/mod.rs | 19
-rw-r--r--  rust/kernel/error.rs | 227
-rw-r--r--  rust/kernel/faux.rs | 77
-rw-r--r--  rust/kernel/firmware.rs | 337
-rw-r--r--  rust/kernel/fs.rs | 8
-rw-r--r--  rust/kernel/fs/file.rs | 465
-rw-r--r--  rust/kernel/generated_arch_static_branch_asm.rs.S | 7
-rw-r--r--  rust/kernel/init.rs | 1336
-rw-r--r--  rust/kernel/io.rs | 260
-rw-r--r--  rust/kernel/ioctl.rs | 2
-rw-r--r--  rust/kernel/jump_label.rs | 74
-rw-r--r--  rust/kernel/kunit.rs | 175
-rw-r--r--  rust/kernel/lib.rs | 152
-rw-r--r--  rust/kernel/list.rs | 977
-rw-r--r--  rust/kernel/list/arc.rs | 521
-rw-r--r--  rust/kernel/list/arc_field.rs | 96
-rw-r--r--  rust/kernel/list/impl_list_item_mod.rs | 274
-rw-r--r--  rust/kernel/miscdevice.rs | 364
-rw-r--r--  rust/kernel/mm.rs | 344
-rw-r--r--  rust/kernel/mm/virt.rs | 471
-rw-r--r--  rust/kernel/net/phy.rs | 149
-rw-r--r--  rust/kernel/net/phy/reg.rs | 224
-rw-r--r--  rust/kernel/of.rs | 60
-rw-r--r--  rust/kernel/opp.rs | 1146
-rw-r--r--  rust/kernel/page.rs | 258
-rw-r--r--  rust/kernel/pci.rs | 483
-rw-r--r--  rust/kernel/pid_namespace.rs | 68
-rw-r--r--  rust/kernel/platform.rs | 246
-rw-r--r--  rust/kernel/prelude.rs | 15
-rw-r--r--  rust/kernel/print.rs | 44
-rw-r--r--  rust/kernel/rbtree.rs | 1286
-rw-r--r--  rust/kernel/revocable.rs | 247
-rw-r--r--  rust/kernel/security.rs | 74
-rw-r--r--  rust/kernel/seq_file.rs | 53
-rw-r--r--  rust/kernel/sizes.rs | 26
-rw-r--r--  rust/kernel/std_vendor.rs | 47
-rw-r--r--  rust/kernel/str.rs | 268
-rw-r--r--  rust/kernel/sync.rs | 71
-rw-r--r--  rust/kernel/sync/arc.rs | 355
-rw-r--r--  rust/kernel/sync/arc/std_vendor.rs | 2
-rw-r--r--  rust/kernel/sync/condvar.rs | 44
-rw-r--r--  rust/kernel/sync/lock.rs | 109
-rw-r--r--  rust/kernel/sync/lock/global.rs | 302
-rw-r--r--  rust/kernel/sync/lock/mutex.rs | 32
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs | 32
-rw-r--r--  rust/kernel/sync/locked_by.rs | 22
-rw-r--r--  rust/kernel/sync/poll.rs | 121
-rw-r--r--  rust/kernel/sync/rcu.rs | 52
-rw-r--r--  rust/kernel/task.rs | 284
-rw-r--r--  rust/kernel/time.rs | 135
-rw-r--r--  rust/kernel/time/hrtimer.rs | 520
-rw-r--r--  rust/kernel/time/hrtimer/arc.rs | 100
-rw-r--r--  rust/kernel/time/hrtimer/pin.rs | 104
-rw-r--r--  rust/kernel/time/hrtimer/pin_mut.rs | 108
-rw-r--r--  rust/kernel/time/hrtimer/tbox.rs | 120
-rw-r--r--  rust/kernel/tracepoint.rs | 49
-rw-r--r--  rust/kernel/transmute.rs | 71
-rw-r--r--  rust/kernel/types.rs | 281
-rw-r--r--  rust/kernel/uaccess.rs | 371
-rw-r--r--  rust/kernel/workqueue.rs | 140
-rw-r--r--  rust/macros/export.rs | 29
-rw-r--r--  rust/macros/helpers.rs | 91
-rw-r--r--  rust/macros/kunit.rs | 156
-rw-r--r--  rust/macros/lib.rs | 281
-rw-r--r--  rust/macros/module.rs | 125
-rw-r--r--  rust/macros/paste.rs | 15
-rw-r--r--  rust/macros/quote.rs | 28
-rw-r--r--  rust/pin-init/CONTRIBUTING.md | 72
-rw-r--r--  rust/pin-init/README.md | 228
-rw-r--r--  rust/pin-init/examples/big_struct_in_place.rs | 39
-rw-r--r--  rust/pin-init/examples/error.rs | 27
-rw-r--r--  rust/pin-init/examples/linked_list.rs | 161
-rw-r--r--  rust/pin-init/examples/mutex.rs | 209
-rw-r--r--  rust/pin-init/examples/pthread_mutex.rs | 178
-rw-r--r--  rust/pin-init/examples/static_init.rs | 122
-rw-r--r--  rust/pin-init/internal/src/helpers.rs | 152
-rw-r--r--  rust/pin-init/internal/src/lib.rs | 48
-rw-r--r--  rust/pin-init/internal/src/pin_data.rs (renamed from rust/macros/pin_data.rs) | 9
-rw-r--r--  rust/pin-init/internal/src/pinned_drop.rs (renamed from rust/macros/pinned_drop.rs) | 10
-rw-r--r--  rust/pin-init/internal/src/zeroable.rs (renamed from rust/macros/zeroable.rs) | 12
-rw-r--r--  rust/pin-init/src/__internal.rs (renamed from rust/kernel/init/__internal.rs) | 86
-rw-r--r--  rust/pin-init/src/alloc.rs | 156
-rw-r--r--  rust/pin-init/src/lib.rs | 1483
-rw-r--r--  rust/pin-init/src/macros.rs (renamed from rust/kernel/init/macros.rs) | 198
-rw-r--r--  rust/uapi/lib.rs | 8
-rw-r--r--  rust/uapi/uapi_helper.h | 3
180 files changed, 26418 insertions, 12315 deletions
diff --git a/rust/.kunitconfig b/rust/.kunitconfig
new file mode 100644
index 000000000000..9e72a5ab03c9
--- /dev/null
+++ b/rust/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_RUST=y
+CONFIG_RUST_KERNEL_DOCTESTS=y
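
With CONFIG_RUST_KERNEL_DOCTESTS=y set, documentation examples in the `kernel` crate are compiled into KUnit tests (the `doctests_kernel_generated` objects in the Makefile changes below). A minimal sketch of the kind of doctest this covers, using a hypothetical `my_sum` helper rather than any real kernel API:

/// Sums two numbers.
///
/// # Examples
///
/// ```
/// assert_eq!(my_sum(2, 2), 4);
/// ```
pub fn my_sum(a: i32, b: i32) -> i32 {
    a + b
}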
diff --git a/rust/Makefile b/rust/Makefile
index 86a125c4243c..3aca903a7d08 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -3,21 +3,18 @@
# Where to place rustdoc generated documentation
rustdoc_output := $(objtree)/Documentation/output/rust/rustdoc
-obj-$(CONFIG_RUST) += core.o compiler_builtins.o
+obj-$(CONFIG_RUST) += core.o compiler_builtins.o ffi.o
always-$(CONFIG_RUST) += exports_core_generated.h
# Missing prototypes are expected in the helpers since these are exported
# for Rust only, thus there is neither a header nor prototypes.
-obj-$(CONFIG_RUST) += helpers.o
-CFLAGS_REMOVE_helpers.o = -Wmissing-prototypes -Wmissing-declarations
-
-always-$(CONFIG_RUST) += libmacros.so
-no-clean-files += libmacros.so
+obj-$(CONFIG_RUST) += helpers/helpers.o
+CFLAGS_REMOVE_helpers/helpers.o = -Wmissing-prototypes -Wmissing-declarations
always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs
-obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o
-always-$(CONFIG_RUST) += exports_alloc_generated.h exports_bindings_generated.h \
- exports_kernel_generated.h
+obj-$(CONFIG_RUST) += bindings.o pin_init.o kernel.o
+always-$(CONFIG_RUST) += exports_helpers_generated.h \
+ exports_bindings_generated.h exports_kernel_generated.h
always-$(CONFIG_RUST) += uapi/uapi_generated.rs
obj-$(CONFIG_RUST) += uapi.o
@@ -36,46 +33,43 @@ always-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.c
obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated.o
obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.o
-# Avoids running `$(RUSTC)` for the sysroot when it may not be available.
+always-$(subst y,$(CONFIG_RUST),$(CONFIG_JUMP_LABEL)) += kernel/generated_arch_static_branch_asm.rs
+
+# Avoids running `$(RUSTC)` when it may not be available.
ifdef CONFIG_RUST
+libmacros_name := $(shell MAKEFLAGS= $(RUSTC) --print file-names --crate-name macros --crate-type proc-macro - </dev/null)
+libmacros_extension := $(patsubst libmacros.%,%,$(libmacros_name))
+
+libpin_init_internal_name := $(shell MAKEFLAGS= $(RUSTC) --print file-names --crate-name pin_init_internal --crate-type proc-macro - </dev/null)
+libpin_init_internal_extension := $(patsubst libpin_init_internal.%,%,$(libpin_init_internal_name))
+
+always-$(CONFIG_RUST) += $(libmacros_name) $(libpin_init_internal_name)
+
# `$(rust_flags)` is passed in case the user added `--sysroot`.
rustc_sysroot := $(shell MAKEFLAGS= $(RUSTC) $(rust_flags) --print sysroot)
rustc_host_target := $(shell $(RUSTC) --version --verbose | grep -F 'host: ' | cut -d' ' -f2)
RUST_LIB_SRC ?= $(rustc_sysroot)/lib/rustlib/src/rust/library
-ifeq ($(quiet),silent_)
-cargo_quiet=-q
-rust_test_quiet=-q
-rustdoc_test_quiet=--test-args -q
-rustdoc_test_kernel_quiet=>/dev/null
-else ifeq ($(quiet),quiet_)
+ifneq ($(quiet),)
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
rustdoc_test_kernel_quiet=>/dev/null
-else
-cargo_quiet=--verbose
endif
core-cfgs = \
--cfg no_fp_fmt_parse
-alloc-cfgs = \
- --cfg no_borrow \
- --cfg no_fmt \
- --cfg no_global_oom_handling \
- --cfg no_macros \
- --cfg no_rc \
- --cfg no_str \
- --cfg no_string \
- --cfg no_sync \
- --cfg no_thin
-
+# `rustc` recognizes `--remap-path-prefix` since 1.26.0, but `rustdoc` only
+# since Rust 1.81.0. Moreover, `rustdoc` ICEs on out-of-tree builds since Rust
+# 1.82.0 (https://github.com/rust-lang/rust/issues/138520). Thus work around
+# both issues by skipping the flag. The former also applies to `RUSTDOC TK`.
quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
cmd_rustdoc = \
OBJTREE=$(abspath $(objtree)) \
- $(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
+ $(RUSTDOC) $(filter-out $(skip_flags) --remap-path-prefix=%,$(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \
$(rustc_target_flags) -L$(objtree)/$(obj) \
+ -Zunstable-options --generate-link-to-definition \
--output $(rustdoc_output) \
--crate-name $(subst rustdoc-,,$@) \
$(if $(rustdoc_host),,--sysroot=/dev/null) \
@@ -93,7 +87,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
# command-like flags to solve the issue. Meanwhile, we use the non-custom case
# and then retouch the generated files.
rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
- rustdoc-alloc rustdoc-kernel
+ rustdoc-kernel rustdoc-pin_init
$(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)/static.files/
$(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)/static.files/
$(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \
@@ -110,6 +104,9 @@ rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
rustdoc-macros: $(src)/macros/lib.rs FORCE
+$(call if_changed,rustdoc)
+# Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should
+# not be needed -- see https://github.com/rust-lang/rust/pull/128307.
+rustdoc-core: private skip_flags = -Wrustdoc::unescaped_backticks
rustdoc-core: private rustc_target_flags = $(core-cfgs)
rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
+$(call if_changed,rustdoc)
@@ -117,55 +114,87 @@ rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
+$(call if_changed,rustdoc)
-# We need to allow `rustdoc::broken_intra_doc_links` because some
-# `no_global_oom_handling` functions refer to non-`no_global_oom_handling`
-# functions. Ideally `rustdoc` would have a way to distinguish broken links
-# due to things that are "configured out" vs. entirely non-existing ones.
-rustdoc-alloc: private rustc_target_flags = $(alloc-cfgs) \
- -Arustdoc::broken_intra_doc_links
-rustdoc-alloc: $(src)/alloc/lib.rs rustdoc-core rustdoc-compiler_builtins FORCE
+rustdoc-ffi: $(src)/ffi.rs rustdoc-core FORCE
+ +$(call if_changed,rustdoc)
+
+rustdoc-pin_init_internal: private rustdoc_host = yes
+rustdoc-pin_init_internal: private rustc_target_flags = --cfg kernel \
+ --extern proc_macro --crate-type proc-macro
+rustdoc-pin_init_internal: $(src)/pin-init/internal/src/lib.rs FORCE
+$(call if_changed,rustdoc)
-rustdoc-kernel: private rustc_target_flags = --extern alloc \
- --extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so \
+rustdoc-pin_init: private rustdoc_host = yes
+rustdoc-pin_init: private rustc_target_flags = --extern pin_init_internal \
+ --extern macros --extern alloc --cfg kernel --cfg feature=\"alloc\"
+rustdoc-pin_init: $(src)/pin-init/src/lib.rs rustdoc-pin_init_internal \
+ rustdoc-macros FORCE
+ +$(call if_changed,rustdoc)
+
+rustdoc-kernel: private rustc_target_flags = --extern ffi --extern pin_init \
+ --extern build_error --extern macros \
--extern bindings --extern uapi
-rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-macros \
- rustdoc-compiler_builtins rustdoc-alloc $(obj)/libmacros.so \
+rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-ffi rustdoc-macros \
+ rustdoc-pin_init rustdoc-compiler_builtins $(obj)/$(libmacros_name) \
$(obj)/bindings.o FORCE
+$(call if_changed,rustdoc)
-quiet_cmd_rustc_test_library = RUSTC TL $<
+quiet_cmd_rustc_test_library = $(RUSTC_OR_CLIPPY_QUIET) TL $<
cmd_rustc_test_library = \
OBJTREE=$(abspath $(objtree)) \
- $(RUSTC) $(rust_common_flags) \
+ $(RUSTC_OR_CLIPPY) $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg $(rustc_target_flags) \
--crate-type $(if $(rustc_test_library_proc),proc-macro,rlib) \
--out-dir $(objtree)/$(obj)/test --cfg testlib \
- --sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$(subst rusttestlib-,,$@)) $<
-rusttestlib-build_error: $(src)/build_error.rs rusttest-prepare FORCE
+rusttestlib-build_error: $(src)/build_error.rs FORCE
+ +$(call if_changed,rustc_test_library)
+
+rusttestlib-ffi: $(src)/ffi.rs FORCE
+$(call if_changed,rustc_test_library)
rusttestlib-macros: private rustc_target_flags = --extern proc_macro
rusttestlib-macros: private rustc_test_library_proc = yes
-rusttestlib-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+rusttestlib-macros: $(src)/macros/lib.rs FORCE
+ +$(call if_changed,rustc_test_library)
+
+rusttestlib-pin_init_internal: private rustc_target_flags = --cfg kernel \
+ --extern proc_macro
+rusttestlib-pin_init_internal: private rustc_test_library_proc = yes
+rusttestlib-pin_init_internal: $(src)/pin-init/internal/src/lib.rs FORCE
+ +$(call if_changed,rustc_test_library)
+
+rusttestlib-pin_init: private rustc_target_flags = --extern pin_init_internal \
+ --extern macros --cfg kernel
+rusttestlib-pin_init: $(src)/pin-init/src/lib.rs rusttestlib-macros \
+ rusttestlib-pin_init_internal $(obj)/$(libpin_init_internal_name) FORCE
+$(call if_changed,rustc_test_library)
-rusttestlib-bindings: $(src)/bindings/lib.rs rusttest-prepare FORCE
+rusttestlib-kernel: private rustc_target_flags = --extern ffi \
+ --extern build_error --extern macros --extern pin_init \
+ --extern bindings --extern uapi
+rusttestlib-kernel: $(src)/kernel/lib.rs rusttestlib-bindings rusttestlib-uapi \
+ rusttestlib-build_error rusttestlib-pin_init $(obj)/$(libmacros_name) \
+ $(obj)/bindings.o FORCE
+$(call if_changed,rustc_test_library)
-rusttestlib-uapi: $(src)/uapi/lib.rs rusttest-prepare FORCE
+rusttestlib-bindings: private rustc_target_flags = --extern ffi
+rusttestlib-bindings: $(src)/bindings/lib.rs rusttestlib-ffi FORCE
+ +$(call if_changed,rustc_test_library)
+
+rusttestlib-uapi: private rustc_target_flags = --extern ffi
+rusttestlib-uapi: $(src)/uapi/lib.rs rusttestlib-ffi FORCE
+$(call if_changed,rustc_test_library)
quiet_cmd_rustdoc_test = RUSTDOC T $<
cmd_rustdoc_test = \
+ RUST_MODFILE=test.rs \
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) $(rustdoc_test_target_flags) \
- --sysroot $(objtree)/$(obj)/test/sysroot $(rustdoc_test_quiet) \
+ $(rustdoc_test_quiet) \
-L$(objtree)/$(obj)/test --output $(rustdoc_output) \
--crate-name $(subst rusttest-,,$@) $<
@@ -174,9 +203,9 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
rm -rf $(objtree)/$(obj)/test/doctests/kernel; \
mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
OBJTREE=$(abspath $(objtree)) \
- $(RUSTDOC) --test $(rust_flags) \
- -L$(objtree)/$(obj) --extern alloc --extern kernel \
- --extern build_error --extern macros \
+ $(RUSTDOC) --test $(filter-out --remap-path-prefix=%,$(rust_flags)) \
+ -L$(objtree)/$(obj) --extern ffi --extern pin_init \
+ --extern kernel --extern build_error --extern macros \
--extern bindings --extern uapi \
--no-run --crate-name kernel -Zunstable-options \
--sysroot=/dev/null \
@@ -192,13 +221,12 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
# We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
# so for the moment we skip `-Cpanic=abort`.
-quiet_cmd_rustc_test = RUSTC T $<
+quiet_cmd_rustc_test = $(RUSTC_OR_CLIPPY_QUIET) T $<
cmd_rustc_test = \
OBJTREE=$(abspath $(objtree)) \
- $(RUSTC) --test $(rust_common_flags) \
+ $(RUSTC_OR_CLIPPY) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) --out-dir $(objtree)/$(obj)/test \
- --sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$@) $<; \
$(objtree)/$(obj)/test/$(subst rusttest-,,$@) $(rust_test_quiet) \
@@ -206,68 +234,20 @@ quiet_cmd_rustc_test = RUSTC T $<
rusttest: rusttest-macros rusttest-kernel
-# This prepares a custom sysroot with our custom `alloc` instead of
-# the standard one.
-#
-# This requires several hacks:
-# - Unlike `core` and `alloc`, `std` depends on more than a dozen crates,
-# including third-party crates that need to be downloaded, plus custom
-# `build.rs` steps. Thus hardcoding things here is not maintainable.
-# - `cargo` knows how to build the standard library, but it is an unstable
-# feature so far (`-Zbuild-std`).
-# - `cargo` only considers the use case of building the standard library
-# to use it in a given package. Thus we need to create a dummy package
-# and pick the generated libraries from there.
-# - Since we only keep a subset of upstream `alloc` in-tree, we need
-# to recreate it on the fly by putting our sources on top.
-# - The usual ways of modifying the dependency graph in `cargo` do not seem
-# to apply for the `-Zbuild-std` steps, thus we have to mislead it
-# by modifying the sources in the sysroot.
-# - To avoid messing with the user's Rust installation, we create a clone
-# of the sysroot. However, `cargo` ignores `RUSTFLAGS` in the `-Zbuild-std`
-# steps, thus we use a wrapper binary passed via `RUSTC` to pass the flag.
-#
-# In the future, we hope to avoid the whole ordeal by either:
-# - Making the `test` crate not depend on `std` (either improving upstream
-# or having our own custom crate).
-# - Making the tests run in kernel space (requires the previous point).
-# - Making `std` and friends be more like a "normal" crate, so that
-# `-Zbuild-std` and related hacks are not needed.
-quiet_cmd_rustsysroot = RUSTSYSROOT
- cmd_rustsysroot = \
- rm -rf $(objtree)/$(obj)/test; \
- mkdir -p $(objtree)/$(obj)/test; \
- cp -a $(rustc_sysroot) $(objtree)/$(obj)/test/sysroot; \
- cp -r $(srctree)/$(src)/alloc/* \
- $(objtree)/$(obj)/test/sysroot/lib/rustlib/src/rust/library/alloc/src; \
- echo '\#!/bin/sh' > $(objtree)/$(obj)/test/rustc_sysroot; \
- echo "$(RUSTC) --sysroot=$(abspath $(objtree)/$(obj)/test/sysroot) \"\$$@\"" \
- >> $(objtree)/$(obj)/test/rustc_sysroot; \
- chmod u+x $(objtree)/$(obj)/test/rustc_sysroot; \
- $(CARGO) -q new $(objtree)/$(obj)/test/dummy; \
- RUSTC=$(objtree)/$(obj)/test/rustc_sysroot $(CARGO) $(cargo_quiet) \
- test -Zbuild-std --target $(rustc_host_target) \
- --manifest-path $(objtree)/$(obj)/test/dummy/Cargo.toml; \
- rm $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib/*; \
- cp $(objtree)/$(obj)/test/dummy/target/$(rustc_host_target)/debug/deps/* \
- $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib
-
-rusttest-prepare: FORCE
- +$(call if_changed,rustsysroot)
-
-rusttest-macros: private rustc_target_flags = --extern proc_macro
+rusttest-macros: private rustc_target_flags = --extern proc_macro \
+ --extern macros --extern kernel --extern pin_init
rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
-rusttest-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+rusttest-macros: $(src)/macros/lib.rs \
+ rusttestlib-macros rusttestlib-kernel rusttestlib-pin_init FORCE
+$(call if_changed,rustc_test)
+$(call if_changed,rustdoc_test)
-rusttest-kernel: private rustc_target_flags = --extern alloc \
+rusttest-kernel: private rustc_target_flags = --extern ffi --extern pin_init \
--extern build_error --extern macros --extern bindings --extern uapi
-rusttest-kernel: $(src)/kernel/lib.rs rusttest-prepare \
+rusttest-kernel: $(src)/kernel/lib.rs rusttestlib-ffi rusttestlib-kernel \
rusttestlib-build_error rusttestlib-macros rusttestlib-bindings \
- rusttestlib-uapi FORCE
+ rusttestlib-uapi rusttestlib-pin_init FORCE
+$(call if_changed,rustc_test)
- +$(call if_changed,rustc_test_library)
ifdef CONFIG_CC_IS_CLANG
bindgen_c_flags = $(c_flags)
@@ -284,19 +264,24 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-mfunction-return=thunk-extern -mrecord-mcount -mabi=lp64 \
-mindirect-branch-cs-prefix -mstack-protector-guard% -mtraceback=no \
-mno-pointers-to-nested-functions -mno-string \
- -mno-strict-align -mstrict-align \
+ -mno-strict-align -mstrict-align -mdirect-extern-access \
+ -mexplicit-relocs -mno-check-zero-division \
-fconserve-stack -falign-jumps=% -falign-loops=% \
-femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \
-fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \
-fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
-fzero-call-used-regs=% -fno-stack-clash-protection \
-fno-inline-functions-called-once -fsanitize=bounds-strict \
- -fstrict-flex-arrays=% \
+ -fstrict-flex-arrays=% -fmin-function-alignment=% \
+ -fzero-init-padding-bits=% \
--param=% --param asan-%
# Derived from `scripts/Makefile.clang`.
BINDGEN_TARGET_x86 := x86_64-linux-gnu
BINDGEN_TARGET_arm64 := aarch64-linux-gnu
+BINDGEN_TARGET_arm := arm-linux-gnueabi
+BINDGEN_TARGET_loongarch := loongarch64-linux-gnusf
+BINDGEN_TARGET_um := $(BINDGEN_TARGET_$(SUBARCH))
BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
# All warnings are inhibited since GCC builds are very experimental,
@@ -328,18 +313,35 @@ else
bindgen_c_flags_lto = $(bindgen_c_flags)
endif
-bindgen_c_flags_final = $(bindgen_c_flags_lto) -D__BINDGEN__
-
+# `-fno-builtin` is passed to prevent `bindgen` from using `clang` builtin
+# prototypes for functions like `memcpy` -- if this flag is not passed,
+# `bindgen`-generated prototypes use `c_ulong` or `c_uint` depending on
+# architecture instead of generating `usize`.
+bindgen_c_flags_final = $(bindgen_c_flags_lto) -fno-builtin -D__BINDGEN__
+
+# Each `bindgen` release may upgrade the list of Rust target versions. By
+# default, the highest stable release in their list is used. Thus we need to set
+# a `--rust-target` to avoid future `bindgen` releases emitting code that
+# `rustc` may not understand. On top of that, `bindgen` does not support passing
+# an unknown Rust target version.
+#
+# Therefore, the Rust target for `bindgen` can be only as high as the minimum
+# Rust version the kernel supports and only as high as the greatest stable Rust
+# target supported by the minimum `bindgen` version the kernel supports (that
+# is, if we do not test the actual `rustc`/`bindgen` versions running).
+#
+# Starting with `bindgen` 0.71.0, we will be able to set any future Rust version
+# instead, i.e. we will be able to set here our minimum supported Rust version.
quiet_cmd_bindgen = BINDGEN $@
cmd_bindgen = \
- $(BINDGEN) $< $(bindgen_target_flags) \
- --use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
- --no-debug '.*' \
+ $(BINDGEN) $< $(bindgen_target_flags) --rust-target 1.68 \
+ --use-core --with-derive-default --ctypes-prefix ffi --no-layout-tests \
+ --no-debug '.*' --enable-function-attribute-detection \
-o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
$(obj)/bindings/bindings_generated.rs: private bindgen_target_flags = \
- $(shell grep -Ev '^#|^$$' $(srctree)/$(src)/bindgen_parameters)
+ $(shell grep -Ev '^#|^$$' $(src)/bindgen_parameters)
$(obj)/bindings/bindings_generated.rs: private bindgen_target_extra = ; \
sed -Ei 's/pub const RUST_CONST_HELPER_([a-zA-Z0-9_]*)/pub const \1/g' $@
$(obj)/bindings/bindings_generated.rs: $(src)/bindings/bindings_helper.h \
@@ -347,7 +349,7 @@ $(obj)/bindings/bindings_generated.rs: $(src)/bindings/bindings_helper.h \
$(call if_changed_dep,bindgen)
$(obj)/uapi/uapi_generated.rs: private bindgen_target_flags = \
- $(shell grep -Ev '^#|^$$' $(srctree)/$(src)/bindgen_parameters)
+ $(shell grep -Ev '^#|^$$' $(src)/bindgen_parameters)
$(obj)/uapi/uapi_generated.rs: $(src)/uapi/uapi_helper.h \
$(src)/bindgen_parameters FORCE
$(call if_changed_dep,bindgen)
@@ -363,18 +365,28 @@ $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
sed -Ei 's/pub fn rust_helper_([a-zA-Z0-9_]*)/#[link_name="rust_helper_\1"]\n pub fn \1/g' $@
-$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers/helpers.c FORCE
$(call if_changed_dep,bindgen)
+rust_exports = $(NM) -p --defined-only $(1) | awk '$$2~/(T|R|D|B)/ && $$3!~/__(pfx|cfi|odr_asan)/ { printf $(2),$$3 }'
+
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
- $(NM) -p --defined-only $< \
- | awk '/ (T|R|D) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+ $(call rust_exports,$<,"EXPORT_SYMBOL_RUST_GPL(%s);\n") > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports)
-$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE
+# Even though Rust kernel modules should never use the bindings directly,
+# symbols from the `bindings` crate and the C helpers need to be exported
+# because Rust generics and inlined functions may not get their code generated
+# in the crate where they are defined. Other helpers, called from non-inline
+# functions, need not be exported, in principle. However, in general, the Rust
+# compiler does not guarantee codegen will be performed for a non-inline
+# function either. Therefore, we export all symbols from helpers and bindings.
+# In the future, this may be revisited to reduce the number of exports after
+# the compiler is informed about the places codegen is required.
+$(obj)/exports_helpers_generated.h: $(obj)/helpers/helpers.o FORCE
$(call if_changed,exports)
$(obj)/exports_bindings_generated.h: $(obj)/bindings.o FORCE
@@ -385,17 +397,19 @@ $(obj)/exports_kernel_generated.h: $(obj)/kernel.o FORCE
quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
cmd_rustc_procmacro = \
- $(RUSTC_OR_CLIPPY) $(rust_common_flags) \
+ $(RUSTC_OR_CLIPPY) $(rust_common_flags) $(rustc_target_flags) \
-Clinker-flavor=gcc -Clinker=$(HOSTCC) \
- -Clink-args='$(call escsq,$(KBUILD_HOSTLDFLAGS))' \
+ -Clink-args='$(call escsq,$(KBUILD_PROCMACROLDFLAGS))' \
--emit=dep-info=$(depfile) --emit=link=$@ --extern proc_macro \
--crate-type proc-macro \
- --crate-name $(patsubst lib%.so,%,$(notdir $@)) $<
+ --crate-name $(patsubst lib%.$(libmacros_extension),%,$(notdir $@)) $<
# Procedural macros can only be used with the `rustc` that compiled it.
-# Therefore, to get `libmacros.so` automatically recompiled when the compiler
-# version changes, we add `core.o` as a dependency (even if it is not needed).
-$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE
+$(obj)/$(libmacros_name): $(src)/macros/lib.rs FORCE
+ +$(call if_changed_dep,rustc_procmacro)
+
+$(obj)/$(libpin_init_internal_name): private rustc_target_flags = --cfg kernel
+$(obj)/$(libpin_init_internal_name): $(src)/pin-init/internal/src/lib.rs FORCE
+$(call if_changed_dep,rustc_procmacro)
quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
@@ -408,21 +422,29 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
--crate-type rlib -L$(objtree)/$(obj) \
--crate-name $(patsubst %.o,%,$(notdir $@)) $< \
--sysroot=/dev/null \
- $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
+ $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) \
+ $(cmd_objtool)
rust-analyzer:
- $(Q)$(srctree)/scripts/generate_rust_analyzer.py \
- --cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \
+ $(Q)MAKEFLAGS= $(srctree)/scripts/generate_rust_analyzer.py \
+ --cfgs='core=$(core-cfgs)' \
$(realpath $(srctree)) $(realpath $(objtree)) \
- $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \
- $(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json
+ $(rustc_sysroot) $(RUST_LIB_SRC) $(if $(KBUILD_EXTMOD),$(srcroot)) \
+ > rust-project.json
redirect-intrinsics = \
- __addsf3 __eqsf2 __gesf2 __lesf2 __ltsf2 __mulsf3 __nesf2 __unordsf2 \
- __adddf3 __ledf2 __ltdf2 __muldf3 __unorddf2 \
+ __addsf3 __eqsf2 __extendsfdf2 __gesf2 __lesf2 __ltsf2 __mulsf3 __nesf2 __truncdfsf2 __unordsf2 \
+ __adddf3 __eqdf2 __ledf2 __ltdf2 __muldf3 __unorddf2 \
__muloti4 __multi3 \
__udivmodti4 __udivti3 __umodti3
+ifdef CONFIG_ARM
+ # Add eabi intrinsics for ARM 32-bit
+ redirect-intrinsics += \
+ __aeabi_fadd __aeabi_fmul __aeabi_fcmpeq __aeabi_fcmple __aeabi_fcmplt __aeabi_fcmpun \
+ __aeabi_dadd __aeabi_dmul __aeabi_dcmple __aeabi_dcmplt __aeabi_dcmpun \
+ __aeabi_uldivmod
+endif
ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
# These intrinsics are defined for ARM64 and RISCV64
redirect-intrinsics += \
@@ -430,44 +452,89 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
__ashlti3 __lshrti3
endif
+ifdef CONFIG_MODVERSIONS
+cmd_gendwarfksyms = $(if $(skip_gendwarfksyms),, \
+ $(call rust_exports,$@,"%s\n") | \
+ scripts/gendwarfksyms/gendwarfksyms \
+ $(if $(KBUILD_GENDWARFKSYMS_STABLE), --stable) \
+ $(if $(KBUILD_SYMTYPES), --symtypes $(@:.o=.symtypes),) \
+ $@ >> $(dot-target).cmd)
+endif
+
+define rule_rustc_library
+ $(call cmd_and_fixdep,rustc_library)
+ $(call cmd,gen_objtooldep)
+ $(call cmd,gendwarfksyms)
+endef
+
+define rule_rust_cc_library
+ $(call if_changed_rule,cc_o_c)
+ $(call cmd,force_checksrc)
+ $(call cmd,gendwarfksyms)
+endef
+
+# helpers.o uses the same export mechanism as Rust libraries, so ensure symbol
+# versions are calculated for the helpers too.
+$(obj)/helpers/helpers.o: $(src)/helpers/helpers.c $(recordmcount_source) FORCE
+ +$(call if_changed_rule,rust_cc_library)
+
+# Disable symbol versioning for exports.o to avoid conflicts with the actual
+# symbol versions generated from Rust objects.
+$(obj)/exports.o: private skip_gendwarfksyms = 1
+
$(obj)/core.o: private skip_clippy = 1
-$(obj)/core.o: private skip_flags = -Dunreachable_pub
+$(obj)/core.o: private skip_flags = -Wunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
-$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
- +$(call if_changed_dep,rustc_library)
-ifneq ($(or $(CONFIG_X86_64),$(CONFIG_LOONGARCH)),)
+$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs \
+ $(wildcard $(objtree)/include/config/RUSTC_VERSION_TEXT) FORCE
+ +$(call if_changed_rule,rustc_library)
+ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),)
$(obj)/core.o: scripts/target.json
endif
+$(obj)/compiler_builtins.o: private skip_gendwarfksyms = 1
$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
-$(obj)/alloc.o: private skip_clippy = 1
-$(obj)/alloc.o: private skip_flags = -Dunreachable_pub
-$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
-$(obj)/alloc.o: $(src)/alloc/lib.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+$(obj)/pin_init.o: private skip_gendwarfksyms = 1
+$(obj)/pin_init.o: private rustc_target_flags = --extern pin_init_internal \
+ --extern macros --cfg kernel
+$(obj)/pin_init.o: $(src)/pin-init/src/lib.rs $(obj)/compiler_builtins.o \
+ $(obj)/$(libpin_init_internal_name) $(obj)/$(libmacros_name) FORCE
+ +$(call if_changed_rule,rustc_library)
+$(obj)/build_error.o: private skip_gendwarfksyms = 1
$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
+
+$(obj)/ffi.o: private skip_gendwarfksyms = 1
+$(obj)/ffi.o: $(src)/ffi.rs $(obj)/compiler_builtins.o FORCE
+ +$(call if_changed_rule,rustc_library)
+$(obj)/bindings.o: private rustc_target_flags = --extern ffi
$(obj)/bindings.o: $(src)/bindings/lib.rs \
- $(obj)/compiler_builtins.o \
+ $(obj)/ffi.o \
$(obj)/bindings/bindings_generated.rs \
$(obj)/bindings/bindings_helpers_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
+$(obj)/uapi.o: private rustc_target_flags = --extern ffi
+$(obj)/uapi.o: private skip_gendwarfksyms = 1
$(obj)/uapi.o: $(src)/uapi/lib.rs \
- $(obj)/compiler_builtins.o \
+ $(obj)/ffi.o \
$(obj)/uapi/uapi_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
-$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
+$(obj)/kernel.o: private rustc_target_flags = --extern ffi --extern pin_init \
--extern build_error --extern macros --extern bindings --extern uapi
-$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
- $(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE
- +$(call if_changed_dep,rustc_library)
+$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/build_error.o $(obj)/pin_init.o \
+ $(obj)/$(libmacros_name) $(obj)/bindings.o $(obj)/uapi.o FORCE
+ +$(call if_changed_rule,rustc_library)
+
+ifdef CONFIG_JUMP_LABEL
+$(obj)/kernel.o: $(obj)/kernel/generated_arch_static_branch_asm.rs
+endif
endif # CONFIG_RUST
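
Two of the `bindgen` changes above shape the generated bindings: `--ctypes-prefix ffi` points the C type aliases at the kernel's new `ffi` crate, and `-fno-builtin` stops `clang` builtin prototypes from overriding the header declarations. A sketch of the effect on a `memcpy`-style prototype (hypothetical output; the real generated names depend on the kernel headers):

use core::ffi::c_void;

extern "C" {
    // Without `-fno-builtin`, the size parameter binds to an
    // arch-dependent alias (`c_ulong` on some targets, `c_uint` on
    // others), per the Makefile comment above:
    //   fn memcpy(dest: *mut c_void, src: *const c_void, n: c_ulong) -> *mut c_void;
    // With `-fno-builtin`, `bindgen` emits `usize`, which is what the
    // kernel's Rust code expects:
    fn memcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void;
}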
diff --git a/rust/alloc/README.md b/rust/alloc/README.md
deleted file mode 100644
index eb6f22e94ebf..000000000000
--- a/rust/alloc/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# `alloc`
-
-These source files come from the Rust standard library, hosted in
-the <https://github.com/rust-lang/rust> repository, licensed under
-"Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
-see <https://github.com/rust-lang/rust/blob/master/COPYRIGHT>.
-
-Please note that these files should be kept as close as possible to
-upstream. In general, only additions should be performed (e.g. new
-methods). Eventually, changes should make it into upstream so that,
-at some point, this fork can be dropped from the kernel tree.
-
-The Rust upstream version on top of which these files are based matches
-the output of `scripts/min-tool-version.sh rustc`.
-
-
-## Rationale
-
-On one hand, kernel folks wanted to keep `alloc` in-tree to have more
-freedom in both workflow and actual features if actually needed
-(e.g. receiver types if we ended up using them), which is reasonable.
-
-On the other hand, Rust folks wanted to keep `alloc` as close as
-upstream as possible and avoid as much divergence as possible, which
-is also reasonable.
-
-We agreed on a middle-ground: we would keep a subset of `alloc`
-in-tree that would be as small and as close as possible to upstream.
-Then, upstream can start adding the functions that we add to `alloc`
-etc., until we reach a point where the kernel already knows exactly
-what it needs in `alloc` and all the new methods are merged into
-upstream, so that we can drop `alloc` from the kernel tree and go back
-to using the upstream one.
-
-By doing this, the kernel can go a bit faster now, and Rust can
-slowly incorporate and discuss the changes as needed.
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
deleted file mode 100644
index abb791cc2371..000000000000
--- a/rust/alloc/alloc.rs
+++ /dev/null
@@ -1,452 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-//! Memory allocation APIs
-
-#![stable(feature = "alloc_module", since = "1.28.0")]
-
-#[cfg(not(test))]
-use core::intrinsics;
-
-#[cfg(not(test))]
-use core::ptr::{self, NonNull};
-
-#[stable(feature = "alloc_module", since = "1.28.0")]
-#[doc(inline)]
-pub use core::alloc::*;
-
-#[cfg(test)]
-mod tests;
-
-extern "Rust" {
- // These are the magic symbols to call the global allocator. rustc generates
- // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
- // (the code expanding that attribute macro generates those functions), or to call
- // the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
- // otherwise.
- // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
- // like `malloc`, `realloc`, and `free`, respectively.
- #[rustc_allocator]
- #[rustc_nounwind]
- fn __rust_alloc(size: usize, align: usize) -> *mut u8;
- #[rustc_deallocator]
- #[rustc_nounwind]
- fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
- #[rustc_reallocator]
- #[rustc_nounwind]
- fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
- #[rustc_allocator_zeroed]
- #[rustc_nounwind]
- fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
-
- static __rust_no_alloc_shim_is_unstable: u8;
-}
-
-/// The global memory allocator.
-///
-/// This type implements the [`Allocator`] trait by forwarding calls
-/// to the allocator registered with the `#[global_allocator]` attribute
-/// if there is one, or the `std` crate’s default.
-///
-/// Note: while this type is unstable, the functionality it provides can be
-/// accessed through the [free functions in `alloc`](self#functions).
-#[unstable(feature = "allocator_api", issue = "32838")]
-#[derive(Copy, Clone, Default, Debug)]
-#[cfg(not(test))]
-pub struct Global;
-
-#[cfg(test)]
-pub use std::alloc::Global;
-
-/// Allocate memory with the global allocator.
-///
-/// This function forwards calls to the [`GlobalAlloc::alloc`] method
-/// of the allocator registered with the `#[global_allocator]` attribute
-/// if there is one, or the `std` crate’s default.
-///
-/// This function is expected to be deprecated in favor of the `alloc` method
-/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
-///
-/// # Safety
-///
-/// See [`GlobalAlloc::alloc`].
-///
-/// # Examples
-///
-/// ```
-/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
-///
-/// unsafe {
-/// let layout = Layout::new::<u16>();
-/// let ptr = alloc(layout);
-/// if ptr.is_null() {
-/// handle_alloc_error(layout);
-/// }
-///
-/// *(ptr as *mut u16) = 42;
-/// assert_eq!(*(ptr as *mut u16), 42);
-///
-/// dealloc(ptr, layout);
-/// }
-/// ```
-#[stable(feature = "global_alloc", since = "1.28.0")]
-#[must_use = "losing the pointer will leak memory"]
-#[inline]
-pub unsafe fn alloc(layout: Layout) -> *mut u8 {
- unsafe {
- // Make sure we don't accidentally allow omitting the allocator shim in
- // stable code until it is actually stabilized.
- core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
-
- __rust_alloc(layout.size(), layout.align())
- }
-}
-
-/// Deallocate memory with the global allocator.
-///
-/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
-/// of the allocator registered with the `#[global_allocator]` attribute
-/// if there is one, or the `std` crate’s default.
-///
-/// This function is expected to be deprecated in favor of the `dealloc` method
-/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
-///
-/// # Safety
-///
-/// See [`GlobalAlloc::dealloc`].
-#[stable(feature = "global_alloc", since = "1.28.0")]
-#[inline]
-pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
- unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
-}
-
-/// Reallocate memory with the global allocator.
-///
-/// This function forwards calls to the [`GlobalAlloc::realloc`] method
-/// of the allocator registered with the `#[global_allocator]` attribute
-/// if there is one, or the `std` crate’s default.
-///
-/// This function is expected to be deprecated in favor of the `realloc` method
-/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
-///
-/// # Safety
-///
-/// See [`GlobalAlloc::realloc`].
-#[stable(feature = "global_alloc", since = "1.28.0")]
-#[must_use = "losing the pointer will leak memory"]
-#[inline]
-pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
-}
-
-/// Allocate zero-initialized memory with the global allocator.
-///
-/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
-/// of the allocator registered with the `#[global_allocator]` attribute
-/// if there is one, or the `std` crate’s default.
-///
-/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
-/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
-///
-/// # Safety
-///
-/// See [`GlobalAlloc::alloc_zeroed`].
-///
-/// # Examples
-///
-/// ```
-/// use std::alloc::{alloc_zeroed, dealloc, Layout};
-///
-/// unsafe {
-/// let layout = Layout::new::<u16>();
-/// let ptr = alloc_zeroed(layout);
-///
-/// assert_eq!(*(ptr as *mut u16), 0);
-///
-/// dealloc(ptr, layout);
-/// }
-/// ```
-#[stable(feature = "global_alloc", since = "1.28.0")]
-#[must_use = "losing the pointer will leak memory"]
-#[inline]
-pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
- unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
-}
-
-#[cfg(not(test))]
-impl Global {
- #[inline]
- fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
- match layout.size() {
- 0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
- // SAFETY: `layout` is non-zero in size,
- size => unsafe {
- let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
- let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
- Ok(NonNull::slice_from_raw_parts(ptr, size))
- },
- }
- }
-
- // SAFETY: Same as `Allocator::grow`
- #[inline]
- unsafe fn grow_impl(
- &self,
- ptr: NonNull<u8>,
- old_layout: Layout,
- new_layout: Layout,
- zeroed: bool,
- ) -> Result<NonNull<[u8]>, AllocError> {
- debug_assert!(
- new_layout.size() >= old_layout.size(),
- "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
- );
-
- match old_layout.size() {
- 0 => self.alloc_impl(new_layout, zeroed),
-
- // SAFETY: `new_size` is non-zero as `old_size` is greater than or equal to `new_size`
- // as required by safety conditions. Other conditions must be upheld by the caller
- old_size if old_layout.align() == new_layout.align() => unsafe {
- let new_size = new_layout.size();
-
- // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
- intrinsics::assume(new_size >= old_layout.size());
-
- let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
- let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
- if zeroed {
- raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
- }
- Ok(NonNull::slice_from_raw_parts(ptr, new_size))
- },
-
- // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
- // both the old and new memory allocation are valid for reads and writes for `old_size`
- // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
- // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
- // for `dealloc` must be upheld by the caller.
- old_size => unsafe {
- let new_ptr = self.alloc_impl(new_layout, zeroed)?;
- ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
- self.deallocate(ptr, old_layout);
- Ok(new_ptr)
- },
- }
- }
-}
-
-#[unstable(feature = "allocator_api", issue = "32838")]
-#[cfg(not(test))]
-unsafe impl Allocator for Global {
- #[inline]
- fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
- self.alloc_impl(layout, false)
- }
-
- #[inline]
- fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
- self.alloc_impl(layout, true)
- }
-
- #[inline]
- unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
- if layout.size() != 0 {
- // SAFETY: `layout` is non-zero in size,
- // other conditions must be upheld by the caller
- unsafe { dealloc(ptr.as_ptr(), layout) }
- }
- }
-
- #[inline]
- unsafe fn grow(
- &self,
- ptr: NonNull<u8>,
- old_layout: Layout,
- new_layout: Layout,
- ) -> Result<NonNull<[u8]>, AllocError> {
- // SAFETY: all conditions must be upheld by the caller
- unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
- }
-
- #[inline]
- unsafe fn grow_zeroed(
- &self,
- ptr: NonNull<u8>,
- old_layout: Layout,
- new_layout: Layout,
- ) -> Result<NonNull<[u8]>, AllocError> {
- // SAFETY: all conditions must be upheld by the caller
- unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
- }
-
- #[inline]
- unsafe fn shrink(
- &self,
- ptr: NonNull<u8>,
- old_layout: Layout,
- new_layout: Layout,
- ) -> Result<NonNull<[u8]>, AllocError> {
- debug_assert!(
- new_layout.size() <= old_layout.size(),
- "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
- );
-
- match new_layout.size() {
- // SAFETY: conditions must be upheld by the caller
- 0 => unsafe {
- self.deallocate(ptr, old_layout);
- Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
- },
-
- // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
- new_size if old_layout.align() == new_layout.align() => unsafe {
- // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
- intrinsics::assume(new_size <= old_layout.size());
-
- let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
- let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
- Ok(NonNull::slice_from_raw_parts(ptr, new_size))
- },
-
- // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
- // both the old and new memory allocation are valid for reads and writes for `new_size`
- // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
- // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
- // for `dealloc` must be upheld by the caller.
- new_size => unsafe {
- let new_ptr = self.allocate(new_layout)?;
- ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
- self.deallocate(ptr, old_layout);
- Ok(new_ptr)
- },
- }
- }
-}
-
-/// The allocator for unique pointers.
-#[cfg(all(not(no_global_oom_handling), not(test)))]
-#[lang = "exchange_malloc"]
-#[inline]
-unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
- match Global.allocate(layout) {
- Ok(ptr) => ptr.as_mut_ptr(),
- Err(_) => handle_alloc_error(layout),
- }
-}
-
-// # Allocation error handler
-
-#[cfg(not(no_global_oom_handling))]
-extern "Rust" {
- // This is the magic symbol to call the global alloc error handler. rustc generates
- // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
- // default implementations below (`__rdl_oom`) otherwise.
- fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
-}
-
-/// Signal a memory allocation error.
-///
-/// Callers of memory allocation APIs wishing to cease execution
-/// in response to an allocation error are encouraged to call this function,
-/// rather than directly invoking [`panic!`] or similar.
-///
-/// This function is guaranteed to diverge (not return normally with a value), but depending on
-/// global configuration, it may either panic (resulting in unwinding or aborting as per
-/// configuration for all panics), or abort the process (with no unwinding).
-///
-/// The default behavior is:
-///
-/// * If the binary links against `std` (typically the case), then
-/// print a message to standard error and abort the process.
-/// This behavior can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
-/// Future versions of Rust may panic by default instead.
-///
-/// * If the binary does not link against `std` (all of its crates are marked
-/// [`#![no_std]`][no_std]), then call [`panic!`] with a message.
-/// [The panic handler] applies as to any panic.
-///
-/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
-/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
-/// [The panic handler]: https://doc.rust-lang.org/reference/runtime.html#the-panic_handler-attribute
-/// [no_std]: https://doc.rust-lang.org/reference/names/preludes.html#the-no_std-attribute
-#[stable(feature = "global_alloc", since = "1.28.0")]
-#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
-#[cfg(all(not(no_global_oom_handling), not(test)))]
-#[cold]
-pub const fn handle_alloc_error(layout: Layout) -> ! {
- const fn ct_error(_: Layout) -> ! {
- panic!("allocation failed");
- }
-
- #[inline]
- fn rt_error(layout: Layout) -> ! {
- unsafe {
- __rust_alloc_error_handler(layout.size(), layout.align());
- }
- }
-
- #[cfg(not(feature = "panic_immediate_abort"))]
- unsafe {
- core::intrinsics::const_eval_select((layout,), ct_error, rt_error)
- }
-
- #[cfg(feature = "panic_immediate_abort")]
- ct_error(layout)
-}
-
-// For alloc test `std::alloc::handle_alloc_error` can be used directly.
-#[cfg(all(not(no_global_oom_handling), test))]
-pub use std::alloc::handle_alloc_error;
-
-#[cfg(all(not(no_global_oom_handling), not(test)))]
-#[doc(hidden)]
-#[allow(unused_attributes)]
-#[unstable(feature = "alloc_internals", issue = "none")]
-pub mod __alloc_error_handler {
- // called via generated `__rust_alloc_error_handler` if there is no
- // `#[alloc_error_handler]`.
- #[rustc_std_internal_symbol]
- pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
- extern "Rust" {
- // This symbol is emitted by rustc next to __rust_alloc_error_handler.
- // Its value depends on the -Zoom={panic,abort} compiler option.
- static __rust_alloc_error_handler_should_panic: u8;
- }
-
- if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
- panic!("memory allocation of {size} bytes failed")
- } else {
- core::panicking::panic_nounwind_fmt(
- format_args!("memory allocation of {size} bytes failed"),
- /* force_no_backtrace */ false,
- )
- }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-/// Specialize clones into pre-allocated, uninitialized memory.
-/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
-pub(crate) trait WriteCloneIntoRaw: Sized {
- unsafe fn write_clone_into_raw(&self, target: *mut Self);
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone> WriteCloneIntoRaw for T {
- #[inline]
- default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
- // Having allocated *first* may allow the optimizer to create
- // the cloned value in-place, skipping the local and move.
- unsafe { target.write(self.clone()) };
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy> WriteCloneIntoRaw for T {
- #[inline]
- unsafe fn write_clone_into_raw(&self, target: *mut Self) {
- // We can always copy in-place, without ever involving a local value.
- unsafe { target.copy_from_nonoverlapping(self, 1) };
- }
-}
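
The removal of the vendored `alloc` crate pairs with the new `rust/kernel/alloc` code listed in the diffstat (`kbox.rs`, `kvec.rs`, `allocator.rs`): kernel Rust moves from `alloc`'s infallible, OOM-aborting entry points to fallible allocation that returns `Result`. A self-contained userspace sketch of that pattern, using only `std` (the kernel's own types and GFP flags differ):

use std::alloc::{alloc, dealloc, Layout};
use std::ptr::NonNull;

/// Fallible analogue of `Box::new`: returns `Err` instead of aborting
/// when the allocator fails.
fn try_new_u32(val: u32) -> Result<NonNull<u32>, ()> {
    let layout = Layout::new::<u32>();
    // SAFETY: `layout` has non-zero size.
    let ptr = unsafe { alloc(layout) }.cast::<u32>();
    let nn = NonNull::new(ptr).ok_or(())?;
    // SAFETY: `nn` points to a valid, uninitialized `u32`.
    unsafe { nn.as_ptr().write(val) };
    Ok(nn)
}

fn main() {
    let p = try_new_u32(42).expect("allocation failed");
    // SAFETY: `p` was allocated above with `Layout::new::<u32>()`.
    unsafe {
        assert_eq!(*p.as_ptr(), 42);
        dealloc(p.as_ptr().cast(), Layout::new::<u32>());
    }
}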
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
deleted file mode 100644
index c93a22a5c97f..000000000000
--- a/rust/alloc/boxed.rs
+++ /dev/null
@@ -1,2463 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-//! The `Box<T>` type for heap allocation.
-//!
-//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
-//! heap allocation in Rust. Boxes provide ownership for this allocation, and
-//! drop their contents when they go out of scope. Boxes also ensure that they
-//! never allocate more than `isize::MAX` bytes.
-//!
-//! # Examples
-//!
-//! Move a value from the stack to the heap by creating a [`Box`]:
-//!
-//! ```
-//! let val: u8 = 5;
-//! let boxed: Box<u8> = Box::new(val);
-//! ```
-//!
-//! Move a value from a [`Box`] back to the stack by [dereferencing]:
-//!
-//! ```
-//! let boxed: Box<u8> = Box::new(5);
-//! let val: u8 = *boxed;
-//! ```
-//!
-//! Creating a recursive data structure:
-//!
-//! ```
-//! #[derive(Debug)]
-//! enum List<T> {
-//! Cons(T, Box<List<T>>),
-//! Nil,
-//! }
-//!
-//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
-//! println!("{list:?}");
-//! ```
-//!
-//! This will print `Cons(1, Cons(2, Nil))`.
-//!
-//! Recursive structures must be boxed, because if the definition of `Cons`
-//! looked like this:
-//!
-//! ```compile_fail,E0072
-//! # enum List<T> {
-//! Cons(T, List<T>),
-//! # }
-//! ```
-//!
-//! It wouldn't work. This is because the size of a `List` depends on how many
-//! elements are in the list, and so we don't know how much memory to allocate
-//! for a `Cons`. By introducing a [`Box<T>`], which has a defined size, we know how
-//! big `Cons` needs to be.
-//!
-//! # Memory layout
-//!
-//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
-//! its allocation. It is valid to convert both ways between a [`Box`] and a
-//! raw pointer allocated with the [`Global`] allocator, given that the
-//! [`Layout`] used with the allocator is correct for the type. More precisely,
-//! a `value: *mut T` that has been allocated with the [`Global`] allocator
-//! with `Layout::for_value(&*value)` may be converted into a box using
-//! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
-//! T` obtained from [`Box::<T>::into_raw`] may be deallocated using the
-//! [`Global`] allocator with [`Layout::for_value(&*value)`].
-//!
-//! For zero-sized values, the `Box` pointer still has to be [valid] for reads
-//! and writes and sufficiently aligned. In particular, casting any aligned
-//! non-zero integer literal to a raw pointer produces a valid pointer, but a
-//! pointer pointing into previously allocated memory that since got freed is
-//! not valid. The recommended way to build a Box to a ZST if `Box::new` cannot
-//! be used is to use [`ptr::NonNull::dangling`].
-//!
-//! So long as `T: Sized`, a `Box<T>` is guaranteed to be represented
-//! as a single pointer and is also ABI-compatible with C pointers
-//! (i.e. the C type `T*`). This means that if you have extern "C"
-//! Rust functions that will be called from C, you can define those
-//! Rust functions using `Box<T>` types, and use `T*` as corresponding
-//! type on the C side. As an example, consider this C header which
-//! declares functions that create and destroy some kind of `Foo`
-//! value:
-//!
-//! ```c
-//! /* C header */
-//!
-//! /* Returns ownership to the caller */
-//! struct Foo* foo_new(void);
-//!
-//! /* Takes ownership from the caller; no-op when invoked with null */
-//! void foo_delete(struct Foo*);
-//! ```
-//!
-//! These two functions might be implemented in Rust as follows. Here, the
-//! `struct Foo*` type from C is translated to `Box<Foo>`, which captures
-//! the ownership constraints. Note also that the nullable argument to
-//! `foo_delete` is represented in Rust as `Option<Box<Foo>>`, since `Box<Foo>`
-//! cannot be null.
-//!
-//! ```
-//! #[repr(C)]
-//! pub struct Foo;
-//!
-//! #[no_mangle]
-//! pub extern "C" fn foo_new() -> Box<Foo> {
-//! Box::new(Foo)
-//! }
-//!
-//! #[no_mangle]
-//! pub extern "C" fn foo_delete(_: Option<Box<Foo>>) {}
-//! ```
-//!
-//! Even though `Box<T>` has the same representation and C ABI as a C pointer,
-//! this does not mean that you can convert an arbitrary `T*` into a `Box<T>`
-//! and expect things to work. `Box<T>` values will always be fully aligned,
-//! non-null pointers. Moreover, the destructor for `Box<T>` will attempt to
-//! free the value with the global allocator. In general, the best practice
-//! is to only use `Box<T>` for pointers that originated from the global
-//! allocator.
-//!
-//! **Important.** At least at present, you should avoid using
-//! `Box<T>` types for functions that are defined in C but invoked
-//! from Rust. In those cases, you should directly mirror the C types
-//! as closely as possible. Using types like `Box<T>` where the C
-//! definition is just using `T*` can lead to undefined behavior, as
-//! described in [rust-lang/unsafe-code-guidelines#198][ucg#198].
-//!
-//! # Considerations for unsafe code
-//!
-//! **Warning: This section is not normative and is subject to change, possibly
-//! being relaxed in the future! It is a simplified summary of the rules
-//! currently implemented in the compiler.**
-//!
-//! The aliasing rules for `Box<T>` are the same as for `&mut T`. `Box<T>`
-//! asserts uniqueness over its content. Using raw pointers derived from a box
-//! after that box has been mutated through, moved, or borrowed as `&mut T`
-//! is not allowed. For more guidance on working with boxes from unsafe code, see
-//! [rust-lang/unsafe-code-guidelines#326][ucg#326].
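-//!
-//! A minimal sketch of the allowed ordering (the raw pointer's use ends before
-//! the box is used again):
-//!
-//! ```
-//! let mut b = Box::new(5);
-//! let p: *mut i32 = &mut *b;
-//! // SAFETY: `b` is not moved, dropped, or otherwise accessed while `p` is in use.
-//! unsafe { *p += 1 };
-//! assert_eq!(*b, 6);
-//! ```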
-//!
-//! [ucg#198]: https://github.com/rust-lang/unsafe-code-guidelines/issues/198
-//! [ucg#326]: https://github.com/rust-lang/unsafe-code-guidelines/issues/326
-//! [dereferencing]: core::ops::Deref
-//! [`Box::<T>::from_raw(value)`]: Box::from_raw
-//! [`Global`]: crate::alloc::Global
-//! [`Layout`]: crate::alloc::Layout
-//! [`Layout::for_value(&*value)`]: crate::alloc::Layout::for_value
-//! [valid]: ptr#safety
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-use core::any::Any;
-use core::async_iter::AsyncIterator;
-use core::borrow;
-use core::cmp::Ordering;
-use core::error::Error;
-use core::fmt;
-use core::future::Future;
-use core::hash::{Hash, Hasher};
-use core::iter::FusedIterator;
-use core::marker::Tuple;
-use core::marker::Unsize;
-use core::mem::{self, SizedTypeProperties};
-use core::ops::{
- CoerceUnsized, Coroutine, CoroutineState, Deref, DerefMut, DispatchFromDyn, Receiver,
-};
-use core::pin::Pin;
-use core::ptr::{self, NonNull, Unique};
-use core::task::{Context, Poll};
-
-#[cfg(not(no_global_oom_handling))]
-use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw};
-use crate::alloc::{AllocError, Allocator, Global, Layout};
-#[cfg(not(no_global_oom_handling))]
-use crate::borrow::Cow;
-use crate::raw_vec::RawVec;
-#[cfg(not(no_global_oom_handling))]
-use crate::str::from_boxed_utf8_unchecked;
-#[cfg(not(no_global_oom_handling))]
-use crate::string::String;
-#[cfg(not(no_global_oom_handling))]
-use crate::vec::Vec;
-
-#[cfg(not(no_thin))]
-#[unstable(feature = "thin_box", issue = "92791")]
-pub use thin::ThinBox;
-
-#[cfg(not(no_thin))]
-mod thin;
-
-/// A pointer type that uniquely owns a heap allocation of type `T`.
-///
-/// See the [module-level documentation](../../std/boxed/index.html) for more.
-#[lang = "owned_box"]
-#[fundamental]
-#[stable(feature = "rust1", since = "1.0.0")]
-// The declaration of the `Box` struct must be kept in sync with the
-// `alloc::alloc::box_free` function or ICEs will happen. See the comment
-// on `box_free` for more details.
-pub struct Box<
- T: ?Sized,
- #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
->(Unique<T>, A);
-
-impl<T> Box<T> {
- /// Allocates memory on the heap and then places `x` into it.
- ///
- /// This doesn't actually allocate if `T` is zero-sized.
- ///
- /// # Examples
- ///
- /// ```
- /// let five = Box::new(5);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[inline(always)]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- #[rustc_diagnostic_item = "box_new"]
- pub fn new(x: T) -> Self {
- #[rustc_box]
- Box::new(x)
- }
-
- /// Constructs a new box with uninitialized contents.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(new_uninit)]
- ///
- /// let mut five = Box::<u32>::new_uninit();
- ///
- /// let five = unsafe {
- /// // Deferred initialization:
- /// five.as_mut_ptr().write(5);
- ///
- /// five.assume_init()
- /// };
- ///
- /// assert_eq!(*five, 5)
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "new_uninit", issue = "63291")]
- #[must_use]
- #[inline]
- pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
- Self::new_uninit_in(Global)
- }
-
- /// Constructs a new `Box` with uninitialized contents, with the memory
- /// being filled with `0` bytes.
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(new_uninit)]
- ///
- /// let zero = Box::<u32>::new_zeroed();
- /// let zero = unsafe { zero.assume_init() };
- ///
- /// assert_eq!(*zero, 0)
- /// ```
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- #[unstable(feature = "new_uninit", issue = "63291")]
- #[must_use]
- pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
- Self::new_zeroed_in(Global)
- }
-
- /// Constructs a new `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
- /// `x` will be pinned in memory and unable to be moved.
- ///
- /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin(x)`
- /// does the same as <code>[Box::into_pin]\([Box::new]\(x))</code>. Consider using
- /// [`into_pin`](Box::into_pin) if you already have a `Box<T>`, or if you want to
- /// construct a (pinned) `Box` in a different way than with [`Box::new`].
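- ///
- /// # Examples
- ///
- /// A minimal sketch of pinning a value on the heap:
- ///
- /// ```
- /// use std::pin::Pin;
- ///
- /// let pinned: Pin<Box<u32>> = Box::pin(5);
- /// assert_eq!(*pinned, 5);
- /// ```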
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "pin", since = "1.33.0")]
- #[must_use]
- #[inline(always)]
- pub fn pin(x: T) -> Pin<Box<T>> {
- Box::new(x).into()
- }
-
- /// Allocates memory on the heap then places `x` into it,
- /// returning an error if the allocation fails.
- ///
- /// This doesn't actually allocate if `T` is zero-sized.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// let five = Box::try_new(5)?;
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn try_new(x: T) -> Result<Self, AllocError> {
- Self::try_new_in(x, Global)
- }
-
- /// Constructs a new box with uninitialized contents on the heap,
- /// returning an error if the allocation fails.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// let mut five = Box::<u32>::try_new_uninit()?;
- ///
- /// let five = unsafe {
- /// // Deferred initialization:
- /// five.as_mut_ptr().write(5);
- ///
- /// five.assume_init()
- /// };
- ///
- /// assert_eq!(*five, 5);
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- #[unstable(feature = "allocator_api", issue = "32838")]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- #[inline]
- pub fn try_new_uninit() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
- Box::try_new_uninit_in(Global)
- }
-
- /// Constructs a new `Box` with uninitialized contents, with the memory
- /// being filled with `0` bytes on the heap.
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// let zero = Box::<u32>::try_new_zeroed()?;
- /// let zero = unsafe { zero.assume_init() };
- ///
- /// assert_eq!(*zero, 0);
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[unstable(feature = "allocator_api", issue = "32838")]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- #[inline]
- pub fn try_new_zeroed() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
- Box::try_new_zeroed_in(Global)
- }
-}
-
-impl<T, A: Allocator> Box<T, A> {
- /// Allocates memory in the given allocator then places `x` into it.
- ///
- /// This doesn't actually allocate if `T` is zero-sized.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let five = Box::new_in(5, System);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[must_use]
- #[inline]
- pub fn new_in(x: T, alloc: A) -> Self
- where
- A: Allocator,
- {
- let mut boxed = Self::new_uninit_in(alloc);
- unsafe {
- boxed.as_mut_ptr().write(x);
- boxed.assume_init()
- }
- }
-
- /// Allocates memory in the given allocator then places `x` into it,
- /// returning an error if the allocation fails.
- ///
- /// This doesn't actually allocate if `T` is zero-sized.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let five = Box::try_new_in(5, System)?;
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
- where
- A: Allocator,
- {
- let mut boxed = Self::try_new_uninit_in(alloc)?;
- unsafe {
- boxed.as_mut_ptr().write(x);
- Ok(boxed.assume_init())
- }
- }
-
- /// Constructs a new box with uninitialized contents in the provided allocator.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// use std::alloc::System;
- ///
- /// let mut five = Box::<u32, _>::new_uninit_in(System);
- ///
- /// let five = unsafe {
- /// // Deferred initialization:
- /// five.as_mut_ptr().write(5);
- ///
- /// five.assume_init()
- /// };
- ///
- /// assert_eq!(*five, 5)
- /// ```
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[cfg(not(no_global_oom_handling))]
- #[must_use]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
- where
- A: Allocator,
- {
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- // NOTE: Prefer `match` over `unwrap_or_else` since the closure is sometimes
- // not inlineable, which would increase code size.
- match Box::try_new_uninit_in(alloc) {
- Ok(m) => m,
- Err(_) => handle_alloc_error(layout),
- }
- }
-
- /// Constructs a new box with uninitialized contents in the provided allocator,
- /// returning an error if the allocation fails.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// use std::alloc::System;
- ///
- /// let mut five = Box::<u32, _>::try_new_uninit_in(System)?;
- ///
- /// let five = unsafe {
- /// // Deferred initialization:
- /// five.as_mut_ptr().write(5);
- ///
- /// five.assume_init()
- /// };
- ///
- /// assert_eq!(*five, 5);
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- #[unstable(feature = "allocator_api", issue = "32838")]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
- where
- A: Allocator,
- {
- let ptr = if T::IS_ZST {
- NonNull::dangling()
- } else {
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- alloc.allocate(layout)?.cast()
- };
- unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
- }
-
- /// Constructs a new `Box` with uninitialized contents, with the memory
- /// being filled with `0` bytes in the provided allocator.
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// use std::alloc::System;
- ///
- /// let zero = Box::<u32, _>::new_zeroed_in(System);
- /// let zero = unsafe { zero.assume_init() };
- ///
- /// assert_eq!(*zero, 0)
- /// ```
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[cfg(not(no_global_oom_handling))]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- #[must_use]
- pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
- where
- A: Allocator,
- {
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- // NOTE: Prefer `match` over `unwrap_or_else` since the closure is sometimes
- // not inlineable, which would increase code size.
- match Box::try_new_zeroed_in(alloc) {
- Ok(m) => m,
- Err(_) => handle_alloc_error(layout),
- }
- }
-
- /// Constructs a new `Box` with uninitialized contents, with the memory
- /// being filled with `0` bytes in the provided allocator,
- /// returning an error if the allocation fails.
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// use std::alloc::System;
- ///
- /// let zero = Box::<u32, _>::try_new_zeroed_in(System)?;
- /// let zero = unsafe { zero.assume_init() };
- ///
- /// assert_eq!(*zero, 0);
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[unstable(feature = "allocator_api", issue = "32838")]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
- where
- A: Allocator,
- {
- let ptr = if T::IS_ZST {
- NonNull::dangling()
- } else {
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- alloc.allocate_zeroed(layout)?.cast()
- };
- unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
- }
-
- /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then
- /// `x` will be pinned in memory and unable to be moved.
- ///
- /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin_in(x, alloc)`
- /// does the same as <code>[Box::into_pin]\([Box::new_in]\(x, alloc))</code>. Consider using
- /// [`into_pin`](Box::into_pin) if you already have a `Box<T, A>`, or if you want to
- /// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
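- ///
- /// # Examples
- ///
- /// A minimal sketch using the `System` allocator:
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let pinned = Box::pin_in(5, System);
- /// assert_eq!(*pinned, 5);
- /// ```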
- #[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[must_use]
- #[inline(always)]
- pub fn pin_in(x: T, alloc: A) -> Pin<Self>
- where
- A: 'static + Allocator,
- {
- Self::into_pin(Self::new_in(x, alloc))
- }
-
- /// Converts a `Box<T>` into a `Box<[T]>`.
- ///
- /// This conversion does not allocate on the heap and happens in place.
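- ///
- /// # Examples
- ///
- /// A minimal sketch:
- ///
- /// ```
- /// #![feature(box_into_boxed_slice)]
- ///
- /// let boxed: Box<u8> = Box::new(5);
- /// let boxed_slice: Box<[u8]> = Box::into_boxed_slice(boxed);
- /// assert_eq!(*boxed_slice, [5]);
- /// ```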
- #[unstable(feature = "box_into_boxed_slice", issue = "71582")]
- pub fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
- let (raw, alloc) = Box::into_raw_with_allocator(boxed);
- unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
- }
-
- /// Consumes the `Box`, returning the wrapped value.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(box_into_inner)]
- ///
- /// let c = Box::new(5);
- ///
- /// assert_eq!(Box::into_inner(c), 5);
- /// ```
- #[unstable(feature = "box_into_inner", issue = "80437")]
- #[inline]
- pub fn into_inner(boxed: Self) -> T {
- *boxed
- }
-}
-
-impl<T> Box<[T]> {
- /// Constructs a new boxed slice with uninitialized contents.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(new_uninit)]
- ///
- /// let mut values = Box::<[u32]>::new_uninit_slice(3);
- ///
- /// let values = unsafe {
- /// // Deferred initialization:
- /// values[0].as_mut_ptr().write(1);
- /// values[1].as_mut_ptr().write(2);
- /// values[2].as_mut_ptr().write(3);
- ///
- /// values.assume_init()
- /// };
- ///
- /// assert_eq!(*values, [1, 2, 3])
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "new_uninit", issue = "63291")]
- #[must_use]
- pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
- unsafe { RawVec::with_capacity(len).into_box(len) }
- }
-
- /// Constructs a new boxed slice with uninitialized contents, with the memory
- /// being filled with `0` bytes.
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(new_uninit)]
- ///
- /// let values = Box::<[u32]>::new_zeroed_slice(3);
- /// let values = unsafe { values.assume_init() };
- ///
- /// assert_eq!(*values, [0, 0, 0])
- /// ```
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "new_uninit", issue = "63291")]
- #[must_use]
- pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
- unsafe { RawVec::with_capacity_zeroed(len).into_box(len) }
- }
-
- /// Constructs a new boxed slice with uninitialized contents. Returns an error if
- /// the allocation fails.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// let mut values = Box::<[u32]>::try_new_uninit_slice(3)?;
- /// let values = unsafe {
- /// // Deferred initialization:
- /// values[0].as_mut_ptr().write(1);
- /// values[1].as_mut_ptr().write(2);
- /// values[2].as_mut_ptr().write(3);
- /// values.assume_init()
- /// };
- ///
- /// assert_eq!(*values, [1, 2, 3]);
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
- let ptr = if T::IS_ZST || len == 0 {
- NonNull::dangling()
- } else {
- let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
- Ok(l) => l,
- Err(_) => return Err(AllocError),
- };
- Global.allocate(layout)?.cast()
- };
- unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
- }
-
- /// Constructs a new boxed slice with uninitialized contents, with the memory
- /// being filled with `0` bytes. Returns an error if the allocation fails.
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// let values = Box::<[u32]>::try_new_zeroed_slice(3)?;
- /// let values = unsafe { values.assume_init() };
- ///
- /// assert_eq!(*values, [0, 0, 0]);
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
- let ptr = if T::IS_ZST || len == 0 {
- NonNull::dangling()
- } else {
- let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
- Ok(l) => l,
- Err(_) => return Err(AllocError),
- };
- Global.allocate_zeroed(layout)?.cast()
- };
- unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
- }
-}
-
-impl<T, A: Allocator> Box<[T], A> {
- /// Constructs a new boxed slice with uninitialized contents in the provided allocator.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// use std::alloc::System;
- ///
- /// let mut values = Box::<[u32], _>::new_uninit_slice_in(3, System);
- ///
- /// let values = unsafe {
- /// // Deferred initialization:
- /// values[0].as_mut_ptr().write(1);
- /// values[1].as_mut_ptr().write(2);
- /// values[2].as_mut_ptr().write(3);
- ///
- /// values.assume_init()
- /// };
- ///
- /// assert_eq!(*values, [1, 2, 3])
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "allocator_api", issue = "32838")]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- #[must_use]
- pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
- unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) }
- }
-
- /// Constructs a new boxed slice with uninitialized contents in the provided allocator,
- /// with the memory being filled with `0` bytes.
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, new_uninit)]
- ///
- /// use std::alloc::System;
- ///
- /// let values = Box::<[u32], _>::new_zeroed_slice_in(3, System);
- /// let values = unsafe { values.assume_init() };
- ///
- /// assert_eq!(*values, [0, 0, 0])
- /// ```
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "allocator_api", issue = "32838")]
- // #[unstable(feature = "new_uninit", issue = "63291")]
- #[must_use]
- pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
- unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) }
- }
-}
-
-impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
- /// Converts to `Box<T, A>`.
- ///
- /// # Safety
- ///
- /// As with [`MaybeUninit::assume_init`],
- /// it is up to the caller to guarantee that the value
- /// really is in an initialized state.
- /// Calling this when the content is not yet fully initialized
- /// causes immediate undefined behavior.
- ///
- /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(new_uninit)]
- ///
- /// let mut five = Box::<u32>::new_uninit();
- ///
- /// let five: Box<u32> = unsafe {
- /// // Deferred initialization:
- /// five.as_mut_ptr().write(5);
- ///
- /// five.assume_init()
- /// };
- ///
- /// assert_eq!(*five, 5)
- /// ```
- #[unstable(feature = "new_uninit", issue = "63291")]
- #[inline]
- pub unsafe fn assume_init(self) -> Box<T, A> {
- let (raw, alloc) = Box::into_raw_with_allocator(self);
- unsafe { Box::from_raw_in(raw as *mut T, alloc) }
- }
-
- /// Writes the value and converts to `Box<T, A>`.
- ///
- /// This method converts the box similarly to [`Box::assume_init`] but
- /// writes `value` into it before conversion, thus guaranteeing safety.
- /// In some scenarios, use of this method may improve performance because
- /// the compiler may be able to optimize copying from the stack.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(new_uninit)]
- ///
- /// let big_box = Box::<[usize; 1024]>::new_uninit();
- ///
- /// let mut array = [0; 1024];
- /// for (i, place) in array.iter_mut().enumerate() {
- /// *place = i;
- /// }
- ///
- /// // The optimizer may be able to elide this copy, so the previous code
- /// // writes to the heap directly.
- /// let big_box = Box::write(big_box, array);
- ///
- /// for (i, x) in big_box.iter().enumerate() {
- /// assert_eq!(*x, i);
- /// }
- /// ```
- #[unstable(feature = "new_uninit", issue = "63291")]
- #[inline]
- pub fn write(mut boxed: Self, value: T) -> Box<T, A> {
- unsafe {
- (*boxed).write(value);
- boxed.assume_init()
- }
- }
-}
-
-impl<T, A: Allocator> Box<[mem::MaybeUninit<T>], A> {
- /// Converts to `Box<[T], A>`.
- ///
- /// # Safety
- ///
- /// As with [`MaybeUninit::assume_init`],
- /// it is up to the caller to guarantee that the values
- /// really are in an initialized state.
- /// Calling this when the content is not yet fully initialized
- /// causes immediate undefined behavior.
- ///
- /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(new_uninit)]
- ///
- /// let mut values = Box::<[u32]>::new_uninit_slice(3);
- ///
- /// let values = unsafe {
- /// // Deferred initialization:
- /// values[0].as_mut_ptr().write(1);
- /// values[1].as_mut_ptr().write(2);
- /// values[2].as_mut_ptr().write(3);
- ///
- /// values.assume_init()
- /// };
- ///
- /// assert_eq!(*values, [1, 2, 3])
- /// ```
- #[unstable(feature = "new_uninit", issue = "63291")]
- #[inline]
- pub unsafe fn assume_init(self) -> Box<[T], A> {
- let (raw, alloc) = Box::into_raw_with_allocator(self);
- unsafe { Box::from_raw_in(raw as *mut [T], alloc) }
- }
-}
-
-impl<T: ?Sized> Box<T> {
- /// Constructs a box from a raw pointer.
- ///
- /// After calling this function, the raw pointer is owned by the
- /// resulting `Box`. Specifically, the `Box` destructor will call
- /// the destructor of `T` and free the allocated memory. For this
- /// to be safe, the memory must have been allocated in accordance
- /// with the [memory layout] used by `Box`.
- ///
- /// # Safety
- ///
- /// This function is unsafe because improper use may lead to
- /// memory problems. For example, a double-free may occur if the
- /// function is called twice on the same raw pointer.
- ///
- /// The safety conditions are described in the [memory layout] section.
- ///
- /// # Examples
- ///
- /// Recreate a `Box` which was previously converted to a raw pointer
- /// using [`Box::into_raw`]:
- /// ```
- /// let x = Box::new(5);
- /// let ptr = Box::into_raw(x);
- /// let x = unsafe { Box::from_raw(ptr) };
- /// ```
- /// Manually create a `Box` from scratch by using the global allocator:
- /// ```
- /// use std::alloc::{alloc, Layout};
- ///
- /// unsafe {
- /// let ptr = alloc(Layout::new::<i32>()) as *mut i32;
- /// // In general .write is required to avoid attempting to destruct
- /// // the (uninitialized) previous contents of `ptr`, though for this
- /// // simple example `*ptr = 5` would have worked as well.
- /// ptr.write(5);
- /// let x = Box::from_raw(ptr);
- /// }
- /// ```
- ///
- /// [memory layout]: self#memory-layout
- /// [`Layout`]: crate::Layout
- #[stable(feature = "box_raw", since = "1.4.0")]
- #[inline]
- #[must_use = "call `drop(Box::from_raw(ptr))` if you intend to drop the `Box`"]
- pub unsafe fn from_raw(raw: *mut T) -> Self {
- unsafe { Self::from_raw_in(raw, Global) }
- }
-}
-
-impl<T: ?Sized, A: Allocator> Box<T, A> {
- /// Constructs a box from a raw pointer in the given allocator.
- ///
- /// After calling this function, the raw pointer is owned by the
- /// resulting `Box`. Specifically, the `Box` destructor will call
- /// the destructor of `T` and free the allocated memory. For this
- /// to be safe, the memory must have been allocated in accordance
- /// with the [memory layout] used by `Box`.
- ///
- /// # Safety
- ///
- /// This function is unsafe because improper use may lead to
- /// memory problems. For example, a double-free may occur if the
- /// function is called twice on the same raw pointer.
- ///
- /// # Examples
- ///
- /// Recreate a `Box` which was previously converted to a raw pointer
- /// using [`Box::into_raw_with_allocator`]:
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let x = Box::new_in(5, System);
- /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
- /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
- /// ```
- /// Manually create a `Box` from scratch by using the system allocator:
- /// ```
- /// #![feature(allocator_api, slice_ptr_get)]
- ///
- /// use std::alloc::{Allocator, Layout, System};
- ///
- /// unsafe {
- /// let ptr = System.allocate(Layout::new::<i32>())?.as_mut_ptr() as *mut i32;
- /// // In general .write is required to avoid attempting to destruct
- /// // the (uninitialized) previous contents of `ptr`, though for this
- /// // simple example `*ptr = 5` would have worked as well.
- /// ptr.write(5);
- /// let x = Box::from_raw_in(ptr, System);
- /// }
- /// # Ok::<(), std::alloc::AllocError>(())
- /// ```
- ///
- /// [memory layout]: self#memory-layout
- /// [`Layout`]: crate::Layout
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- #[inline]
- pub const unsafe fn from_raw_in(raw: *mut T, alloc: A) -> Self {
- Box(unsafe { Unique::new_unchecked(raw) }, alloc)
- }
-
- /// Consumes the `Box`, returning a wrapped raw pointer.
- ///
- /// The pointer will be properly aligned and non-null.
- ///
- /// After calling this function, the caller is responsible for the
- /// memory previously managed by the `Box`. In particular, the
- /// caller should properly destroy `T` and release the memory, taking
- /// into account the [memory layout] used by `Box`. The easiest way to
- /// do this is to convert the raw pointer back into a `Box` with the
- /// [`Box::from_raw`] function, allowing the `Box` destructor to perform
- /// the cleanup.
- ///
- /// Note: this is an associated function, which means that you have
- /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
- /// is so that there is no conflict with a method on the inner type.
- ///
- /// # Examples
- ///
- /// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
- /// for automatic cleanup:
- /// ```
- /// let x = Box::new(String::from("Hello"));
- /// let ptr = Box::into_raw(x);
- /// let x = unsafe { Box::from_raw(ptr) };
- /// ```
- /// Manual cleanup by explicitly running the destructor and deallocating
- /// the memory:
- /// ```
- /// use std::alloc::{dealloc, Layout};
- /// use std::ptr;
- ///
- /// let x = Box::new(String::from("Hello"));
- /// let ptr = Box::into_raw(x);
- /// unsafe {
- /// ptr::drop_in_place(ptr);
- /// dealloc(ptr as *mut u8, Layout::new::<String>());
- /// }
- /// ```
- /// Note: This is equivalent to the following:
- /// ```
- /// let x = Box::new(String::from("Hello"));
- /// let ptr = Box::into_raw(x);
- /// unsafe {
- /// drop(Box::from_raw(ptr));
- /// }
- /// ```
- ///
- /// [memory layout]: self#memory-layout
- #[stable(feature = "box_raw", since = "1.4.0")]
- #[inline]
- pub fn into_raw(b: Self) -> *mut T {
- Self::into_raw_with_allocator(b).0
- }
-
- /// Consumes the `Box`, returning a wrapped raw pointer and the allocator.
- ///
- /// The pointer will be properly aligned and non-null.
- ///
- /// After calling this function, the caller is responsible for the
- /// memory previously managed by the `Box`. In particular, the
- /// caller should properly destroy `T` and release the memory, taking
- /// into account the [memory layout] used by `Box`. The easiest way to
- /// do this is to convert the raw pointer back into a `Box` with the
- /// [`Box::from_raw_in`] function, allowing the `Box` destructor to perform
- /// the cleanup.
- ///
- /// Note: this is an associated function, which means that you have
- /// to call it as `Box::into_raw_with_allocator(b)` instead of `b.into_raw_with_allocator()`. This
- /// is so that there is no conflict with a method on the inner type.
- ///
- /// # Examples
- ///
- /// Converting the raw pointer back into a `Box` with [`Box::from_raw_in`]
- /// for automatic cleanup:
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let x = Box::new_in(String::from("Hello"), System);
- /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
- /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
- /// ```
- /// Manual cleanup by explicitly running the destructor and deallocating
- /// the memory:
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::{Allocator, Layout, System};
- /// use std::ptr::{self, NonNull};
- ///
- /// let x = Box::new_in(String::from("Hello"), System);
- /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
- /// unsafe {
- /// ptr::drop_in_place(ptr);
- /// let non_null = NonNull::new_unchecked(ptr);
- /// alloc.deallocate(non_null.cast(), Layout::new::<String>());
- /// }
- /// ```
- ///
- /// [memory layout]: self#memory-layout
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
- let (leaked, alloc) = Box::into_unique(b);
- (leaked.as_ptr(), alloc)
- }
-
- #[unstable(
- feature = "ptr_internals",
- issue = "none",
- reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
- )]
- #[inline]
- #[doc(hidden)]
- pub fn into_unique(b: Self) -> (Unique<T>, A) {
- // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
- // raw pointer for the type system. Turning it directly into a raw pointer would not be
- // recognized as "releasing" the unique pointer to permit aliased raw accesses,
- // so all raw pointer methods have to go through `Box::leak`. Turning *that* to a raw pointer
- // behaves correctly.
- let alloc = unsafe { ptr::read(&b.1) };
- (Unique::from(Box::leak(b)), alloc)
- }
-
- /// Returns a reference to the underlying allocator.
- ///
- /// Note: this is an associated function, which means that you have
- /// to call it as `Box::allocator(&b)` instead of `b.allocator()`. This
- /// is so that there is no conflict with a method on the inner type.
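- ///
- /// # Examples
- ///
- /// A minimal sketch using the `System` allocator:
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let b = Box::new_in(5, System);
- /// let _alloc: &System = Box::allocator(&b);
- /// ```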
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- #[inline]
- pub const fn allocator(b: &Self) -> &A {
- &b.1
- }
-
- /// Consumes and leaks the `Box`, returning a mutable reference,
- /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
- /// `'a`. If the type has only static references, or none at all, then this
- /// may be chosen to be `'static`.
- ///
- /// This function is mainly useful for data that lives for the remainder of
- /// the program's life. Dropping the returned reference will cause a memory
- /// leak. If this is not acceptable, the reference should first be wrapped
- /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
- /// then be dropped which will properly destroy `T` and release the
- /// allocated memory.
- ///
- /// Note: this is an associated function, which means that you have
- /// to call it as `Box::leak(b)` instead of `b.leak()`. This
- /// is so that there is no conflict with a method on the inner type.
- ///
- /// # Examples
- ///
- /// Simple usage:
- ///
- /// ```
- /// let x = Box::new(41);
- /// let static_ref: &'static mut usize = Box::leak(x);
- /// *static_ref += 1;
- /// assert_eq!(*static_ref, 42);
- /// ```
- ///
- /// Unsized data:
- ///
- /// ```
- /// let x = vec![1, 2, 3].into_boxed_slice();
- /// let static_ref = Box::leak(x);
- /// static_ref[0] = 4;
- /// assert_eq!(*static_ref, [4, 2, 3]);
- /// ```
- #[stable(feature = "box_leak", since = "1.26.0")]
- #[inline]
- pub fn leak<'a>(b: Self) -> &'a mut T
- where
- A: 'a,
- {
- unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
- }
-
- /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
- /// `*boxed` will be pinned in memory and unable to be moved.
- ///
- /// This conversion does not allocate on the heap and happens in place.
- ///
- /// This is also available via [`From`].
- ///
- /// Constructing and pinning a `Box` with <code>Box::into_pin([Box::new]\(x))</code>
- /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
- /// This `into_pin` method is useful if you already have a `Box<T>`, or you are
- /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
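- ///
- /// # Examples
- ///
- /// A minimal sketch:
- ///
- /// ```
- /// use std::pin::Pin;
- ///
- /// let boxed: Box<u8> = Box::new(5);
- /// let pinned: Pin<Box<u8>> = Box::into_pin(boxed);
- /// assert_eq!(*pinned, 5);
- /// ```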
- ///
- /// # Notes
- ///
- /// It's not recommended that crates add an impl like `From<Box<T>> for Pin<T>`,
- /// as it'll introduce an ambiguity when calling `Pin::from`.
- /// A demonstration of such a poor impl is shown below.
- ///
- /// ```compile_fail
- /// # use std::pin::Pin;
- /// struct Foo; // A type defined in this crate.
- /// impl From<Box<()>> for Pin<Foo> {
- /// fn from(_: Box<()>) -> Pin<Foo> {
- /// Pin::new(Foo)
- /// }
- /// }
- ///
- /// let foo = Box::new(());
- /// let bar = Pin::from(foo);
- /// ```
- #[stable(feature = "box_into_pin", since = "1.63.0")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn into_pin(boxed: Self) -> Pin<Self>
- where
- A: 'static,
- {
- // It's not possible to move or replace the insides of a `Pin<Box<T>>`
- // when `T: !Unpin`, so it's safe to pin it directly without any
- // additional requirements.
- unsafe { Pin::new_unchecked(boxed) }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
- #[inline]
- fn drop(&mut self) {
- // the T in the Box is dropped by the compiler before the destructor is run
-
- let ptr = self.0;
-
- unsafe {
- let layout = Layout::for_value_raw(ptr.as_ptr());
- if layout.size() != 0 {
- self.1.deallocate(From::from(ptr.cast()), layout);
- }
- }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Default> Default for Box<T> {
- /// Creates a `Box<T>` with the `Default` value for `T`.
- #[inline]
- fn default() -> Self {
- Box::new(T::default())
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Default for Box<[T]> {
- #[inline]
- fn default() -> Self {
- let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling();
- Box(ptr, Global)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "default_box_extra", since = "1.17.0")]
-impl Default for Box<str> {
- #[inline]
- fn default() -> Self {
- // SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`.
- let ptr: Unique<str> = unsafe {
- let bytes: Unique<[u8]> = Unique::<[u8; 0]>::dangling();
- Unique::new_unchecked(bytes.as_ptr() as *mut str)
- };
- Box(ptr, Global)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone, A: Allocator + Clone> Clone for Box<T, A> {
- /// Returns a new box with a `clone()` of this box's contents.
- ///
- /// # Examples
- ///
- /// ```
- /// let x = Box::new(5);
- /// let y = x.clone();
- ///
- /// // The value is the same
- /// assert_eq!(x, y);
- ///
- /// // But they are unique objects
- /// assert_ne!(&*x as *const i32, &*y as *const i32);
- /// ```
- #[inline]
- fn clone(&self) -> Self {
- // Pre-allocate memory to allow writing the cloned value directly.
- let mut boxed = Self::new_uninit_in(self.1.clone());
- unsafe {
- (**self).write_clone_into_raw(boxed.as_mut_ptr());
- boxed.assume_init()
- }
- }
-
- /// Copies `source`'s contents into `self` without creating a new allocation.
- ///
- /// # Examples
- ///
- /// ```
- /// let x = Box::new(5);
- /// let mut y = Box::new(10);
- /// let yp: *const i32 = &*y;
- ///
- /// y.clone_from(&x);
- ///
- /// // The value is the same
- /// assert_eq!(x, y);
- ///
- /// // And no allocation occurred
- /// assert_eq!(yp, &*y);
- /// ```
- #[inline]
- fn clone_from(&mut self, source: &Self) {
- (**self).clone_from(&(**source));
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_slice_clone", since = "1.3.0")]
-impl Clone for Box<str> {
- fn clone(&self) -> Self {
- // this makes a copy of the data
- let buf: Box<[u8]> = self.as_bytes().into();
- unsafe { from_boxed_utf8_unchecked(buf) }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Box<T, A> {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- PartialEq::eq(&**self, &**other)
- }
- #[inline]
- fn ne(&self, other: &Self) -> bool {
- PartialEq::ne(&**self, &**other)
- }
-}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Box<T, A> {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- PartialOrd::partial_cmp(&**self, &**other)
- }
- #[inline]
- fn lt(&self, other: &Self) -> bool {
- PartialOrd::lt(&**self, &**other)
- }
- #[inline]
- fn le(&self, other: &Self) -> bool {
- PartialOrd::le(&**self, &**other)
- }
- #[inline]
- fn ge(&self, other: &Self) -> bool {
- PartialOrd::ge(&**self, &**other)
- }
- #[inline]
- fn gt(&self, other: &Self) -> bool {
- PartialOrd::gt(&**self, &**other)
- }
-}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Ord, A: Allocator> Ord for Box<T, A> {
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- Ord::cmp(&**self, &**other)
- }
-}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq, A: Allocator> Eq for Box<T, A> {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Hash, A: Allocator> Hash for Box<T, A> {
- fn hash<H: Hasher>(&self, state: &mut H) {
- (**self).hash(state);
- }
-}
-
-#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
-impl<T: ?Sized + Hasher, A: Allocator> Hasher for Box<T, A> {
- fn finish(&self) -> u64 {
- (**self).finish()
- }
- fn write(&mut self, bytes: &[u8]) {
- (**self).write(bytes)
- }
- fn write_u8(&mut self, i: u8) {
- (**self).write_u8(i)
- }
- fn write_u16(&mut self, i: u16) {
- (**self).write_u16(i)
- }
- fn write_u32(&mut self, i: u32) {
- (**self).write_u32(i)
- }
- fn write_u64(&mut self, i: u64) {
- (**self).write_u64(i)
- }
- fn write_u128(&mut self, i: u128) {
- (**self).write_u128(i)
- }
- fn write_usize(&mut self, i: usize) {
- (**self).write_usize(i)
- }
- fn write_i8(&mut self, i: i8) {
- (**self).write_i8(i)
- }
- fn write_i16(&mut self, i: i16) {
- (**self).write_i16(i)
- }
- fn write_i32(&mut self, i: i32) {
- (**self).write_i32(i)
- }
- fn write_i64(&mut self, i: i64) {
- (**self).write_i64(i)
- }
- fn write_i128(&mut self, i: i128) {
- (**self).write_i128(i)
- }
- fn write_isize(&mut self, i: isize) {
- (**self).write_isize(i)
- }
- fn write_length_prefix(&mut self, len: usize) {
- (**self).write_length_prefix(len)
- }
- fn write_str(&mut self, s: &str) {
- (**self).write_str(s)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "from_for_ptrs", since = "1.6.0")]
-impl<T> From<T> for Box<T> {
- /// Converts a `T` into a `Box<T>`.
- ///
- /// The conversion allocates on the heap and moves `t`
- /// from the stack into it.
- ///
- /// # Examples
- ///
- /// ```rust
- /// let x = 5;
- /// let boxed = Box::new(5);
- ///
- /// assert_eq!(Box::from(x), boxed);
- /// ```
- fn from(t: T) -> Self {
- Box::new(t)
- }
-}
-
-#[stable(feature = "pin", since = "1.33.0")]
-impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Pin<Box<T, A>>
-where
- A: 'static,
-{
- /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
- /// `*boxed` will be pinned in memory and unable to be moved.
- ///
- /// This conversion does not allocate on the heap and happens in place.
- ///
- /// This is also available via [`Box::into_pin`].
- ///
- /// Constructing and pinning a `Box` with <code><Pin<Box\<T>>>::from([Box::new]\(x))</code>
- /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
- /// This `From` implementation is useful if you already have a `Box<T>`, or you are
- /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
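- ///
- /// # Examples
- ///
- /// A minimal sketch:
- ///
- /// ```
- /// use std::pin::Pin;
- ///
- /// let pinned: Pin<Box<u8>> = Pin::from(Box::new(5));
- /// assert_eq!(*pinned, 5);
- /// ```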
- fn from(boxed: Box<T, A>) -> Self {
- Box::into_pin(boxed)
- }
-}
-
-/// Specialization trait used for `From<&[T]>`.
-#[cfg(not(no_global_oom_handling))]
-trait BoxFromSlice<T> {
- fn from_slice(slice: &[T]) -> Self;
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone> BoxFromSlice<T> for Box<[T]> {
- #[inline]
- default fn from_slice(slice: &[T]) -> Self {
- slice.to_vec().into_boxed_slice()
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy> BoxFromSlice<T> for Box<[T]> {
- #[inline]
- fn from_slice(slice: &[T]) -> Self {
- let len = slice.len();
- let buf = RawVec::with_capacity(len);
- unsafe {
- ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
- buf.into_box(slice.len()).assume_init()
- }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_from_slice", since = "1.17.0")]
-impl<T: Clone> From<&[T]> for Box<[T]> {
- /// Converts a `&[T]` into a `Box<[T]>`.
- ///
- /// This conversion allocates on the heap
- /// and performs a copy of `slice` and its contents.
- ///
- /// # Examples
- ///
- /// ```rust
- /// // create a &[u8] which will be used to create a Box<[u8]>
- /// let slice: &[u8] = &[104, 101, 108, 108, 111];
- /// let boxed_slice: Box<[u8]> = Box::from(slice);
- ///
- /// println!("{boxed_slice:?}");
- /// ```
- #[inline]
- fn from(slice: &[T]) -> Box<[T]> {
- <Self as BoxFromSlice<T>>::from_slice(slice)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_from_cow", since = "1.45.0")]
-impl<T: Clone> From<Cow<'_, [T]>> for Box<[T]> {
- /// Converts a `Cow<'_, [T]>` into a `Box<[T]>`.
- ///
- /// When `cow` is the `Cow::Borrowed` variant, this
- /// conversion allocates on the heap and copies the
- /// underlying slice. Otherwise, it will try to reuse the owned
- /// `Vec`'s allocation.
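- ///
- /// # Examples
- ///
- /// A minimal sketch with a borrowed `Cow`:
- ///
- /// ```
- /// use std::borrow::Cow;
- ///
- /// let cow: Cow<'_, [u8]> = Cow::Borrowed(&[1, 2, 3]);
- /// let boxed: Box<[u8]> = Box::from(cow);
- /// assert_eq!(*boxed, [1, 2, 3]);
- /// ```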
- #[inline]
- fn from(cow: Cow<'_, [T]>) -> Box<[T]> {
- match cow {
- Cow::Borrowed(slice) => Box::from(slice),
- Cow::Owned(slice) => Box::from(slice),
- }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_from_slice", since = "1.17.0")]
-impl From<&str> for Box<str> {
- /// Converts a `&str` into a `Box<str>`.
- ///
- /// This conversion allocates on the heap
- /// and performs a copy of `s`.
- ///
- /// # Examples
- ///
- /// ```rust
- /// let boxed: Box<str> = Box::from("hello");
- /// println!("{boxed}");
- /// ```
- #[inline]
- fn from(s: &str) -> Box<str> {
- unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_from_cow", since = "1.45.0")]
-impl From<Cow<'_, str>> for Box<str> {
- /// Converts a `Cow<'_, str>` into a `Box<str>`.
- ///
- /// When `cow` is the `Cow::Borrowed` variant, this
- /// conversion allocates on the heap and copies the
- /// underlying `str`. Otherwise, it will try to reuse the owned
- /// `String`'s allocation.
- ///
- /// # Examples
- ///
- /// ```rust
- /// use std::borrow::Cow;
- ///
- /// let unboxed = Cow::Borrowed("hello");
- /// let boxed: Box<str> = Box::from(unboxed);
- /// println!("{boxed}");
- /// ```
- ///
- /// ```rust
- /// # use std::borrow::Cow;
- /// let unboxed = Cow::Owned("hello".to_string());
- /// let boxed: Box<str> = Box::from(unboxed);
- /// println!("{boxed}");
- /// ```
- #[inline]
- fn from(cow: Cow<'_, str>) -> Box<str> {
- match cow {
- Cow::Borrowed(s) => Box::from(s),
- Cow::Owned(s) => Box::from(s),
- }
- }
-}
-
-#[stable(feature = "boxed_str_conv", since = "1.19.0")]
-impl<A: Allocator> From<Box<str, A>> for Box<[u8], A> {
- /// Converts a `Box<str>` into a `Box<[u8]>`.
- ///
- /// This conversion does not allocate on the heap and happens in place.
- ///
- /// # Examples
- ///
- /// ```rust
- /// // create a Box<str> which will be used to create a Box<[u8]>
- /// let boxed: Box<str> = Box::from("hello");
- /// let boxed_str: Box<[u8]> = Box::from(boxed);
- ///
- /// // create a &[u8] which will be used to create a Box<[u8]>
- /// let slice: &[u8] = &[104, 101, 108, 108, 111];
- /// let boxed_slice = Box::from(slice);
- ///
- /// assert_eq!(boxed_slice, boxed_str);
- /// ```
- #[inline]
- fn from(s: Box<str, A>) -> Self {
- let (raw, alloc) = Box::into_raw_with_allocator(s);
- unsafe { Box::from_raw_in(raw as *mut [u8], alloc) }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_from_array", since = "1.45.0")]
-impl<T, const N: usize> From<[T; N]> for Box<[T]> {
- /// Converts a `[T; N]` into a `Box<[T]>`.
- ///
- /// This conversion moves the array to newly heap-allocated memory.
- ///
- /// # Examples
- ///
- /// ```rust
- /// let boxed: Box<[u8]> = Box::from([4, 2]);
- /// println!("{boxed:?}");
- /// ```
- fn from(array: [T; N]) -> Box<[T]> {
- Box::new(array)
- }
-}
-
-/// Casts a boxed slice to a boxed array.
-///
-/// # Safety
-///
-/// `boxed_slice.len()` must be exactly `N`.
-unsafe fn boxed_slice_as_array_unchecked<T, A: Allocator, const N: usize>(
- boxed_slice: Box<[T], A>,
-) -> Box<[T; N], A> {
- debug_assert_eq!(boxed_slice.len(), N);
-
- let (ptr, alloc) = Box::into_raw_with_allocator(boxed_slice);
- // SAFETY: Pointer and allocator came from an existing box,
- // and our safety condition requires that the length is exactly `N`
- unsafe { Box::from_raw_in(ptr as *mut [T; N], alloc) }
-}
-
-#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
-impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
- type Error = Box<[T]>;
-
- /// Attempts to convert a `Box<[T]>` into a `Box<[T; N]>`.
- ///
- /// The conversion occurs in-place and does not require a
- /// new memory allocation.
- ///
- /// # Errors
- ///
- /// Returns the old `Box<[T]>` in the `Err` variant if
- /// `boxed_slice.len()` does not equal `N`.
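- ///
- /// # Examples
- ///
- /// A minimal sketch:
- ///
- /// ```
- /// let slice: Box<[i32]> = Box::from([1, 2, 3]);
- /// let array: Box<[i32; 3]> = slice.try_into().unwrap();
- /// assert_eq!(*array, [1, 2, 3]);
- /// ```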
- fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
- if boxed_slice.len() == N {
- Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
- } else {
- Err(boxed_slice)
- }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "boxed_array_try_from_vec", since = "1.66.0")]
-impl<T, const N: usize> TryFrom<Vec<T>> for Box<[T; N]> {
- type Error = Vec<T>;
-
- /// Attempts to convert a `Vec<T>` into a `Box<[T; N]>`.
- ///
- /// Like [`Vec::into_boxed_slice`], this is in-place if `vec.capacity() == N`,
- /// but will require a reallocation otherwise.
- ///
- /// # Errors
- ///
- /// Returns the original `Vec<T>` in the `Err` variant if
- /// `vec.len()` does not equal `N`.
- ///
- /// # Examples
- ///
- /// This can be used with [`vec!`] to create an array on the heap:
- ///
- /// ```
- /// let state: Box<[f32; 100]> = vec![1.0; 100].try_into().unwrap();
- /// assert_eq!(state.len(), 100);
- /// ```
- fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> {
- if vec.len() == N {
- let boxed_slice = vec.into_boxed_slice();
- Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
- } else {
- Err(vec)
- }
- }
-}
-
-impl<A: Allocator> Box<dyn Any, A> {
- /// Attempt to downcast the box to a concrete type.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::any::Any;
- ///
- /// fn print_if_string(value: Box<dyn Any>) {
- /// if let Ok(string) = value.downcast::<String>() {
- /// println!("String ({}): {}", string.len(), string);
- /// }
- /// }
- ///
- /// let my_string = "Hello World".to_string();
- /// print_if_string(Box::new(my_string));
- /// print_if_string(Box::new(0i8));
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
- if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
- }
-
- /// Downcasts the box to a concrete type.
- ///
- /// For a safe alternative see [`downcast`].
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(downcast_unchecked)]
- ///
- /// use std::any::Any;
- ///
- /// let x: Box<dyn Any> = Box::new(1_usize);
- ///
- /// unsafe {
- /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
- /// }
- /// ```
- ///
- /// # Safety
- ///
- /// The contained value must be of type `T`. Calling this method
- /// with the incorrect type is *undefined behavior*.
- ///
- /// [`downcast`]: Self::downcast
- #[inline]
- #[unstable(feature = "downcast_unchecked", issue = "90850")]
- pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
- debug_assert!(self.is::<T>());
- unsafe {
- let (raw, alloc): (*mut dyn Any, _) = Box::into_raw_with_allocator(self);
- Box::from_raw_in(raw as *mut T, alloc)
- }
- }
-}
-
-impl<A: Allocator> Box<dyn Any + Send, A> {
- /// Attempt to downcast the box to a concrete type.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::any::Any;
- ///
- /// fn print_if_string(value: Box<dyn Any + Send>) {
- /// if let Ok(string) = value.downcast::<String>() {
- /// println!("String ({}): {}", string.len(), string);
- /// }
- /// }
- ///
- /// let my_string = "Hello World".to_string();
- /// print_if_string(Box::new(my_string));
- /// print_if_string(Box::new(0i8));
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
- if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
- }
-
- /// Downcasts the box to a concrete type.
- ///
- /// For a safe alternative see [`downcast`].
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(downcast_unchecked)]
- ///
- /// use std::any::Any;
- ///
- /// let x: Box<dyn Any + Send> = Box::new(1_usize);
- ///
- /// unsafe {
- /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
- /// }
- /// ```
- ///
- /// # Safety
- ///
- /// The contained value must be of type `T`. Calling this method
- /// with the incorrect type is *undefined behavior*.
- ///
- /// [`downcast`]: Self::downcast
- #[inline]
- #[unstable(feature = "downcast_unchecked", issue = "90850")]
- pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
- debug_assert!(self.is::<T>());
- unsafe {
- let (raw, alloc): (*mut (dyn Any + Send), _) = Box::into_raw_with_allocator(self);
- Box::from_raw_in(raw as *mut T, alloc)
- }
- }
-}
-
-impl<A: Allocator> Box<dyn Any + Send + Sync, A> {
- /// Attempt to downcast the box to a concrete type.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::any::Any;
- ///
- /// fn print_if_string(value: Box<dyn Any + Send + Sync>) {
- /// if let Ok(string) = value.downcast::<String>() {
- /// println!("String ({}): {}", string.len(), string);
- /// }
- /// }
- ///
- /// let my_string = "Hello World".to_string();
- /// print_if_string(Box::new(my_string));
- /// print_if_string(Box::new(0i8));
- /// ```
- #[inline]
- #[stable(feature = "box_send_sync_any_downcast", since = "1.51.0")]
- pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
- if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
- }
-
- /// Downcasts the box to a concrete type.
- ///
- /// For a safe alternative see [`downcast`].
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(downcast_unchecked)]
- ///
- /// use std::any::Any;
- ///
- /// let x: Box<dyn Any + Send + Sync> = Box::new(1_usize);
- ///
- /// unsafe {
- /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
- /// }
- /// ```
- ///
- /// # Safety
- ///
- /// The contained value must be of type `T`. Calling this method
- /// with the incorrect type is *undefined behavior*.
- ///
- /// [`downcast`]: Self::downcast
- #[inline]
- #[unstable(feature = "downcast_unchecked", issue = "90850")]
- pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
- debug_assert!(self.is::<T>());
- unsafe {
- let (raw, alloc): (*mut (dyn Any + Send + Sync), _) =
- Box::into_raw_with_allocator(self);
- Box::from_raw_in(raw as *mut T, alloc)
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: fmt::Display + ?Sized, A: Allocator> fmt::Display for Box<T, A> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&**self, f)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: fmt::Debug + ?Sized, A: Allocator> fmt::Debug for Box<T, A> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&**self, f)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // It's not possible to extract the inner Unique directly from the Box;
- // instead we cast it to a `*const T`, which aliases the Unique.
- let ptr: *const T = &**self;
- fmt::Pointer::fmt(&ptr, f)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
- type Target = T;
-
- fn deref(&self) -> &T {
- &**self
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized, A: Allocator> DerefMut for Box<T, A> {
- fn deref_mut(&mut self) -> &mut T {
- &mut **self
- }
-}
-
-#[unstable(feature = "receiver_trait", issue = "none")]
-impl<T: ?Sized, A: Allocator> Receiver for Box<T, A> {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<I: Iterator + ?Sized, A: Allocator> Iterator for Box<I, A> {
- type Item = I::Item;
- fn next(&mut self) -> Option<I::Item> {
- (**self).next()
- }
- fn size_hint(&self) -> (usize, Option<usize>) {
- (**self).size_hint()
- }
- fn nth(&mut self, n: usize) -> Option<I::Item> {
- (**self).nth(n)
- }
- fn last(self) -> Option<I::Item> {
- BoxIter::last(self)
- }
-}
-
-trait BoxIter {
- type Item;
- fn last(self) -> Option<Self::Item>;
-}
-
-impl<I: Iterator + ?Sized, A: Allocator> BoxIter for Box<I, A> {
- type Item = I::Item;
- default fn last(self) -> Option<I::Item> {
- #[inline]
- fn some<T>(_: Option<T>, x: T) -> Option<T> {
- Some(x)
- }
-
- self.fold(None, some)
- }
-}
-
-/// Specialization for sized `I`s that uses `I`s implementation of `last()`
-/// instead of the default.
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<I: Iterator, A: Allocator> BoxIter for Box<I, A> {
- fn last(self) -> Option<I::Item> {
- (*self).last()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<I: DoubleEndedIterator + ?Sized, A: Allocator> DoubleEndedIterator for Box<I, A> {
- fn next_back(&mut self) -> Option<I::Item> {
- (**self).next_back()
- }
- fn nth_back(&mut self, n: usize) -> Option<I::Item> {
- (**self).nth_back(n)
- }
-}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A> {
- fn len(&self) -> usize {
- (**self).len()
- }
- fn is_empty(&self) -> bool {
- (**self).is_empty()
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
-
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
- type Output = <F as FnOnce<Args>>::Output;
-
- extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
- <F as FnOnce<Args>>::call_once(*self, args)
- }
-}
-
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
- extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
- <F as FnMut<Args>>::call_mut(self, args)
- }
-}
-
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
- extern "rust-call" fn call(&self, args: Args) -> Self::Output {
- <F as Fn<Args>>::call(self, args)
- }
-}
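-
-// Illustrative (not part of the original source): the three impls above are
-// what make a boxed closure directly callable:
-//
-//     let f: Box<dyn Fn(i32) -> i32> = Box::new(|x| x + 1);
-//     assert_eq!(f(1), 2); // dispatches through `Fn for Box<F, A>`
-//
-//     let g: Box<dyn FnOnce() -> i32> = Box::new(|| 7);
-//     assert_eq!(g(), 7); // consumes the box through `FnOnce for Box<F, A>`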
-
-#[unstable(feature = "coerce_unsized", issue = "18598")]
-impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
-
-#[unstable(feature = "dispatch_from_dyn", issue = "none")]
-impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T, Global> {}
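-
-// Illustrative (not part of the original source): `CoerceUnsized` is what
-// permits the usual unsizing coercions on boxes, while `DispatchFromDyn`
-// lets `self: Box<Self>` methods be dispatched on trait objects:
-//
-//     let slice: Box<[i32]> = Box::new([1, 2, 3]); // Box<[i32; 3]> -> Box<[i32]>
-//     let debug: Box<dyn core::fmt::Debug> = Box::new(5); // Box<i32> -> Box<dyn Debug>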
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "boxed_slice_from_iter", since = "1.32.0")]
-impl<I> FromIterator<I> for Box<[I]> {
- fn from_iter<T: IntoIterator<Item = I>>(iter: T) -> Self {
- iter.into_iter().collect::<Vec<_>>().into_boxed_slice()
- }
-}
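-
-// Illustrative (not part of the original source): the impl above allows
-// collecting an iterator straight into a boxed slice:
-//
-//     let squares: Box<[i32]> = (1..=4).map(|n| n * n).collect();
-//     assert_eq!(&*squares, &[1, 4, 9, 16]);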
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_slice_clone", since = "1.3.0")]
-impl<T: Clone, A: Allocator + Clone> Clone for Box<[T], A> {
- fn clone(&self) -> Self {
- let alloc = Box::allocator(self).clone();
- self.to_vec_in(alloc).into_boxed_slice()
- }
-
- fn clone_from(&mut self, other: &Self) {
- if self.len() == other.len() {
- self.clone_from_slice(&other);
- } else {
- *self = other.clone();
- }
- }
-}
-
-#[stable(feature = "box_borrow", since = "1.1.0")]
-impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Box<T, A> {
- fn borrow(&self) -> &T {
- &**self
- }
-}
-
-#[stable(feature = "box_borrow", since = "1.1.0")]
-impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for Box<T, A> {
- fn borrow_mut(&mut self) -> &mut T {
- &mut **self
- }
-}
-
-#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
-impl<T: ?Sized, A: Allocator> AsRef<T> for Box<T, A> {
- fn as_ref(&self) -> &T {
- &**self
- }
-}
-
-#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
-impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
- fn as_mut(&mut self) -> &mut T {
- &mut **self
- }
-}
-
-/* Nota bene
- *
- * We could have chosen not to add this impl, and instead have written a
- * function from Pin<Box<T>> to Pin<T>. Such a function would not be sound,
- * because Box<T> implements Unpin even when T does not, as a result of
- * this impl.
- *
- * We chose this API instead of the alternative for a few reasons:
- * - Logically, it is helpful to understand pinning in regard to the
- * memory region being pointed to. For this reason none of the
- * standard library pointer types support projecting through a pin
- * (Box<T> is the only pointer type in std for which this would be
- * safe.)
- * - It is in practice very useful to have Box<T> be unconditionally
- * Unpin because of trait objects, for which the structural auto
- * trait functionality does not apply (e.g., Box<dyn Foo> would
- * otherwise not be Unpin).
- *
- * Another type with the same semantics as Box but only a conditional
- * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
- * could have a method to project a Pin<T> from it.
- */
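-
-// Illustrative (not part of the original source): thanks to the impl below,
-// a boxed future is `Unpin` even when the future itself is not:
-//
-//     fn assert_unpin<T: Unpin>(_: &T) {}
-//     let fut = async {};   // the `async` block's future is `!Unpin`
-//     let boxed = Box::new(fut);
-//     assert_unpin(&boxed); // but `Box<_>` is unconditionally `Unpin`
-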
-#[stable(feature = "pin", since = "1.33.0")]
-impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
-
-#[unstable(feature = "coroutine_trait", issue = "43122")]
-impl<G: ?Sized + Coroutine<R> + Unpin, R, A: Allocator> Coroutine<R> for Box<G, A>
-where
- A: 'static,
-{
- type Yield = G::Yield;
- type Return = G::Return;
-
- fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
- G::resume(Pin::new(&mut *self), arg)
- }
-}
-
-#[unstable(feature = "coroutine_trait", issue = "43122")]
-impl<G: ?Sized + Coroutine<R>, R, A: Allocator> Coroutine<R> for Pin<Box<G, A>>
-where
- A: 'static,
-{
- type Yield = G::Yield;
- type Return = G::Return;
-
- fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
- G::resume((*self).as_mut(), arg)
- }
-}
-
-#[stable(feature = "futures_api", since = "1.36.0")]
-impl<F: ?Sized + Future + Unpin, A: Allocator> Future for Box<F, A>
-where
- A: 'static,
-{
- type Output = F::Output;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- F::poll(Pin::new(&mut *self), cx)
- }
-}
-
-#[unstable(feature = "async_iterator", issue = "79024")]
-impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
- type Item = S::Item;
-
- fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- Pin::new(&mut **self).poll_next(cx)
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- (**self).size_hint()
- }
-}
-
-impl dyn Error {
- #[inline]
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[rustc_allow_incoherent_impl]
- /// Attempts to downcast the box to a concrete type.
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
- if self.is::<T>() {
- unsafe {
- let raw: *mut dyn Error = Box::into_raw(self);
- Ok(Box::from_raw(raw as *mut T))
- }
- } else {
- Err(self)
- }
- }
-}
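-
-// Illustrative usage (not part of the original source), with a hypothetical
-// concrete type `MyError` that implements `Error`:
-//
-//     let boxed: Box<dyn Error> = Box::new(MyError);
-//     match boxed.downcast::<MyError>() {
-//         Ok(concrete) => drop(concrete),        // got `Box<MyError>` back
-//         Err(still_boxed) => drop(still_boxed), // wrong type; box handed back
-//     }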
-
-impl dyn Error + Send {
- #[inline]
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[rustc_allow_incoherent_impl]
- /// Attempts to downcast the box to a concrete type.
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
- let err: Box<dyn Error> = self;
- <dyn Error>::downcast(err).map_err(|s| unsafe {
- // Reapply the `Send` marker.
- Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send))
- })
- }
-}
-
-impl dyn Error + Send + Sync {
- #[inline]
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[rustc_allow_incoherent_impl]
- /// Attempts to downcast the box to a concrete type.
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
- let err: Box<dyn Error> = self;
- <dyn Error>::downcast(err).map_err(|s| unsafe {
- // Reapply the `Send + Sync` marker.
- Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send + Sync))
- })
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
- /// Converts a type of [`Error`] into a box of dyn [`Error`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::fmt;
- /// use std::mem;
- ///
- /// #[derive(Debug)]
- /// struct AnError;
- ///
- /// impl fmt::Display for AnError {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "An error")
- /// }
- /// }
- ///
- /// impl Error for AnError {}
- ///
- /// let an_error = AnError;
- /// assert!(0 == mem::size_of_val(&an_error));
- /// let a_boxed_error = Box::<dyn Error>::from(an_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: E) -> Box<dyn Error + 'a> {
- Box::new(err)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
- /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
- /// dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::fmt;
- /// use std::mem;
- ///
- /// #[derive(Debug)]
- /// struct AnError;
- ///
- /// impl fmt::Display for AnError {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "An error")
- /// }
- /// }
- ///
- /// impl Error for AnError {}
- ///
- /// unsafe impl Send for AnError {}
- ///
- /// unsafe impl Sync for AnError {}
- ///
- /// let an_error = AnError;
- /// assert!(0 == mem::size_of_val(&an_error));
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
- Box::new(err)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl From<String> for Box<dyn Error + Send + Sync> {
- /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_string_error = "a string error".to_string();
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- #[inline]
- fn from(err: String) -> Box<dyn Error + Send + Sync> {
- struct StringError(String);
-
- impl Error for StringError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- &self.0
- }
- }
-
- impl fmt::Display for StringError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&self.0, f)
- }
- }
-
- // Purposefully skip printing "StringError(..)"
- impl fmt::Debug for StringError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&self.0, f)
- }
- }
-
- Box::new(StringError(err))
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "string_box_error", since = "1.6.0")]
-impl From<String> for Box<dyn Error> {
- /// Converts a [`String`] into a box of dyn [`Error`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_string_error = "a string error".to_string();
- /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(str_err: String) -> Box<dyn Error> {
- let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
- let err2: Box<dyn Error> = err1;
- err2
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
- /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// [`str`]: prim@str
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_str_error = "a str error";
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- #[inline]
- fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
- From::from(String::from(err))
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "string_box_error", since = "1.6.0")]
-impl From<&str> for Box<dyn Error> {
- /// Converts a [`str`] into a box of dyn [`Error`].
- ///
- /// [`str`]: prim@str
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_str_error = "a str error";
- /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: &str) -> Box<dyn Error> {
- From::from(String::from(err))
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "cow_box_error", since = "1.22.0")]
-impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
- /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- /// use std::borrow::Cow;
- ///
- /// let a_cow_str_error = Cow::from("a str error");
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
- From::from(String::from(err))
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "cow_box_error", since = "1.22.0")]
-impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
- /// Converts a [`Cow`] into a box of dyn [`Error`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- /// use std::borrow::Cow;
- ///
- /// let a_cow_str_error = Cow::from("a str error");
- /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: Cow<'a, str>) -> Box<dyn Error> {
- From::from(String::from(err))
- }
-}
-
-#[stable(feature = "box_error", since = "1.8.0")]
-impl<T: core::error::Error> core::error::Error for Box<T> {
- #[allow(deprecated, deprecated_in_future)]
- fn description(&self) -> &str {
- core::error::Error::description(&**self)
- }
-
- #[allow(deprecated)]
- fn cause(&self) -> Option<&dyn core::error::Error> {
- core::error::Error::cause(&**self)
- }
-
- fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
- core::error::Error::source(&**self)
- }
-
- fn provide<'b>(&'b self, request: &mut core::error::Request<'b>) {
- core::error::Error::provide(&**self, request);
- }
-}
diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs
deleted file mode 100644
index 00ffb3b97365..000000000000
--- a/rust/alloc/collections/mod.rs
+++ /dev/null
@@ -1,160 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-//! Collection types.
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-#[cfg(not(no_global_oom_handling))]
-pub mod binary_heap;
-#[cfg(not(no_global_oom_handling))]
-mod btree;
-#[cfg(not(no_global_oom_handling))]
-pub mod linked_list;
-#[cfg(not(no_global_oom_handling))]
-pub mod vec_deque;
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub mod btree_map {
- //! An ordered map based on a B-Tree.
- #[stable(feature = "rust1", since = "1.0.0")]
- pub use super::btree::map::*;
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub mod btree_set {
- //! An ordered set based on a B-Tree.
- #[stable(feature = "rust1", since = "1.0.0")]
- pub use super::btree::set::*;
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(no_inline)]
-pub use binary_heap::BinaryHeap;
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(no_inline)]
-pub use btree_map::BTreeMap;
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(no_inline)]
-pub use btree_set::BTreeSet;
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(no_inline)]
-pub use linked_list::LinkedList;
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(no_inline)]
-pub use vec_deque::VecDeque;
-
-use crate::alloc::{Layout, LayoutError};
-use core::fmt::Display;
-
-/// The error type for `try_reserve` methods.
-#[derive(Clone, PartialEq, Eq, Debug)]
-#[stable(feature = "try_reserve", since = "1.57.0")]
-pub struct TryReserveError {
- kind: TryReserveErrorKind,
-}
-
-impl TryReserveError {
- /// Details about the allocation that caused the error
- #[inline]
- #[must_use]
- #[unstable(
- feature = "try_reserve_kind",
- reason = "Uncertain how much info should be exposed",
- issue = "48043"
- )]
- pub fn kind(&self) -> TryReserveErrorKind {
- self.kind.clone()
- }
-}
-
-/// Details of the allocation that caused a `TryReserveError`
-#[derive(Clone, PartialEq, Eq, Debug)]
-#[unstable(
- feature = "try_reserve_kind",
- reason = "Uncertain how much info should be exposed",
- issue = "48043"
-)]
-pub enum TryReserveErrorKind {
- /// Error due to the computed capacity exceeding the collection's maximum
- /// (usually `isize::MAX` bytes).
- CapacityOverflow,
-
- /// The memory allocator returned an error
- AllocError {
- /// The layout of allocation request that failed
- layout: Layout,
-
- #[doc(hidden)]
- #[unstable(
- feature = "container_error_extra",
- issue = "none",
- reason = "\
- Enable exposing the allocator’s custom error value \
- if an associated type is added in the future: \
- https://github.com/rust-lang/wg-allocators/issues/23"
- )]
- non_exhaustive: (),
- },
-}
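-
-// Illustrative sketch (not part of the original source): with the unstable
-// `try_reserve_kind` feature enabled, callers can distinguish the two cases:
-//
-//     let mut v: Vec<u8> = Vec::new();
-//     if let Err(e) = v.try_reserve(usize::MAX) {
-//         match e.kind() {
-//             TryReserveErrorKind::CapacityOverflow => { /* length math overflowed */ }
-//             TryReserveErrorKind::AllocError { layout, .. } => {
-//                 let _failed_size = layout.size(); // size of the failed request
-//             }
-//         }
-//     }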
-
-#[unstable(
- feature = "try_reserve_kind",
- reason = "Uncertain how much info should be exposed",
- issue = "48043"
-)]
-impl From<TryReserveErrorKind> for TryReserveError {
- #[inline]
- fn from(kind: TryReserveErrorKind) -> Self {
- Self { kind }
- }
-}
-
-#[unstable(feature = "try_reserve_kind", reason = "new API", issue = "48043")]
-impl From<LayoutError> for TryReserveErrorKind {
- /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
- #[inline]
- fn from(_: LayoutError) -> Self {
- TryReserveErrorKind::CapacityOverflow
- }
-}
-
-#[stable(feature = "try_reserve", since = "1.57.0")]
-impl Display for TryReserveError {
- fn fmt(
- &self,
- fmt: &mut core::fmt::Formatter<'_>,
- ) -> core::result::Result<(), core::fmt::Error> {
- fmt.write_str("memory allocation failed")?;
- let reason = match self.kind {
- TryReserveErrorKind::CapacityOverflow => {
- " because the computed capacity exceeded the collection's maximum"
- }
- TryReserveErrorKind::AllocError { .. } => {
- " because the memory allocator returned an error"
- }
- };
- fmt.write_str(reason)
- }
-}
-
-/// An intermediate trait for specialization of `Extend`.
-#[doc(hidden)]
-#[cfg(not(no_global_oom_handling))]
-trait SpecExtend<I: IntoIterator> {
- /// Extends `self` with the contents of the given iterator.
- fn spec_extend(&mut self, iter: I);
-}
-
-#[stable(feature = "try_reserve", since = "1.57.0")]
-impl core::error::Error for TryReserveError {}
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
deleted file mode 100644
index 36f79c075593..000000000000
--- a/rust/alloc/lib.rs
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-//! # The Rust core allocation and collections library
-//!
-//! This library provides smart pointers and collections for managing
-//! heap-allocated values.
-//!
-//! This library, like core, normally doesn’t need to be used directly
-//! since its contents are re-exported in the [`std` crate](../std/index.html).
-//! Crates that use the `#![no_std]` attribute however will typically
-//! not depend on `std`, so they’d use this crate instead.
-//!
-//! ## Boxed values
-//!
-//! The [`Box`] type is a smart pointer type. There can only be one owner of a
-//! [`Box`], and the owner can decide to mutate the contents, which live on the
-//! heap.
-//!
-//! This type can be sent among threads efficiently as the size of a `Box` value
-//! is the same as that of a pointer. Tree-like data structures are often built
-//! with boxes because each node often has only one owner, the parent.
-//!
-//! ## Reference counted pointers
-//!
-//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
-//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
-//! only allows access to `&T`, a shared reference.
-//!
-//! This type is useful when inherited mutability (such as using [`Box`]) is too
-//! constraining for an application, and is often paired with the [`Cell`] or
-//! [`RefCell`] types in order to allow mutation.
-//!
-//! ## Atomically reference counted pointers
-//!
-//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
-//! provides all the same functionality of [`Rc`], except it requires that the
-//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
-//! sendable while [`Rc<T>`][`Rc`] is not.
-//!
-//! This type allows for shared access to the contained data, and is often
-//! paired with synchronization primitives such as mutexes to allow mutation of
-//! shared resources.
-//!
-//! ## Collections
-//!
-//! Implementations of the most common general purpose data structures are
-//! defined in this library. They are re-exported through the
-//! [standard collections library](../std/collections/index.html).
-//!
-//! ## Heap interfaces
-//!
-//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
-//! default global allocator. It is not compatible with the libc allocator API.
-//!
-//! [`Arc`]: sync
-//! [`Box`]: boxed
-//! [`Cell`]: core::cell
-//! [`Rc`]: rc
-//! [`RefCell`]: core::cell
-
-// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
-// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no effect there.
-#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
-//
-#![allow(unused_attributes)]
-#![stable(feature = "alloc", since = "1.36.0")]
-#![doc(
- html_playground_url = "https://play.rust-lang.org/",
- issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
- test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
-)]
-#![doc(cfg_hide(
- not(test),
- not(any(test, bootstrap)),
- any(not(feature = "miri-test-libstd"), test, doctest),
- no_global_oom_handling,
- not(no_global_oom_handling),
- not(no_rc),
- not(no_sync),
- target_has_atomic = "ptr"
-))]
-#![doc(rust_logo)]
-#![feature(rustdoc_internals)]
-#![no_std]
-#![needs_allocator]
-// Lints:
-#![deny(unsafe_op_in_unsafe_fn)]
-#![deny(fuzzy_provenance_casts)]
-#![warn(deprecated_in_future)]
-#![warn(missing_debug_implementations)]
-#![warn(missing_docs)]
-#![allow(explicit_outlives_requirements)]
-#![warn(multiple_supertrait_upcastable)]
-#![allow(internal_features)]
-#![allow(rustdoc::redundant_explicit_links)]
-//
-// Library features:
-// tidy-alphabetical-start
-#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
-#![cfg_attr(test, feature(is_sorted))]
-#![cfg_attr(test, feature(new_uninit))]
-#![feature(alloc_layout_extra)]
-#![feature(allocator_api)]
-#![feature(array_chunks)]
-#![feature(array_into_iter_constructors)]
-#![feature(array_methods)]
-#![feature(array_windows)]
-#![feature(ascii_char)]
-#![feature(assert_matches)]
-#![feature(async_iterator)]
-#![feature(coerce_unsized)]
-#![feature(const_align_of_val)]
-#![feature(const_box)]
-#![cfg_attr(not(no_borrow), feature(const_cow_is_borrowed))]
-#![feature(const_eval_select)]
-#![feature(const_maybe_uninit_as_mut_ptr)]
-#![feature(const_maybe_uninit_write)]
-#![feature(const_pin)]
-#![feature(const_refs_to_cell)]
-#![feature(const_size_of_val)]
-#![feature(const_waker)]
-#![feature(core_intrinsics)]
-#![feature(core_panic)]
-#![feature(deprecated_suggestion)]
-#![feature(dispatch_from_dyn)]
-#![feature(error_generic_member_access)]
-#![feature(error_in_core)]
-#![feature(exact_size_is_empty)]
-#![feature(extend_one)]
-#![feature(fmt_internals)]
-#![feature(fn_traits)]
-#![feature(hasher_prefixfree_extras)]
-#![feature(inline_const)]
-#![feature(inplace_iteration)]
-#![feature(iter_advance_by)]
-#![feature(iter_next_chunk)]
-#![feature(iter_repeat_n)]
-#![feature(layout_for_ptr)]
-#![feature(maybe_uninit_slice)]
-#![feature(maybe_uninit_uninit_array)]
-#![feature(maybe_uninit_uninit_array_transpose)]
-#![feature(pattern)]
-#![feature(ptr_internals)]
-#![feature(ptr_metadata)]
-#![feature(ptr_sub_ptr)]
-#![feature(receiver_trait)]
-#![feature(set_ptr_value)]
-#![feature(sized_type_properties)]
-#![feature(slice_from_ptr_range)]
-#![feature(slice_group_by)]
-#![feature(slice_ptr_get)]
-#![feature(slice_ptr_len)]
-#![feature(slice_range)]
-#![feature(std_internals)]
-#![feature(str_internals)]
-#![feature(strict_provenance)]
-#![feature(trusted_fused)]
-#![feature(trusted_len)]
-#![feature(trusted_random_access)]
-#![feature(try_trait_v2)]
-#![feature(tuple_trait)]
-#![feature(unchecked_math)]
-#![feature(unicode_internals)]
-#![feature(unsize)]
-#![feature(utf8_chunks)]
-// tidy-alphabetical-end
-//
-// Language features:
-// tidy-alphabetical-start
-#![cfg_attr(not(test), feature(coroutine_trait))]
-#![cfg_attr(test, feature(panic_update_hook))]
-#![cfg_attr(test, feature(test))]
-#![feature(allocator_internals)]
-#![feature(allow_internal_unstable)]
-#![feature(associated_type_bounds)]
-#![feature(c_unwind)]
-#![feature(cfg_sanitize)]
-#![feature(const_mut_refs)]
-#![feature(const_precise_live_drops)]
-#![feature(const_ptr_write)]
-#![feature(const_trait_impl)]
-#![feature(const_try)]
-#![feature(dropck_eyepatch)]
-#![feature(exclusive_range_pattern)]
-#![feature(fundamental)]
-#![feature(hashmap_internals)]
-#![feature(lang_items)]
-#![feature(min_specialization)]
-#![feature(multiple_supertrait_upcastable)]
-#![feature(negative_impls)]
-#![feature(never_type)]
-#![feature(pointer_is_aligned)]
-#![feature(rustc_allow_const_fn_unstable)]
-#![feature(rustc_attrs)]
-#![feature(slice_internals)]
-#![feature(staged_api)]
-#![feature(stmt_expr_attributes)]
-#![feature(unboxed_closures)]
-#![feature(unsized_fn_params)]
-#![feature(with_negative_coherence)]
-// tidy-alphabetical-end
-//
-// Rustdoc features:
-#![feature(doc_cfg)]
-#![feature(doc_cfg_hide)]
-// Technically, this is a bug in rustdoc: rustdoc sees that the documentation on `#[lang = slice_alloc]`
-// blocks is for `&[T]`, which also has documentation using this feature in `core`, and gets mad
-// that the feature-gate isn't enabled. Ideally, it wouldn't check for the feature gate for docs
-// from other crates, but since this can only appear for lang items, it doesn't seem worth fixing.
-#![feature(intra_doc_pointers)]
-
-// Allow testing this library
-#[cfg(test)]
-#[macro_use]
-extern crate std;
-#[cfg(test)]
-extern crate test;
-#[cfg(test)]
-mod testing;
-
-// Module with internal macros used by other modules (needs to be included before other modules).
-#[cfg(not(no_macros))]
-#[macro_use]
-mod macros;
-
-mod raw_vec;
-
-// Heaps provided for low-level allocation strategies
-
-pub mod alloc;
-
-// Primitive types using the heaps above
-
-// Need to conditionally define the mod from `boxed.rs` to avoid
-// duplicating the lang-items when building in test cfg; but also need
-// to allow code to have `use boxed::Box;` declarations.
-#[cfg(not(test))]
-pub mod boxed;
-#[cfg(test)]
-mod boxed {
- pub use std::boxed::Box;
-}
-#[cfg(not(no_borrow))]
-pub mod borrow;
-pub mod collections;
-#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))]
-pub mod ffi;
-#[cfg(not(no_fmt))]
-pub mod fmt;
-#[cfg(not(no_rc))]
-pub mod rc;
-pub mod slice;
-#[cfg(not(no_str))]
-pub mod str;
-#[cfg(not(no_string))]
-pub mod string;
-#[cfg(all(not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
-pub mod sync;
-#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
-pub mod task;
-#[cfg(test)]
-mod tests;
-pub mod vec;
-
-#[doc(hidden)]
-#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
-pub mod __export {
- pub use core::format_args;
-}
-
-#[cfg(test)]
-#[allow(dead_code)] // Not used in all configurations
-pub(crate) mod test_helpers {
-    /// Copied from `std::test_helpers::test_rng`, since these tests also rely
-    /// on the seed not being the same for every RNG invocation.
- pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
- use std::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = std::hash::RandomState::new().build_hasher();
- std::panic::Location::caller().hash(&mut hasher);
- let hc64 = hasher.finish();
- let seed_vec =
- hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
- let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
- rand::SeedableRng::from_seed(seed)
- }
-}
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
deleted file mode 100644
index 98b6abf30af6..000000000000
--- a/rust/alloc/raw_vec.rs
+++ /dev/null
@@ -1,611 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
-
-use core::alloc::LayoutError;
-use core::cmp;
-use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
-use core::ptr::{self, NonNull, Unique};
-use core::slice;
-
-#[cfg(not(no_global_oom_handling))]
-use crate::alloc::handle_alloc_error;
-use crate::alloc::{Allocator, Global, Layout};
-use crate::boxed::Box;
-use crate::collections::TryReserveError;
-use crate::collections::TryReserveErrorKind::*;
-
-#[cfg(test)]
-mod tests;
-
-enum AllocInit {
- /// The contents of the new memory are uninitialized.
- Uninitialized,
- /// The new memory is guaranteed to be zeroed.
- #[allow(dead_code)]
- Zeroed,
-}
-
-#[repr(transparent)]
-#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
-#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
-#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
-struct Cap(usize);
-
-impl Cap {
- const ZERO: Cap = unsafe { Cap(0) };
-}
-
-/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
-/// a buffer of memory on the heap without having to worry about all the corner cases
-/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
-/// In particular:
-///
-/// * Produces `Unique::dangling()` on zero-sized types.
-/// * Produces `Unique::dangling()` on zero-length allocations.
-/// * Avoids freeing `Unique::dangling()`.
-/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
-/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
-/// * Guards against overflowing your length.
-/// * Calls `handle_alloc_error` for fallible allocations.
-/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
-/// * Uses the excess returned from the allocator to expose the largest available capacity.
-///
-/// This type does not in any way inspect the memory that it manages. When dropped it *will*
-/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
-/// to handle the actual things *stored* inside of a `RawVec`.
-///
-/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
-/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
-/// `Box<[T]>`, since `capacity()` won't yield the length.
-#[allow(missing_debug_implementations)]
-pub(crate) struct RawVec<T, A: Allocator = Global> {
- ptr: Unique<T>,
- /// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
- ///
- /// # Safety
- ///
- /// `cap` must be in the `0..=isize::MAX` range.
- cap: Cap,
- alloc: A,
-}
-
-impl<T> RawVec<T, Global> {
- /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
- /// they cannot call `Self::new()`.
- ///
- /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
- /// that would truly const-call something unstable.
- pub const NEW: Self = Self::new();
-
- /// Creates the biggest possible `RawVec` (on the system heap)
- /// without allocating. If `T` has positive size, then this makes a
- /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
- /// `RawVec` with capacity `usize::MAX`. Useful for implementing
- /// delayed allocation.
- #[must_use]
- pub const fn new() -> Self {
- Self::new_in(Global)
- }
-
- /// Creates a `RawVec` (on the system heap) with exactly the
- /// capacity and alignment requirements for a `[T; capacity]`. This is
- /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
- /// zero-sized. Note that if `T` is zero-sized this means you will
- /// *not* get a `RawVec` with the requested capacity.
- ///
- /// # Panics
- ///
- /// Panics if the requested capacity exceeds `isize::MAX` bytes.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(any(no_global_oom_handling, test)))]
- #[must_use]
- #[inline]
- pub fn with_capacity(capacity: usize) -> Self {
- Self::with_capacity_in(capacity, Global)
- }
-
- /// Like `with_capacity`, but guarantees the buffer is zeroed.
- #[cfg(not(any(no_global_oom_handling, test)))]
- #[must_use]
- #[inline]
- pub fn with_capacity_zeroed(capacity: usize) -> Self {
- Self::with_capacity_zeroed_in(capacity, Global)
- }
-}
-
-impl<T, A: Allocator> RawVec<T, A> {
- // Tiny Vecs are dumb. Skip to:
-    // - 8 if the element size is 1, because any heap allocator is likely
- // to round up a request of less than 8 bytes to at least 8 bytes.
- // - 4 if elements are moderate-sized (<= 1 KiB).
- // - 1 otherwise, to avoid wasting too much space for very short Vecs.
- pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
- 8
- } else if mem::size_of::<T>() <= 1024 {
- 4
- } else {
- 1
- };
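-
-    // Worked examples (illustrative, not part of the original source):
-    //   RawVec::<u8>::MIN_NON_ZERO_CAP         == 8 (1-byte elements)
-    //   RawVec::<u64>::MIN_NON_ZERO_CAP        == 4 (8-byte elements)
-    //   RawVec::<[u8; 2048]>::MIN_NON_ZERO_CAP == 1 (oversized elements)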
-
- /// Like `new`, but parameterized over the choice of allocator for
- /// the returned `RawVec`.
- pub const fn new_in(alloc: A) -> Self {
- // `cap: 0` means "unallocated". zero-sized types are ignored.
- Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
- }
-
- /// Like `with_capacity`, but parameterized over the choice of
- /// allocator for the returned `RawVec`.
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
- Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
- }
-
- /// Like `try_with_capacity`, but parameterized over the choice of
- /// allocator for the returned `RawVec`.
- #[inline]
- pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
- Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
- }
-
- /// Like `with_capacity_zeroed`, but parameterized over the choice
- /// of allocator for the returned `RawVec`.
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
- Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
- }
-
- /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
- ///
- /// Note that this will correctly reconstitute any `cap` changes
- /// that may have been performed. (See description of type for details.)
- ///
- /// # Safety
- ///
- /// * `len` must be greater than or equal to the most recently requested capacity, and
- /// * `len` must be less than or equal to `self.capacity()`.
- ///
-    /// Note that the requested capacity and `self.capacity()` could differ, as
- /// an allocator could overallocate and return a greater memory block than requested.
- pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
- // Sanity-check one half of the safety requirement (we cannot check the other half).
- debug_assert!(
- len <= self.capacity(),
- "`len` must be smaller than or equal to `self.capacity()`"
- );
-
- let me = ManuallyDrop::new(self);
- unsafe {
- let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
- Box::from_raw_in(slice, ptr::read(&me.alloc))
- }
- }
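-
-    // Illustrative note (not part of the original source): this is the
-    // primitive behind `Vec::into_boxed_slice`, which first discards excess
-    // capacity so that `len == capacity` before handing the buffer over.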
-
- #[cfg(not(no_global_oom_handling))]
- fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
- // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if T::IS_ZST || capacity == 0 {
- Self::new_in(alloc)
- } else {
- // We avoid `unwrap_or_else` here because it bloats the amount of
- // LLVM IR generated.
- let layout = match Layout::array::<T>(capacity) {
- Ok(layout) => layout,
- Err(_) => capacity_overflow(),
- };
- match alloc_guard(layout.size()) {
- Ok(_) => {}
- Err(_) => capacity_overflow(),
- }
- let result = match init {
- AllocInit::Uninitialized => alloc.allocate(layout),
- AllocInit::Zeroed => alloc.allocate_zeroed(layout),
- };
- let ptr = match result {
- Ok(ptr) => ptr,
- Err(_) => handle_alloc_error(layout),
- };
-
- // Allocators currently return a `NonNull<[u8]>` whose length
- // matches the size requested. If that ever changes, the capacity
- // here should change to `ptr.len() / mem::size_of::<T>()`.
- Self {
- ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: unsafe { Cap(capacity) },
- alloc,
- }
- }
- }
-
- fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
- // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if T::IS_ZST || capacity == 0 {
- return Ok(Self::new_in(alloc));
- }
-
- let layout = Layout::array::<T>(capacity).map_err(|_| CapacityOverflow)?;
- alloc_guard(layout.size())?;
- let result = match init {
- AllocInit::Uninitialized => alloc.allocate(layout),
- AllocInit::Zeroed => alloc.allocate_zeroed(layout),
- };
- let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?;
-
- // Allocators currently return a `NonNull<[u8]>` whose length
- // matches the size requested. If that ever changes, the capacity
- // here should change to `ptr.len() / mem::size_of::<T>()`.
- Ok(Self {
- ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: unsafe { Cap(capacity) },
- alloc,
- })
- }
-
- /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
- ///
- /// # Safety
- ///
- /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
- /// `capacity`.
-    /// The `capacity` cannot exceed `isize::MAX` for sized types (this is only a concern on
-    /// 32-bit systems). For ZSTs the capacity is ignored.
- /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
- /// guaranteed.
- #[inline]
- pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
- let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
- Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
- }
-
- /// Gets a raw pointer to the start of the allocation. Note that this is
- /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
- /// be careful.
- #[inline]
- pub fn ptr(&self) -> *mut T {
- self.ptr.as_ptr()
- }
-
- /// Gets the capacity of the allocation.
- ///
- /// This will always be `usize::MAX` if `T` is zero-sized.
- #[inline(always)]
- pub fn capacity(&self) -> usize {
- if T::IS_ZST { usize::MAX } else { self.cap.0 }
- }
-
- /// Returns a shared reference to the allocator backing this `RawVec`.
- pub fn allocator(&self) -> &A {
- &self.alloc
- }
-
- fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if T::IS_ZST || self.cap.0 == 0 {
- None
- } else {
-            // We could use Layout::array here, which ensures the absence of isize and usize
-            // overflows and could hypothetically handle differences between stride and size, but
-            // this memory has already been allocated, so we know it can't overflow and currently
-            // Rust does not support such types. So we can do better by skipping some checks and
-            // avoiding an unwrap.
- let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
- unsafe {
- let align = mem::align_of::<T>();
- let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
- let layout = Layout::from_size_align_unchecked(size, align);
- Some((self.ptr.cast().into(), layout))
- }
- }
- }
-
- /// Ensures that the buffer contains at least enough space to hold `len +
- /// additional` elements. If it doesn't already have enough capacity, will
- /// reallocate enough space plus comfortable slack space to get amortized
- /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
- /// itself to panic.
- ///
- /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
- /// the requested space. This is not really unsafe, but the unsafe
- /// code *you* write that relies on the behavior of this function may break.
- ///
- /// This is ideal for implementing a bulk-push operation like `extend`.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- pub fn reserve(&mut self, len: usize, additional: usize) {
- // Callers expect this function to be very cheap when there is already sufficient capacity.
- // Therefore, we move all the resizing and error-handling logic from grow_amortized and
- // handle_reserve behind a call, while making sure that this function is likely to be
- // inlined as just a comparison and a call if the comparison fails.
- #[cold]
- fn do_reserve_and_handle<T, A: Allocator>(
- slf: &mut RawVec<T, A>,
- len: usize,
- additional: usize,
- ) {
- handle_reserve(slf.grow_amortized(len, additional));
- }
-
- if self.needs_to_grow(len, additional) {
- do_reserve_and_handle(self, len, additional);
- }
- }
-
- /// A specialized version of `reserve()` used only by the hot and
- /// oft-instantiated `Vec::push()`, which does its own capacity check.
- #[cfg(not(no_global_oom_handling))]
- #[inline(never)]
- pub fn reserve_for_push(&mut self, len: usize) {
- handle_reserve(self.grow_amortized(len, 1));
- }
-
- /// The same as `reserve`, but returns on errors instead of panicking or aborting.
- pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
- if self.needs_to_grow(len, additional) {
- self.grow_amortized(len, additional)?;
- }
- unsafe {
- // Inform the optimizer that the reservation has succeeded or wasn't needed
- core::intrinsics::assume(!self.needs_to_grow(len, additional));
- }
- Ok(())
- }
-
- /// The same as `reserve_for_push`, but returns on errors instead of panicking or aborting.
- #[inline(never)]
- pub fn try_reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> {
- self.grow_amortized(len, 1)
- }
-
- /// Ensures that the buffer contains at least enough space to hold `len +
- /// additional` elements. If it doesn't already, will reallocate the
- /// minimum possible amount of memory necessary. Generally this will be
- /// exactly the amount of memory necessary, but in principle the allocator
- /// is free to give back more than we asked for.
- ///
- /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
- /// the requested space. This is not really unsafe, but the unsafe code
- /// *you* write that relies on the behavior of this function may break.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(no_global_oom_handling))]
- pub fn reserve_exact(&mut self, len: usize, additional: usize) {
- handle_reserve(self.try_reserve_exact(len, additional));
- }
-
- /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
- pub fn try_reserve_exact(
- &mut self,
- len: usize,
- additional: usize,
- ) -> Result<(), TryReserveError> {
- if self.needs_to_grow(len, additional) {
- self.grow_exact(len, additional)?;
- }
- unsafe {
- // Inform the optimizer that the reservation has succeeded or wasn't needed
- core::intrinsics::assume(!self.needs_to_grow(len, additional));
- }
- Ok(())
- }
-
-    /// Shrinks the buffer down to the specified capacity. If the given amount
-    /// is 0, the buffer is completely deallocated.
- ///
- /// # Panics
- ///
- /// Panics if the given amount is *larger* than the current capacity.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(no_global_oom_handling))]
- pub fn shrink_to_fit(&mut self, cap: usize) {
- handle_reserve(self.shrink(cap));
- }
-}
-
-impl<T, A: Allocator> RawVec<T, A> {
- /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
- /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
- fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
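-        // Illustrative note (not part of the original source): the wrapping
-        // subtraction means that a buggy caller passing `len > capacity` gets
-        // `false` back instead of an overflow panic, matching the "may fail
-        // to actually allocate" caveat documented on `reserve`.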
- additional > self.capacity().wrapping_sub(len)
- }
-
-    /// # Safety
- ///
- /// `cap` must not exceed `isize::MAX`.
- unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
- // Allocators currently return a `NonNull<[u8]>` whose length matches
- // the size requested. If that ever changes, the capacity here should
- // change to `ptr.len() / mem::size_of::<T>()`.
- self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
- self.cap = unsafe { Cap(cap) };
- }
-
- // This method is usually instantiated many times. So we want it to be as
- // small as possible, to improve compile times. But we also want as much of
- // its contents to be statically computable as possible, to make the
- // generated code run faster. Therefore, this method is carefully written
- // so that all of the code that depends on `T` is within it, while as much
- // of the code that doesn't depend on `T` as possible is in functions that
- // are non-generic over `T`.
- fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
- // This is ensured by the calling contexts.
- debug_assert!(additional > 0);
-
- if T::IS_ZST {
- // Since we return a capacity of `usize::MAX` when `elem_size` is
- // 0, getting to here necessarily means the `RawVec` is overfull.
- return Err(CapacityOverflow.into());
- }
-
- // Nothing we can really do about these checks, sadly.
- let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
-
- // This guarantees exponential growth. The doubling cannot overflow
- // because `cap <= isize::MAX` and the type of `cap` is `usize`.
- let cap = cmp::max(self.cap.0 * 2, required_cap);
- let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
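-        // E.g. (illustrative, not part of the original source): repeatedly
-        // pushing onto a `Vec<u64>` grows the capacity 0 -> 4 -> 8 -> 16 ...,
-        // since `MIN_NON_ZERO_CAP` is 4 for 8-byte elements and the capacity
-        // doubles from there.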
-
- let new_layout = Layout::array::<T>(cap);
-
- // `finish_grow` is non-generic over `T`.
- let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
- unsafe { self.set_ptr_and_cap(ptr, cap) };
- Ok(())
- }
-
- // The constraints on this method are much the same as those on
- // `grow_amortized`, but this method is usually instantiated less often so
- // it's less critical.
- fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
- if T::IS_ZST {
- // Since we return a capacity of `usize::MAX` when the type size is
- // 0, getting to here necessarily means the `RawVec` is overfull.
- return Err(CapacityOverflow.into());
- }
-
- let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
- let new_layout = Layout::array::<T>(cap);
-
- // `finish_grow` is non-generic over `T`.
- let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
- unsafe {
- self.set_ptr_and_cap(ptr, cap);
- }
- Ok(())
- }
-
- #[cfg(not(no_global_oom_handling))]
- fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
- assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
-
- let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-        // See current_memory() for why this assert is here.
- let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
-
- // If shrinking to 0, deallocate the buffer. We don't reach this point
- // for the T::IS_ZST case since current_memory() will have returned
- // None.
- if cap == 0 {
- unsafe { self.alloc.deallocate(ptr, layout) };
- self.ptr = Unique::dangling();
- self.cap = Cap::ZERO;
- } else {
- let ptr = unsafe {
- // `Layout::array` cannot overflow here because it would have
- // overflowed earlier when capacity was larger.
- let new_size = mem::size_of::<T>().unchecked_mul(cap);
- let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
- self.alloc
- .shrink(ptr, layout, new_layout)
- .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
- };
- // SAFETY: if the allocation is valid, then the capacity is too
- unsafe {
- self.set_ptr_and_cap(ptr, cap);
- }
- }
- Ok(())
- }
-}
-
-// This function is outside `RawVec` to minimize compile times. See the comment
-// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
-// significant, because the number of different `A` types seen in practice is
-// much smaller than the number of `T` types.)
-#[inline(never)]
-fn finish_grow<A>(
- new_layout: Result<Layout, LayoutError>,
- current_memory: Option<(NonNull<u8>, Layout)>,
- alloc: &mut A,
-) -> Result<NonNull<[u8]>, TryReserveError>
-where
- A: Allocator,
-{
- // Check for the error here to minimize the size of `RawVec::grow_*`.
- let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
-
- alloc_guard(new_layout.size())?;
-
- let memory = if let Some((ptr, old_layout)) = current_memory {
- debug_assert_eq!(old_layout.align(), new_layout.align());
- unsafe {
- // The allocator checks for alignment equality
- intrinsics::assume(old_layout.align() == new_layout.align());
- alloc.grow(ptr, old_layout, new_layout)
- }
- } else {
- alloc.allocate(new_layout)
- };
-
- memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
-}
-
-unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
- /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
- fn drop(&mut self) {
- if let Some((ptr, layout)) = self.current_memory() {
- unsafe { self.alloc.deallocate(ptr, layout) }
- }
- }
-}
-
-// Central function for reserve error handling.
-#[cfg(not(no_global_oom_handling))]
-#[inline]
-fn handle_reserve(result: Result<(), TryReserveError>) {
- match result.map_err(|e| e.kind()) {
- Err(CapacityOverflow) => capacity_overflow(),
- Err(AllocError { layout, .. }) => handle_alloc_error(layout),
- Ok(()) => { /* yay */ }
- }
-}
-
-// We need to guarantee the following:
-// * We don't ever allocate `> isize::MAX` byte-size objects.
-// * We don't overflow `usize::MAX` and actually allocate too little.
-//
-// On 64-bit we just need to check for overflow since trying to allocate
-// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
-// an extra guard for this in case we're running on a platform which can use
-// all 4GB in user-space, e.g., PAE or x32.
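-//
-// For instance (illustrative, not part of the original source): on a 32-bit
-// target, `alloc_guard(0xC000_0000)` (3 GiB) fails with `CapacityOverflow`
-// because it exceeds `isize::MAX`, while on a 64-bit target the
-// `usize::BITS < 64` test lets the whole check compile away.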
-
-#[inline]
-fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
- if usize::BITS < 64 && alloc_size > isize::MAX as usize {
- Err(CapacityOverflow.into())
- } else {
- Ok(())
- }
-}
-
-// One central function responsible for reporting capacity overflows. This'll
-// ensure that the code generation related to these panics is minimal as there's
-// only one location which panics rather than a bunch throughout the module.
-#[cfg(not(no_global_oom_handling))]
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
-fn capacity_overflow() -> ! {
- panic!("capacity overflow");
-}
diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs
deleted file mode 100644
index 1181836da5f4..000000000000
--- a/rust/alloc/slice.rs
+++ /dev/null
@@ -1,890 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-//! Utilities for the slice primitive type.
-//!
-//! *[See also the slice primitive type](slice).*
-//!
-//! Most of the structs in this module are iterator types which can only be created
-//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
-//!
-//! A few functions are provided to create a slice from a value reference
-//! or from a raw pointer.
-#![stable(feature = "rust1", since = "1.0.0")]
-// Many of the imports in this module are only used in the test configuration.
-// It's cleaner to just turn off the `unused_imports` warning than to fix them.
-#![cfg_attr(test, allow(unused_imports, dead_code))]
-
-use core::borrow::{Borrow, BorrowMut};
-#[cfg(not(no_global_oom_handling))]
-use core::cmp::Ordering::{self, Less};
-#[cfg(not(no_global_oom_handling))]
-use core::mem::{self, SizedTypeProperties};
-#[cfg(not(no_global_oom_handling))]
-use core::ptr;
-#[cfg(not(no_global_oom_handling))]
-use core::slice::sort;
-
-use crate::alloc::Allocator;
-#[cfg(not(no_global_oom_handling))]
-use crate::alloc::{self, Global};
-#[cfg(not(no_global_oom_handling))]
-use crate::borrow::ToOwned;
-use crate::boxed::Box;
-use crate::vec::Vec;
-
-#[cfg(test)]
-mod tests;
-
-#[unstable(feature = "slice_range", issue = "76393")]
-pub use core::slice::range;
-#[unstable(feature = "array_chunks", issue = "74985")]
-pub use core::slice::ArrayChunks;
-#[unstable(feature = "array_chunks", issue = "74985")]
-pub use core::slice::ArrayChunksMut;
-#[unstable(feature = "array_windows", issue = "75027")]
-pub use core::slice::ArrayWindows;
-#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
-pub use core::slice::EscapeAscii;
-#[stable(feature = "slice_get_slice", since = "1.28.0")]
-pub use core::slice::SliceIndex;
-#[stable(feature = "from_ref", since = "1.28.0")]
-pub use core::slice::{from_mut, from_ref};
-#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
-pub use core::slice::{from_mut_ptr_range, from_ptr_range};
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::slice::{from_raw_parts, from_raw_parts_mut};
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::slice::{Chunks, Windows};
-#[stable(feature = "chunks_exact", since = "1.31.0")]
-pub use core::slice::{ChunksExact, ChunksExactMut};
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::slice::{ChunksMut, Split, SplitMut};
-#[unstable(feature = "slice_group_by", issue = "80552")]
-pub use core::slice::{GroupBy, GroupByMut};
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::slice::{Iter, IterMut};
-#[stable(feature = "rchunks", since = "1.31.0")]
-pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
-#[stable(feature = "slice_rsplit", since = "1.27.0")]
-pub use core::slice::{RSplit, RSplitMut};
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
-#[stable(feature = "split_inclusive", since = "1.51.0")]
-pub use core::slice::{SplitInclusive, SplitInclusiveMut};
-
-////////////////////////////////////////////////////////////////////////////////
-// Basic slice extension methods
-////////////////////////////////////////////////////////////////////////////////
-
-// HACK(japaric) needed for the implementation of `vec!` macro during testing
-// N.B., see the `hack` module in this file for more details.
-#[cfg(test)]
-pub use hack::into_vec;
-
-// HACK(japaric) needed for the implementation of `Vec::clone` during testing
-// N.B., see the `hack` module in this file for more details.
-#[cfg(test)]
-pub use hack::to_vec;
-
-// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
-// functions are actually methods that are in `impl [T]` but not in
-// `core::slice::SliceExt` - we need to supply these functions for the
-// `test_permutations` test
-pub(crate) mod hack {
- use core::alloc::Allocator;
-
- use crate::boxed::Box;
- use crate::vec::Vec;
-
-    // We shouldn't add an inline attribute to this since it is mostly used by
-    // the `vec!` macro, where inlining causes a perf regression. See #71204
-    // for discussion and perf results.
- pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
- unsafe {
- let len = b.len();
- let (b, alloc) = Box::into_raw_with_allocator(b);
- Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
- }
- }
-
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
- T::to_vec(s, alloc)
- }
-
- #[cfg(not(no_global_oom_handling))]
- pub trait ConvertVec {
- fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
- where
- Self: Sized;
- }
-
- #[cfg(not(no_global_oom_handling))]
- impl<T: Clone> ConvertVec for T {
- #[inline]
- default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
- struct DropGuard<'a, T, A: Allocator> {
- vec: &'a mut Vec<T, A>,
- num_init: usize,
- }
- impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
- #[inline]
- fn drop(&mut self) {
- // SAFETY:
- // items were marked initialized in the loop below
- unsafe {
- self.vec.set_len(self.num_init);
- }
- }
- }
- let mut vec = Vec::with_capacity_in(s.len(), alloc);
- let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
- let slots = guard.vec.spare_capacity_mut();
- // .take(slots.len()) is necessary for LLVM to remove bounds checks
- // and has better codegen than zip.
- for (i, b) in s.iter().enumerate().take(slots.len()) {
- guard.num_init = i;
- slots[i].write(b.clone());
- }
- core::mem::forget(guard);
- // SAFETY:
- // the vec was allocated and initialized above to at least this length.
- unsafe {
- vec.set_len(s.len());
- }
- vec
- }
- }
-
- #[cfg(not(no_global_oom_handling))]
- impl<T: Copy> ConvertVec for T {
- #[inline]
- fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
- let mut v = Vec::with_capacity_in(s.len(), alloc);
- // SAFETY:
- // `v` was allocated above with the capacity of `s`, and is initialized
- // to `s.len()` by the `copy_to_nonoverlapping` call below.
- unsafe {
- s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
- v.set_len(s.len());
- }
- v
- }
- }
-}
-
-#[cfg(not(test))]
-impl<T> [T] {
- /// Sorts the slice.
- ///
- /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
- ///
- /// When applicable, unstable sorting is preferred because it is generally faster than stable
- /// sorting and it doesn't allocate auxiliary memory.
- /// See [`sort_unstable`](slice::sort_unstable).
- ///
- /// # Current implementation
- ///
- /// The current algorithm is an adaptive, iterative merge sort inspired by
- /// [timsort](https://en.wikipedia.org/wiki/Timsort).
- /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
- /// two or more sorted sequences concatenated one after another.
- ///
- /// Also, it allocates temporary storage half the size of `self`, but for short slices a
- /// non-allocating insertion sort is used instead.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = [-5, 4, 1, -3, 2];
- ///
- /// v.sort();
- /// assert!(v == [-5, -3, 1, 2, 4]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[inline]
- pub fn sort(&mut self)
- where
- T: Ord,
- {
- stable_sort(self, T::lt);
- }
-
- /// Sorts the slice with a comparator function.
- ///
- /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
- ///
- /// The comparator function must define a total ordering for the elements in the slice. If
- /// the ordering is not total, the order of the elements is unspecified. An order is a
- /// total order if it is (for all `a`, `b` and `c`):
- ///
- /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
- /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
- ///
- /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
- /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
- ///
- /// ```
- /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
- /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
- /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
- /// ```
- ///
- /// When applicable, unstable sorting is preferred because it is generally faster than stable
- /// sorting and it doesn't allocate auxiliary memory.
- /// See [`sort_unstable_by`](slice::sort_unstable_by).
- ///
- /// # Current implementation
- ///
- /// The current algorithm is an adaptive, iterative merge sort inspired by
- /// [timsort](https://en.wikipedia.org/wiki/Timsort).
- /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
- /// two or more sorted sequences concatenated one after another.
- ///
- /// Also, it allocates temporary storage half the size of `self`, but for short slices a
- /// non-allocating insertion sort is used instead.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = [5, 4, 1, 3, 2];
- /// v.sort_by(|a, b| a.cmp(b));
- /// assert!(v == [1, 2, 3, 4, 5]);
- ///
- /// // reverse sorting
- /// v.sort_by(|a, b| b.cmp(a));
- /// assert!(v == [5, 4, 3, 2, 1]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[inline]
- pub fn sort_by<F>(&mut self, mut compare: F)
- where
- F: FnMut(&T, &T) -> Ordering,
- {
- stable_sort(self, |a, b| compare(a, b) == Less);
- }
-
- /// Sorts the slice with a key extraction function.
- ///
- /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
- /// worst-case, where the key function is *O*(*m*).
- ///
- /// For expensive key functions (e.g. functions that are not simple property accesses or
- /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
- /// significantly faster, as it does not recompute element keys.
- ///
- /// When applicable, unstable sorting is preferred because it is generally faster than stable
- /// sorting and it doesn't allocate auxiliary memory.
- /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
- ///
- /// # Current implementation
- ///
- /// The current algorithm is an adaptive, iterative merge sort inspired by
- /// [timsort](https://en.wikipedia.org/wiki/Timsort).
- /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
- /// two or more sorted sequences concatenated one after another.
- ///
- /// Also, it allocates temporary storage half the size of `self`, but for short slices a
- /// non-allocating insertion sort is used instead.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = [-5i32, 4, 1, -3, 2];
- ///
- /// v.sort_by_key(|k| k.abs());
- /// assert!(v == [1, 2, -3, 4, -5]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
- #[inline]
- pub fn sort_by_key<K, F>(&mut self, mut f: F)
- where
- F: FnMut(&T) -> K,
- K: Ord,
- {
- stable_sort(self, |a, b| f(a).lt(&f(b)));
- }
-
- /// Sorts the slice with a key extraction function.
- ///
- /// During sorting, the key function is called at most once per element, by using
- /// temporary storage to remember the results of key evaluation.
- /// The order of calls to the key function is unspecified and may change in future versions
- /// of the standard library.
- ///
- /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
- /// worst-case, where the key function is *O*(*m*).
- ///
- /// For simple key functions (e.g., functions that are property accesses or
- /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
- /// faster.
- ///
- /// # Current implementation
- ///
- /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
- /// which combines the fast average case of randomized quicksort with the fast worst case of
- /// heapsort, while achieving linear time on slices with certain patterns. It uses some
- /// randomization to avoid degenerate cases, but with a fixed seed to always provide
- /// deterministic behavior.
- ///
- /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
- /// length of the slice.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = [-5i32, 4, 32, -3, 2];
- ///
- /// v.sort_by_cached_key(|k| k.to_string());
- /// assert!(v == [-3, -5, 2, 32, 4]);
- /// ```
- ///
- /// [pdqsort]: https://github.com/orlp/pdqsort
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
- #[inline]
- pub fn sort_by_cached_key<K, F>(&mut self, f: F)
- where
- F: FnMut(&T) -> K,
- K: Ord,
- {
- // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
- macro_rules! sort_by_key {
- ($t:ty, $slice:ident, $f:ident) => {{
- let mut indices: Vec<_> =
- $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
- // The elements of `indices` are unique because each carries its own
- // index, so any sort will be stable with respect to the original
- // slice. We use `sort_unstable` here because it requires less memory
- // allocation.
- indices.sort_unstable();
- for i in 0..$slice.len() {
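- // Positions before `i` have already been swapped into place, so
- // chase the chain of indices until we find where this element
- // currently lives.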
- let mut index = indices[i].1;
- while (index as usize) < i {
- index = indices[index as usize].1;
- }
- indices[i].1 = index;
- $slice.swap(i, index as usize);
- }
- }};
- }
-
- let sz_u8 = mem::size_of::<(K, u8)>();
- let sz_u16 = mem::size_of::<(K, u16)>();
- let sz_u32 = mem::size_of::<(K, u32)>();
- let sz_usize = mem::size_of::<(K, usize)>();
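- // For example, with `K = u8` a `(K, u8)` pair is 2 bytes while
- // `(K, usize)` is 16 bytes on a typical 64-bit target, so short
- // slices allocate considerably less.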
-
- let len = self.len();
- if len < 2 {
- return;
- }
- if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
- return sort_by_key!(u8, self, f);
- }
- if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
- return sort_by_key!(u16, self, f);
- }
- if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
- return sort_by_key!(u32, self, f);
- }
- sort_by_key!(usize, self, f)
- }
-
- /// Copies `self` into a new `Vec`.
- ///
- /// # Examples
- ///
- /// ```
- /// let s = [10, 40, 30];
- /// let x = s.to_vec();
- /// // Here, `s` and `x` can be modified independently.
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[rustc_conversion_suggestion]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[inline]
- pub fn to_vec(&self) -> Vec<T>
- where
- T: Clone,
- {
- self.to_vec_in(Global)
- }
-
- /// Copies `self` into a new `Vec` with an allocator.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let s = [10, 40, 30];
- /// let x = s.to_vec_in(System);
- /// // Here, `s` and `x` can be modified independently.
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[inline]
- #[unstable(feature = "allocator_api", issue = "32838")]
- pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
- where
- T: Clone,
- {
- // N.B., see the `hack` module in this file for more details.
- hack::to_vec(self, alloc)
- }
-
- /// Converts `self` into a vector without clones or allocation.
- ///
- /// The resulting vector can be converted back into a box via
- /// `Vec<T>`'s `into_boxed_slice` method.
- ///
- /// # Examples
- ///
- /// ```
- /// let s: Box<[i32]> = Box::new([10, 40, 30]);
- /// let x = s.into_vec();
- /// // `s` cannot be used anymore because it has been converted into `x`.
- ///
- /// assert_eq!(x, vec![10, 40, 30]);
- /// ```
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[inline]
- pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
- // N.B., see the `hack` module in this file for more details.
- hack::into_vec(self)
- }
-
- /// Creates a vector by copying a slice `n` times.
- ///
- /// # Panics
- ///
- /// This function will panic if the capacity would overflow.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
- /// ```
- ///
- /// A panic upon overflow:
- ///
- /// ```should_panic
- /// // this will panic at runtime
- /// b"0123456789abcdef".repeat(usize::MAX);
- /// ```
- #[rustc_allow_incoherent_impl]
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
- pub fn repeat(&self, n: usize) -> Vec<T>
- where
- T: Copy,
- {
- if n == 0 {
- return Vec::new();
- }
-
- // If `n` is larger than zero, it can be split as
- // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
- // `2^expn` is the number represented by the leftmost '1' bit of `n`,
- // and `rem` is the remaining part of `n`.
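- // For example, `n = 5` splits as `4 + 1`: `buf` is doubled twice
- // (1 -> 2 -> 4 repetitions of `self`), and the remaining single
- // repetition is then copied from the front of `buf`.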
-
- // Using `Vec` to access `set_len()`.
- let capacity = self.len().checked_mul(n).expect("capacity overflow");
- let mut buf = Vec::with_capacity(capacity);
-
- // `2^expn` repetition is done by doubling `buf` `expn`-times.
- buf.extend(self);
- {
- let mut m = n >> 1;
- // If `m > 0`, there are remaining bits up to the leftmost '1'.
- while m > 0 {
- // `buf.extend(buf)`:
- unsafe {
- ptr::copy_nonoverlapping(
- buf.as_ptr(),
- (buf.as_mut_ptr() as *mut T).add(buf.len()),
- buf.len(),
- );
- // `buf` has capacity of `self.len() * n`.
- let buf_len = buf.len();
- buf.set_len(buf_len * 2);
- }
-
- m >>= 1;
- }
- }
-
- // `rem` (`= n - 2^expn`) repetition is done by copying
- // first `rem` repetitions from `buf` itself.
- let rem_len = capacity - buf.len(); // `self.len() * rem`
- if rem_len > 0 {
- // `buf.extend(buf[0 .. rem_len])`:
- unsafe {
- // This is non-overlapping since `2^expn > rem`.
- ptr::copy_nonoverlapping(
- buf.as_ptr(),
- (buf.as_mut_ptr() as *mut T).add(buf.len()),
- rem_len,
- );
- // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
- buf.set_len(capacity);
- }
- }
- buf
- }
-
- /// Flattens a slice of `T` into a single value `Self::Output`.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(["hello", "world"].concat(), "helloworld");
- /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
- /// ```
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
- where
- Self: Concat<Item>,
- {
- Concat::concat(self)
- }
-
- /// Flattens a slice of `T` into a single value `Self::Output`, placing a
- /// given separator between each.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(["hello", "world"].join(" "), "hello world");
- /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
- /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
- /// ```
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
- pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
- where
- Self: Join<Separator>,
- {
- Join::join(self, sep)
- }
-
- /// Flattens a slice of `T` into a single value `Self::Output`, placing a
- /// given separator between each.
- ///
- /// # Examples
- ///
- /// ```
- /// # #![allow(deprecated)]
- /// assert_eq!(["hello", "world"].connect(" "), "hello world");
- /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
- /// ```
- #[rustc_allow_incoherent_impl]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(since = "1.3.0", note = "renamed to join", suggestion = "join")]
- pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
- where
- Self: Join<Separator>,
- {
- Join::join(self, sep)
- }
-}
-
-#[cfg(not(test))]
-impl [u8] {
- /// Returns a vector containing a copy of this slice where each byte
- /// is mapped to its ASCII upper case equivalent.
- ///
- /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
- /// but non-ASCII letters are unchanged.
- ///
- /// To uppercase the value in-place, use [`make_ascii_uppercase`].
- ///
- /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[must_use = "this returns the uppercase bytes as a new Vec, \
- without modifying the original"]
- #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
- #[inline]
- pub fn to_ascii_uppercase(&self) -> Vec<u8> {
- let mut me = self.to_vec();
- me.make_ascii_uppercase();
- me
- }
-
- /// Returns a vector containing a copy of this slice where each byte
- /// is mapped to its ASCII lower case equivalent.
- ///
- /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
- /// but non-ASCII letters are unchanged.
- ///
- /// To lowercase the value in-place, use [`make_ascii_lowercase`].
- ///
- /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
- #[cfg(not(no_global_oom_handling))]
- #[rustc_allow_incoherent_impl]
- #[must_use = "this returns the lowercase bytes as a new Vec, \
- without modifying the original"]
- #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
- #[inline]
- pub fn to_ascii_lowercase(&self) -> Vec<u8> {
- let mut me = self.to_vec();
- me.make_ascii_lowercase();
- me
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Extension traits for slices over specific kinds of data
-////////////////////////////////////////////////////////////////////////////////
-
-/// Helper trait for [`[T]::concat`](slice::concat).
-///
-/// Note: the `Item` type parameter is not used in this trait,
-/// but it allows impls to be more generic.
-/// Without it, we get this error:
-///
-/// ```error
-/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
-/// --> library/alloc/src/slice.rs:608:6
-/// |
-/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
-/// | ^ unconstrained type parameter
-/// ```
-///
-/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
-/// such that multiple `T` types would apply:
-///
-/// ```
-/// # #[allow(dead_code)]
-/// pub struct Foo(Vec<u32>, Vec<String>);
-///
-/// impl std::borrow::Borrow<[u32]> for Foo {
-/// fn borrow(&self) -> &[u32] { &self.0 }
-/// }
-///
-/// impl std::borrow::Borrow<[String]> for Foo {
-/// fn borrow(&self) -> &[String] { &self.1 }
-/// }
-/// ```
-#[unstable(feature = "slice_concat_trait", issue = "27747")]
-pub trait Concat<Item: ?Sized> {
- #[unstable(feature = "slice_concat_trait", issue = "27747")]
- /// The resulting type after concatenation
- type Output;
-
- /// Implementation of [`[T]::concat`](slice::concat)
- #[unstable(feature = "slice_concat_trait", issue = "27747")]
- fn concat(slice: &Self) -> Self::Output;
-}
-
-/// Helper trait for [`[T]::join`](slice::join)
-#[unstable(feature = "slice_concat_trait", issue = "27747")]
-pub trait Join<Separator> {
- #[unstable(feature = "slice_concat_trait", issue = "27747")]
- /// The resulting type after concatenation
- type Output;
-
- /// Implementation of [`[T]::join`](slice::join)
- #[unstable(feature = "slice_concat_trait", issue = "27747")]
- fn join(slice: &Self, sep: Separator) -> Self::Output;
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[unstable(feature = "slice_concat_ext", issue = "27747")]
-impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
- type Output = Vec<T>;
-
- fn concat(slice: &Self) -> Vec<T> {
- let size = slice.iter().map(|slice| slice.borrow().len()).sum();
- let mut result = Vec::with_capacity(size);
- for v in slice {
- result.extend_from_slice(v.borrow())
- }
- result
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[unstable(feature = "slice_concat_ext", issue = "27747")]
-impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
- type Output = Vec<T>;
-
- fn join(slice: &Self, sep: &T) -> Vec<T> {
- let mut iter = slice.iter();
- let first = match iter.next() {
- Some(first) => first,
- None => return vec![],
- };
- let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
- let mut result = Vec::with_capacity(size);
- result.extend_from_slice(first.borrow());
-
- for v in iter {
- result.push(sep.clone());
- result.extend_from_slice(v.borrow())
- }
- result
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[unstable(feature = "slice_concat_ext", issue = "27747")]
-impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
- type Output = Vec<T>;
-
- fn join(slice: &Self, sep: &[T]) -> Vec<T> {
- let mut iter = slice.iter();
- let first = match iter.next() {
- Some(first) => first,
- None => return vec![],
- };
- let size =
- slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
- let mut result = Vec::with_capacity(size);
- result.extend_from_slice(first.borrow());
-
- for v in iter {
- result.extend_from_slice(sep);
- result.extend_from_slice(v.borrow())
- }
- result
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Standard trait implementations for slices
-////////////////////////////////////////////////////////////////////////////////
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
- fn borrow(&self) -> &[T] {
- &self[..]
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
- fn borrow_mut(&mut self) -> &mut [T] {
- &mut self[..]
- }
-}
-
-// Specializable trait for implementing `ToOwned::clone_into`. This is
-// public in the crate and has the `Allocator` parameter so that
-// `Vec::clone_from` can use it too.
-#[cfg(not(no_global_oom_handling))]
-pub(crate) trait SpecCloneIntoVec<T, A: Allocator> {
- fn clone_into(&self, target: &mut Vec<T, A>);
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
- default fn clone_into(&self, target: &mut Vec<T, A>) {
- // drop anything in target that will not be overwritten
- target.truncate(self.len());
-
- // target.len <= self.len due to the truncate above, so the
- // slices here are always in-bounds.
- let (init, tail) = self.split_at(target.len());
-
- // reuse the contained values' allocations/resources.
- target.clone_from_slice(init);
- target.extend_from_slice(tail);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
- fn clone_into(&self, target: &mut Vec<T, A>) {
- target.clear();
- target.extend_from_slice(self);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone> ToOwned for [T] {
- type Owned = Vec<T>;
- #[cfg(not(test))]
- fn to_owned(&self) -> Vec<T> {
- self.to_vec()
- }
-
- #[cfg(test)]
- fn to_owned(&self) -> Vec<T> {
- hack::to_vec(self, Global)
- }
-
- fn clone_into(&self, target: &mut Vec<T>) {
- SpecCloneIntoVec::clone_into(self, target);
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Sorting
-////////////////////////////////////////////////////////////////////////////////
-
-#[inline]
-#[cfg(not(no_global_oom_handling))]
-fn stable_sort<T, F>(v: &mut [T], mut is_less: F)
-where
- F: FnMut(&T, &T) -> bool,
-{
- if T::IS_ZST {
- // Sorting has no meaningful behavior on zero-sized types. Do nothing.
- return;
- }
-
- let elem_alloc_fn = |len: usize| -> *mut T {
- // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
- // v.len(). The allocation will in general only be used as a 'shadow region' to store
- // temporary swap elements.
- unsafe { alloc::alloc(alloc::Layout::array::<T>(len).unwrap_unchecked()) as *mut T }
- };
-
- let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| {
- // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
- // v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same
- // len.
- unsafe {
- alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::<T>(len).unwrap_unchecked());
- }
- };
-
- let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun {
- // SAFETY: Creating the layout is safe as long as merge_sort never calls this with an
- // obscene length or 0.
- unsafe {
- alloc::alloc(alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked())
- as *mut sort::TimSortRun
- }
- };
-
- let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| {
- // SAFETY: The caller must ensure that buf_ptr was created by run_alloc_fn with the same
- // len.
- unsafe {
- alloc::dealloc(
- buf_ptr as *mut u8,
- alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked(),
- );
- }
- };
-
- sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn);
-}
diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs
deleted file mode 100644
index 78177a9e2ad0..000000000000
--- a/rust/alloc/vec/drain.rs
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-use crate::alloc::{Allocator, Global};
-use core::fmt;
-use core::iter::{FusedIterator, TrustedLen};
-use core::mem::{self, ManuallyDrop, SizedTypeProperties};
-use core::ptr::{self, NonNull};
-use core::slice::{self};
-
-use super::Vec;
-
-/// A draining iterator for `Vec<T>`.
-///
-/// This `struct` is created by [`Vec::drain`].
-/// See its documentation for more.
-///
-/// # Example
-///
-/// ```
-/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::Drain<'_, _> = v.drain(..);
-/// ```
-#[stable(feature = "drain", since = "1.6.0")]
-pub struct Drain<
- 'a,
- T: 'a,
- #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
-> {
- /// Index of tail to preserve
- pub(super) tail_start: usize,
- /// Length of tail
- pub(super) tail_len: usize,
- /// Current remaining range to remove
- pub(super) iter: slice::Iter<'a, T>,
- pub(super) vec: NonNull<Vec<T, A>>,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
- }
-}
-
-impl<'a, T, A: Allocator> Drain<'a, T, A> {
- /// Returns the remaining items of this iterator as a slice.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec!['a', 'b', 'c'];
- /// let mut drain = vec.drain(..);
- /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
- /// let _ = drain.next().unwrap();
- /// assert_eq!(drain.as_slice(), &['b', 'c']);
- /// ```
- #[must_use]
- #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
- pub fn as_slice(&self) -> &[T] {
- self.iter.as_slice()
- }
-
- /// Returns a reference to the underlying allocator.
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[must_use]
- #[inline]
- pub fn allocator(&self) -> &A {
- unsafe { self.vec.as_ref().allocator() }
- }
-
- /// Keep unyielded elements in the source `Vec`.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(drain_keep_rest)]
- ///
- /// let mut vec = vec!['a', 'b', 'c'];
- /// let mut drain = vec.drain(..);
- ///
- /// assert_eq!(drain.next().unwrap(), 'a');
- ///
- /// // This call keeps 'b' and 'c' in the vec.
- /// drain.keep_rest();
- ///
- /// // If we had not called `keep_rest()`,
- /// // `vec` would be empty.
- /// assert_eq!(vec, ['b', 'c']);
- /// ```
- #[unstable(feature = "drain_keep_rest", issue = "101122")]
- pub fn keep_rest(self) {
- // At this moment layout looks like this:
- //
- // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
- // ^-- start \_________/-- unyielded_len \____/-- self.tail_len
- // ^-- unyielded_ptr ^-- tail
- //
- // Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
- // Here we want to
- // 1. Move [unyielded] to `start`
- // 2. Move [tail] to a new start at `start + len(unyielded)`
- // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
- // a. In case of ZST, this is the only thing we want to do
- // 4. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
- let mut this = ManuallyDrop::new(self);
-
- unsafe {
- let source_vec = this.vec.as_mut();
-
- let start = source_vec.len();
- let tail = this.tail_start;
-
- let unyielded_len = this.iter.len();
- let unyielded_ptr = this.iter.as_slice().as_ptr();
-
- // ZSTs have no identity, so we don't need to move them around.
- if !T::IS_ZST {
- let start_ptr = source_vec.as_mut_ptr().add(start);
-
- // memmove back unyielded elements
- if unyielded_ptr != start_ptr {
- let src = unyielded_ptr;
- let dst = start_ptr;
-
- ptr::copy(src, dst, unyielded_len);
- }
-
- // memmove back untouched tail
- if tail != (start + unyielded_len) {
- let src = source_vec.as_ptr().add(tail);
- let dst = start_ptr.add(unyielded_len);
- ptr::copy(src, dst, this.tail_len);
- }
- }
-
- source_vec.set_len(start + unyielded_len + this.tail_len);
- }
- }
-}
-
-#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
-impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
- fn as_ref(&self) -> &[T] {
- self.as_slice()
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
-#[stable(feature = "drain", since = "1.6.0")]
-unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
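- // Move the element out with `ptr::read`; the source `Vec`'s length was
- // already truncated to exclude the drained range, so the element will
- // not be dropped twice.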
- self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) })
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
- #[inline]
- fn next_back(&mut self) -> Option<T> {
- self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> Drop for Drain<'_, T, A> {
- fn drop(&mut self) {
- /// Moves back the un-`Drain`ed elements to restore the original `Vec`.
- struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
-
- impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
- fn drop(&mut self) {
- if self.0.tail_len > 0 {
- unsafe {
- let source_vec = self.0.vec.as_mut();
- // memmove back untouched tail, update to new length
- let start = source_vec.len();
- let tail = self.0.tail_start;
- if tail != start {
- let src = source_vec.as_ptr().add(tail);
- let dst = source_vec.as_mut_ptr().add(start);
- ptr::copy(src, dst, self.0.tail_len);
- }
- source_vec.set_len(start + self.0.tail_len);
- }
- }
- }
- }
-
- let iter = mem::take(&mut self.iter);
- let drop_len = iter.len();
-
- let mut vec = self.vec;
-
- if T::IS_ZST {
- // ZSTs have no identity, so we don't need to move them around; we only need to drop
- // the correct amount. This can be achieved by manipulating the Vec length instead of
- // moving values out of `iter`.
- unsafe {
- let vec = vec.as_mut();
- let old_len = vec.len();
- vec.set_len(old_len + drop_len + self.tail_len);
- vec.truncate(old_len + self.tail_len);
- }
-
- return;
- }
-
- // ensure elements are moved back into their appropriate places, even when drop_in_place panics
- let _guard = DropGuard(self);
-
- if drop_len == 0 {
- return;
- }
-
- // as_slice() must only be called when iter.len() > 0, because it is also touched by
- // vec::Splice, which may turn it into a dangling pointer. That would make it and the
- // vec pointer point to different allocations, which would lead to invalid pointer
- // arithmetic below.
- let drop_ptr = iter.as_slice().as_ptr();
-
- unsafe {
- // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
- // a pointer with mutable provenance is necessary. Therefore we must reconstruct
- // it from the original vec but also avoid creating a &mut to the front since that could
- // invalidate raw pointers to it which some unsafe code might rely on.
- let vec_ptr = vec.as_mut().as_mut_ptr();
- let drop_offset = drop_ptr.sub_ptr(vec_ptr);
- let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
- ptr::drop_in_place(to_drop);
- }
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
- fn is_empty(&self) -> bool {
- self.iter.is_empty()
- }
-}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
diff --git a/rust/alloc/vec/extract_if.rs b/rust/alloc/vec/extract_if.rs
deleted file mode 100644
index f314a51d4d3d..000000000000
--- a/rust/alloc/vec/extract_if.rs
+++ /dev/null
@@ -1,115 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-use crate::alloc::{Allocator, Global};
-use core::ptr;
-use core::slice;
-
-use super::Vec;
-
-/// An iterator which uses a closure to determine if an element should be removed.
-///
-/// This struct is created by [`Vec::extract_if`].
-/// See its documentation for more.
-///
-/// # Example
-///
-/// ```
-/// #![feature(extract_if)]
-///
-/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::ExtractIf<'_, _, _> = v.extract_if(|x| *x % 2 == 0);
-/// ```
-#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
-#[derive(Debug)]
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-pub struct ExtractIf<
- 'a,
- T,
- F,
- #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
-> where
- F: FnMut(&mut T) -> bool,
-{
- pub(super) vec: &'a mut Vec<T, A>,
- /// The index of the item that will be inspected by the next call to `next`.
- pub(super) idx: usize,
- /// The number of items that have been drained (removed) thus far.
- pub(super) del: usize,
- /// The original length of `vec` prior to draining.
- pub(super) old_len: usize,
- /// The filter test predicate.
- pub(super) pred: F,
-}
-
-impl<T, F, A: Allocator> ExtractIf<'_, T, F, A>
-where
- F: FnMut(&mut T) -> bool,
-{
- /// Returns a reference to the underlying allocator.
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn allocator(&self) -> &A {
- self.vec.allocator()
- }
-}
-
-#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
-where
- F: FnMut(&mut T) -> bool,
-{
- type Item = T;
-
- fn next(&mut self) -> Option<T> {
- unsafe {
- while self.idx < self.old_len {
- let i = self.idx;
- let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
- let drained = (self.pred)(&mut v[i]);
- // Update the index *after* the predicate is called. If the index
- // is updated prior and the predicate panics, the element at this
- // index would be leaked.
- self.idx += 1;
- if drained {
- self.del += 1;
- return Some(ptr::read(&v[i]));
- } else if self.del > 0 {
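- // A gap of `del` already-drained items sits before this
- // element; shift it left so retained elements stay contiguous.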
- let del = self.del;
- let src: *const T = &v[i];
- let dst: *mut T = &mut v[i - del];
- ptr::copy_nonoverlapping(src, dst, 1);
- }
- }
- None
- }
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- (0, Some(self.old_len - self.idx))
- }
-}
-
-#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Drop for ExtractIf<'_, T, F, A>
-where
- F: FnMut(&mut T) -> bool,
-{
- fn drop(&mut self) {
- unsafe {
- if self.idx < self.old_len && self.del > 0 {
- // This is a pretty messed up state, and there isn't really an
- // obviously right thing to do. We don't want to keep trying
- // to execute `pred`, so we just backshift all the unprocessed
- // elements and tell the vec that they still exist. The backshift
- // is required to prevent a double-drop of the last successfully
- // drained item prior to a panic in the predicate.
- let ptr = self.vec.as_mut_ptr();
- let src = ptr.add(self.idx);
- let dst = src.sub(self.del);
- let tail_len = self.old_len - self.idx;
- src.copy_to(dst, tail_len);
- }
- self.vec.set_len(self.old_len - self.del);
- }
- }
-}
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
deleted file mode 100644
index 136bfe94af6c..000000000000
--- a/rust/alloc/vec/into_iter.rs
+++ /dev/null
@@ -1,454 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-#[cfg(not(no_global_oom_handling))]
-use super::AsVecIntoIter;
-use crate::alloc::{Allocator, Global};
-#[cfg(not(no_global_oom_handling))]
-use crate::collections::VecDeque;
-use crate::raw_vec::RawVec;
-use core::array;
-use core::fmt;
-use core::iter::{
- FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen,
- TrustedRandomAccessNoCoerce,
-};
-use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
-use core::num::NonZeroUsize;
-#[cfg(not(no_global_oom_handling))]
-use core::ops::Deref;
-use core::ptr::{self, NonNull};
-use core::slice::{self};
-
-/// An iterator that moves out of a vector.
-///
-/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
-/// (provided by the [`IntoIterator`] trait).
-///
-/// # Example
-///
-/// ```
-/// let v = vec![0, 1, 2];
-/// let iter: std::vec::IntoIter<_> = v.into_iter();
-/// ```
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_insignificant_dtor]
-pub struct IntoIter<
- T,
- #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
-> {
- pub(super) buf: NonNull<T>,
- pub(super) phantom: PhantomData<T>,
- pub(super) cap: usize,
- // the drop impl reconstructs a RawVec from buf, cap and alloc
- // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
- pub(super) alloc: ManuallyDrop<A>,
- pub(super) ptr: *const T,
- pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
- // ptr == end is a quick test for the Iterator being empty, one that works
- // for both ZST and non-ZST.
-}
-
-#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
-impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
- }
-}
-
-impl<T, A: Allocator> IntoIter<T, A> {
- /// Returns the remaining items of this iterator as a slice.
- ///
- /// # Examples
- ///
- /// ```
- /// let vec = vec!['a', 'b', 'c'];
- /// let mut into_iter = vec.into_iter();
- /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
- /// let _ = into_iter.next().unwrap();
- /// assert_eq!(into_iter.as_slice(), &['b', 'c']);
- /// ```
- #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
- pub fn as_slice(&self) -> &[T] {
- unsafe { slice::from_raw_parts(self.ptr, self.len()) }
- }
-
- /// Returns the remaining items of this iterator as a mutable slice.
- ///
- /// # Examples
- ///
- /// ```
- /// let vec = vec!['a', 'b', 'c'];
- /// let mut into_iter = vec.into_iter();
- /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
- /// into_iter.as_mut_slice()[2] = 'z';
- /// assert_eq!(into_iter.next().unwrap(), 'a');
- /// assert_eq!(into_iter.next().unwrap(), 'b');
- /// assert_eq!(into_iter.next().unwrap(), 'z');
- /// ```
- #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
- pub fn as_mut_slice(&mut self) -> &mut [T] {
- unsafe { &mut *self.as_raw_mut_slice() }
- }
-
- /// Returns a reference to the underlying allocator.
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn allocator(&self) -> &A {
- &self.alloc
- }
-
- fn as_raw_mut_slice(&mut self) -> *mut [T] {
- ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
- }
-
- /// Drops remaining elements and relinquishes the backing allocation.
- /// This method guarantees it won't panic before relinquishing
- /// the backing allocation.
- ///
- /// This is roughly equivalent to the following, but more efficient
- ///
- /// ```
- /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
- /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
- /// (&mut into_iter).for_each(drop);
- /// std::mem::forget(into_iter);
- /// ```
- ///
- /// This method is used by in-place iteration, refer to the vec::in_place_collect
- /// documentation for an overview.
- #[cfg(not(no_global_oom_handling))]
- pub(super) fn forget_allocation_drop_remaining(&mut self) {
- let remaining = self.as_raw_mut_slice();
-
- // Overwrite the individual fields instead of creating a new
- // struct and then overwriting &mut self.
- // This creates less assembly.
- self.cap = 0;
- self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) };
- self.ptr = self.buf.as_ptr();
- self.end = self.buf.as_ptr();
-
- // Dropping the remaining elements can panic, so this needs to be
- // done only after updating the other fields.
- unsafe {
- ptr::drop_in_place(remaining);
- }
- }
-
- /// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
- pub(crate) fn forget_remaining_elements(&mut self) {
- // For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
- // `ptr` must stay aligned, while `end` may be unaligned.
- self.end = self.ptr;
- }
-
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> {
- // Keep our `Drop` impl from dropping the elements and the allocator
- let mut this = ManuallyDrop::new(self);
-
- // SAFETY: This allocation originally came from a `Vec`, so it passes
- // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
- // so the `sub_ptr`s below cannot wrap, and will produce a well-formed
- // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
- // Taking `alloc` is ok because nothing else is going to look at it,
- // since our `Drop` impl isn't going to run so there's no more code.
- unsafe {
- let buf = this.buf.as_ptr();
- let initialized = if T::IS_ZST {
- // All the pointers are the same for ZSTs, so it's fine to
- // say that they're all at the beginning of the "allocation".
- 0..this.len()
- } else {
- this.ptr.sub_ptr(buf)..this.end.sub_ptr(buf)
- };
- let cap = this.cap;
- let alloc = ManuallyDrop::take(&mut this.alloc);
- VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc)
- }
- }
-}
-
-#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")]
-impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
- fn as_ref(&self) -> &[T] {
- self.as_slice()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> Iterator for IntoIter<T, A> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- if self.ptr == self.end {
- None
- } else if T::IS_ZST {
- // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
- // reducing the `end`.
- self.end = self.end.wrapping_byte_sub(1);
-
- // Make up a value of this ZST.
- Some(unsafe { mem::zeroed() })
- } else {
- let old = self.ptr;
- self.ptr = unsafe { self.ptr.add(1) };
-
- Some(unsafe { ptr::read(old) })
- }
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = if T::IS_ZST {
- self.end.addr().wrapping_sub(self.ptr.addr())
- } else {
- unsafe { self.end.sub_ptr(self.ptr) }
- };
- (exact, Some(exact))
- }
-
- #[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
- let step_size = self.len().min(n);
- let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
- if T::IS_ZST {
- // See `next` for why we sub `end` here.
- self.end = self.end.wrapping_byte_sub(step_size);
- } else {
- // SAFETY: the min() above ensures that step_size is in bounds
- self.ptr = unsafe { self.ptr.add(step_size) };
- }
- // SAFETY: the min() above ensures that step_size is in bounds
- unsafe {
- ptr::drop_in_place(to_drop);
- }
- NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
- }
-
- #[inline]
- fn count(self) -> usize {
- self.len()
- }
-
- #[inline]
- fn next_chunk<const N: usize>(&mut self) -> Result<[T; N], core::array::IntoIter<T, N>> {
- let mut raw_ary = MaybeUninit::uninit_array();
-
- let len = self.len();
-
- if T::IS_ZST {
- if len < N {
- self.forget_remaining_elements();
- // Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct
- return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
- }
-
- self.end = self.end.wrapping_byte_sub(N);
- // Safety: ditto
- return Ok(unsafe { raw_ary.transpose().assume_init() });
- }
-
- if len < N {
- // Safety: `len` indicates that this many elements are available and we just checked that
- // it fits into the array.
- unsafe {
- ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, len);
- self.forget_remaining_elements();
- return Err(array::IntoIter::new_unchecked(raw_ary, 0..len));
- }
- }
-
- // Safety: `len` is at least the array size. Copy a fixed amount here to fully
- // initialize the array.
- return unsafe {
- ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N);
- self.ptr = self.ptr.add(N);
- Ok(raw_ary.transpose().assume_init())
- };
- }
-
- unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
- where
- Self: TrustedRandomAccessNoCoerce,
- {
- // SAFETY: the caller must guarantee that `i` is in bounds of the
- // `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)`
- // is guaranteed to point to an element of the `Vec<T>` and
- // thus guaranteed to be valid to dereference.
- //
- // Also note the implementation of `Self: TrustedRandomAccess` requires
- // that `T: Copy` so reading elements from the buffer doesn't invalidate
- // them for `Drop`.
- unsafe { if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
- #[inline]
- fn next_back(&mut self) -> Option<T> {
- if self.end == self.ptr {
- None
- } else if T::IS_ZST {
- // See above for why 'ptr.offset' isn't used
- self.end = self.end.wrapping_byte_sub(1);
-
- // Make up a value of this ZST.
- Some(unsafe { mem::zeroed() })
- } else {
- self.end = unsafe { self.end.sub(1) };
-
- Some(unsafe { ptr::read(self.end) })
- }
- }
-
- #[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
- let step_size = self.len().min(n);
- if T::IS_ZST {
- // SAFETY: same as for advance_by()
- self.end = self.end.wrapping_byte_sub(step_size);
- } else {
- // SAFETY: same as for advance_by()
- self.end = unsafe { self.end.sub(step_size) };
- }
- let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
- // SAFETY: same as for advance_by()
- unsafe {
- ptr::drop_in_place(to_drop);
- }
- NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
- fn is_empty(&self) -> bool {
- self.ptr == self.end
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
-
-#[doc(hidden)]
-#[unstable(issue = "none", feature = "trusted_fused")]
-unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
-
-#[stable(feature = "default_iters", since = "1.70.0")]
-impl<T, A> Default for IntoIter<T, A>
-where
- A: Allocator + Default,
-{
- /// Creates an empty `vec::IntoIter`.
- ///
- /// ```
- /// # use std::vec;
- /// let iter: vec::IntoIter<u8> = Default::default();
- /// assert_eq!(iter.len(), 0);
- /// assert_eq!(iter.as_slice(), &[]);
- /// ```
- fn default() -> Self {
- super::Vec::new_in(Default::default()).into_iter()
- }
-}
-
-#[doc(hidden)]
-#[unstable(issue = "none", feature = "std_internals")]
-#[rustc_unsafe_specialization_marker]
-pub trait NonDrop {}
-
-// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr
-// and thus we can't implement drop-handling
-#[unstable(issue = "none", feature = "std_internals")]
-impl<T: Copy> NonDrop for T {}
-
-#[doc(hidden)]
-#[unstable(issue = "none", feature = "std_internals")]
-// TrustedRandomAccess (without NoCoerce) must not be implemented because
-// subtypes/supertypes of `T` might not be `NonDrop`
-unsafe impl<T, A: Allocator> TrustedRandomAccessNoCoerce for IntoIter<T, A>
-where
- T: NonDrop,
-{
- const MAY_HAVE_SIDE_EFFECT: bool = false;
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
-impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
- #[cfg(not(test))]
- fn clone(&self) -> Self {
- self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter()
- }
- #[cfg(test)]
- fn clone(&self) -> Self {
- crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
- fn drop(&mut self) {
- struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
-
- impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
- fn drop(&mut self) {
- unsafe {
- // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
- let alloc = ManuallyDrop::take(&mut self.0.alloc);
- // RawVec handles deallocation
- let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
- }
- }
- }
-
- let guard = DropGuard(self);
- // destroy the remaining elements
- unsafe {
- ptr::drop_in_place(guard.0.as_raw_mut_slice());
- }
- // now `guard` will be dropped and do the rest
- }
-}
-
-// In addition to the SAFETY invariants of the following three unsafe traits
-// also refer to the vec::in_place_collect module documentation to get an overview
-#[unstable(issue = "none", feature = "inplace_iteration")]
-#[doc(hidden)]
-unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
- const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
- const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
-}
-
-#[unstable(issue = "none", feature = "inplace_iteration")]
-#[doc(hidden)]
-unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
- type Source = Self;
-
- #[inline]
- unsafe fn as_inner(&mut self) -> &mut Self::Source {
- self
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-unsafe impl<T> AsVecIntoIter for IntoIter<T> {
- type Item = T;
-
- fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
- self
- }
-}
diff --git a/rust/alloc/vec/is_zero.rs b/rust/alloc/vec/is_zero.rs
deleted file mode 100644
index d928dcf90e80..000000000000
--- a/rust/alloc/vec/is_zero.rs
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-use core::num::{Saturating, Wrapping};
-
-use crate::boxed::Box;
-
-#[rustc_specialization_trait]
-pub(super) unsafe trait IsZero {
- /// Whether this value's representation is all zeros,
- /// or can be represented with all zeros.
- fn is_zero(&self) -> bool;
-}
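-// Note: specialized constructors such as `vec![0; n]` can use `is_zero` to
-// request zeroed memory straight from the allocator instead of writing every
-// element.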
-
-macro_rules! impl_is_zero {
- ($t:ty, $is_zero:expr) => {
- unsafe impl IsZero for $t {
- #[inline]
- fn is_zero(&self) -> bool {
- $is_zero(*self)
- }
- }
- };
-}
-
-impl_is_zero!(i8, |x| x == 0); // It is needed to impl for arrays and tuples of i8.
-impl_is_zero!(i16, |x| x == 0);
-impl_is_zero!(i32, |x| x == 0);
-impl_is_zero!(i64, |x| x == 0);
-impl_is_zero!(i128, |x| x == 0);
-impl_is_zero!(isize, |x| x == 0);
-
-impl_is_zero!(u8, |x| x == 0); // It is needed to impl for arrays and tuples of u8.
-impl_is_zero!(u16, |x| x == 0);
-impl_is_zero!(u32, |x| x == 0);
-impl_is_zero!(u64, |x| x == 0);
-impl_is_zero!(u128, |x| x == 0);
-impl_is_zero!(usize, |x| x == 0);
-
-impl_is_zero!(bool, |x| x == false);
-impl_is_zero!(char, |x| x == '\0');
-
-impl_is_zero!(f32, |x: f32| x.to_bits() == 0);
-impl_is_zero!(f64, |x: f64| x.to_bits() == 0);
-
-unsafe impl<T> IsZero for *const T {
- #[inline]
- fn is_zero(&self) -> bool {
- (*self).is_null()
- }
-}
-
-unsafe impl<T> IsZero for *mut T {
- #[inline]
- fn is_zero(&self) -> bool {
- (*self).is_null()
- }
-}
-
-unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
- #[inline]
- fn is_zero(&self) -> bool {
- // Because this is generated as a runtime check, it's not obvious that
- // it's worth doing if the array is really long. The threshold here
- // is largely arbitrary, but was picked because as of 2022-07-01 LLVM
- // fails to const-fold the check in `vec![[1; 32]; n]`
- // See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
- // Feel free to tweak if you have better evidence.
-
- N <= 16 && self.iter().all(IsZero::is_zero)
- }
-}
-
-// This is a recursive macro.
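-// For example, `impl_for_tuples!(A, B)` implements `IsZero` for the tuples
-// `(A, B)` and `(B,)`.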
-macro_rules! impl_for_tuples {
- // Stopper
- () => {
- // There is no use in implementing this for the empty tuple because it is a ZST.
- };
- ($first_arg:ident $(,$rest:ident)*) => {
- unsafe impl <$first_arg: IsZero, $($rest: IsZero,)*> IsZero for ($first_arg, $($rest,)*){
- #[inline]
- fn is_zero(&self) -> bool{
- // Destructure the tuple into N references.
- // Rust allows hiding generic params behind local variable names.
- #[allow(non_snake_case)]
- let ($first_arg, $($rest,)*) = self;
-
- $first_arg.is_zero()
- $( && $rest.is_zero() )*
- }
- }
-
- impl_for_tuples!($($rest),*);
- }
-}
-
-impl_for_tuples!(A, B, C, D, E, F, G, H);
-
-// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
-// For fat pointers, the bytes that would be the pointer metadata in the `Some`
-// variant are padding in the `None` variant, so ignoring them and
-// zero-initializing instead is ok.
-// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
-// `SpecFromElem`.
-
-unsafe impl<T: ?Sized> IsZero for Option<&T> {
- #[inline]
- fn is_zero(&self) -> bool {
- self.is_none()
- }
-}
-
-unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
- #[inline]
- fn is_zero(&self) -> bool {
- self.is_none()
- }
-}
-
-// `Option<num::NonZeroU32>` and similar have a representation guarantee that
-// they're the same size as the corresponding `u32` type, as well as a guarantee
-// that transmuting between `NonZeroU32` and `Option<num::NonZeroU32>` works.
-// While the documentation officially makes it UB to transmute from `None`,
-// we're the standard library so we can make extra inferences, and we know that
-// the only niche available to represent `None` is the one that's all zeros.
-
-macro_rules! impl_is_zero_option_of_nonzero {
- ($($t:ident,)+) => {$(
- unsafe impl IsZero for Option<core::num::$t> {
- #[inline]
- fn is_zero(&self) -> bool {
- self.is_none()
- }
- }
- )+};
-}
-
-impl_is_zero_option_of_nonzero!(
- NonZeroU8,
- NonZeroU16,
- NonZeroU32,
- NonZeroU64,
- NonZeroU128,
- NonZeroI8,
- NonZeroI16,
- NonZeroI32,
- NonZeroI64,
- NonZeroI128,
- NonZeroUsize,
- NonZeroIsize,
-);
-
-macro_rules! impl_is_zero_option_of_num {
- ($($t:ty,)+) => {$(
- unsafe impl IsZero for Option<$t> {
- #[inline]
- fn is_zero(&self) -> bool {
- const {
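- // Compile-time check that the all-zero bit pattern really is `None`
- // for this type.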
- let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
- assert!(none.is_none());
- }
- self.is_none()
- }
- }
- )+};
-}
-
-impl_is_zero_option_of_num!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize,);
-
-unsafe impl<T: IsZero> IsZero for Wrapping<T> {
- #[inline]
- fn is_zero(&self) -> bool {
- self.0.is_zero()
- }
-}
-
-unsafe impl<T: IsZero> IsZero for Saturating<T> {
- #[inline]
- fn is_zero(&self) -> bool {
- self.0.is_zero()
- }
-}
-
-macro_rules! impl_for_optional_bool {
- ($($t:ty,)+) => {$(
- unsafe impl IsZero for $t {
- #[inline]
- fn is_zero(&self) -> bool {
- // SAFETY: This is *not* a stable layout guarantee, but
- // inside `core` we're allowed to rely on the current rustc
- // behaviour that options of bools will be one byte with
- // no padding, so long as they're nested less than 254 deep.
- let raw: u8 = unsafe { core::mem::transmute(*self) };
- raw == 0
- }
- }
- )+};
-}
-impl_for_optional_bool! {
- Option<bool>,
- Option<Option<bool>>,
- Option<Option<Option<bool>>>,
- // Could go further, but not worth the metadata overhead
-}
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
deleted file mode 100644
index 220fb9d6f45b..000000000000
--- a/rust/alloc/vec/mod.rs
+++ /dev/null
@@ -1,3683 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-//! A contiguous growable array type with heap-allocated contents, written
-//! `Vec<T>`.
-//!
-//! Vectors have *O*(1) indexing, amortized *O*(1) push (to the end) and
-//! *O*(1) pop (from the end).
-//!
-//! Vectors ensure they never allocate more than `isize::MAX` bytes.
-//!
-//! # Examples
-//!
-//! You can explicitly create a [`Vec`] with [`Vec::new`]:
-//!
-//! ```
-//! let v: Vec<i32> = Vec::new();
-//! ```
-//!
-//! ...or by using the [`vec!`] macro:
-//!
-//! ```
-//! let v: Vec<i32> = vec![];
-//!
-//! let v = vec![1, 2, 3, 4, 5];
-//!
-//! let v = vec![0; 10]; // ten zeroes
-//! ```
-//!
-//! You can [`push`] values onto the end of a vector (which will grow the vector
-//! as needed):
-//!
-//! ```
-//! let mut v = vec![1, 2];
-//!
-//! v.push(3);
-//! ```
-//!
-//! Popping values works in much the same way:
-//!
-//! ```
-//! let mut v = vec![1, 2];
-//!
-//! let two = v.pop();
-//! ```
-//!
-//! Vectors also support indexing (through the [`Index`] and [`IndexMut`] traits):
-//!
-//! ```
-//! let mut v = vec![1, 2, 3];
-//! let three = v[2];
-//! v[1] = v[1] + 5;
-//! ```
-//!
-//! [`push`]: Vec::push
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-#[cfg(not(no_global_oom_handling))]
-use core::cmp;
-use core::cmp::Ordering;
-use core::fmt;
-use core::hash::{Hash, Hasher};
-use core::iter;
-use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
-use core::ops::{self, Index, IndexMut, Range, RangeBounds};
-use core::ptr::{self, NonNull};
-use core::slice::{self, SliceIndex};
-
-use crate::alloc::{Allocator, Global};
-#[cfg(not(no_borrow))]
-use crate::borrow::{Cow, ToOwned};
-use crate::boxed::Box;
-use crate::collections::{TryReserveError, TryReserveErrorKind};
-use crate::raw_vec::RawVec;
-
-#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
-pub use self::extract_if::ExtractIf;
-
-mod extract_if;
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "vec_splice", since = "1.21.0")]
-pub use self::splice::Splice;
-
-#[cfg(not(no_global_oom_handling))]
-mod splice;
-
-#[stable(feature = "drain", since = "1.6.0")]
-pub use self::drain::Drain;
-
-mod drain;
-
-#[cfg(not(no_borrow))]
-#[cfg(not(no_global_oom_handling))]
-mod cow;
-
-#[cfg(not(no_global_oom_handling))]
-pub(crate) use self::in_place_collect::AsVecIntoIter;
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use self::into_iter::IntoIter;
-
-mod into_iter;
-
-#[cfg(not(no_global_oom_handling))]
-use self::is_zero::IsZero;
-
-#[cfg(not(no_global_oom_handling))]
-mod is_zero;
-
-#[cfg(not(no_global_oom_handling))]
-mod in_place_collect;
-
-mod partial_eq;
-
-#[cfg(not(no_global_oom_handling))]
-use self::spec_from_elem::SpecFromElem;
-
-#[cfg(not(no_global_oom_handling))]
-mod spec_from_elem;
-
-use self::set_len_on_drop::SetLenOnDrop;
-
-mod set_len_on_drop;
-
-#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::{InPlaceDrop, InPlaceDstDataSrcBufDrop};
-
-#[cfg(not(no_global_oom_handling))]
-mod in_place_drop;
-
-#[cfg(not(no_global_oom_handling))]
-use self::spec_from_iter_nested::SpecFromIterNested;
-
-#[cfg(not(no_global_oom_handling))]
-mod spec_from_iter_nested;
-
-#[cfg(not(no_global_oom_handling))]
-use self::spec_from_iter::SpecFromIter;
-
-#[cfg(not(no_global_oom_handling))]
-mod spec_from_iter;
-
-#[cfg(not(no_global_oom_handling))]
-use self::spec_extend::SpecExtend;
-
-use self::spec_extend::TrySpecExtend;
-
-mod spec_extend;
-
-/// A contiguous growable array type, written as `Vec<T>`, short for 'vector'.
-///
-/// # Examples
-///
-/// ```
-/// let mut vec = Vec::new();
-/// vec.push(1);
-/// vec.push(2);
-///
-/// assert_eq!(vec.len(), 2);
-/// assert_eq!(vec[0], 1);
-///
-/// assert_eq!(vec.pop(), Some(2));
-/// assert_eq!(vec.len(), 1);
-///
-/// vec[0] = 7;
-/// assert_eq!(vec[0], 7);
-///
-/// vec.extend([1, 2, 3]);
-///
-/// for x in &vec {
-/// println!("{x}");
-/// }
-/// assert_eq!(vec, [7, 1, 2, 3]);
-/// ```
-///
-/// The [`vec!`] macro is provided for convenient initialization:
-///
-/// ```
-/// let mut vec1 = vec![1, 2, 3];
-/// vec1.push(4);
-/// let vec2 = Vec::from([1, 2, 3, 4]);
-/// assert_eq!(vec1, vec2);
-/// ```
-///
-/// It can also initialize each element of a `Vec<T>` with a given value.
-/// This may be more efficient than performing allocation and initialization
-/// in separate steps, especially when initializing a vector of zeros:
-///
-/// ```
-/// let vec = vec![0; 5];
-/// assert_eq!(vec, [0, 0, 0, 0, 0]);
-///
-/// // The following is equivalent, but potentially slower:
-/// let mut vec = Vec::with_capacity(5);
-/// vec.resize(5, 0);
-/// assert_eq!(vec, [0, 0, 0, 0, 0]);
-/// ```
-///
-/// For more information, see
-/// [Capacity and Reallocation](#capacity-and-reallocation).
-///
-/// Use a `Vec<T>` as an efficient stack:
-///
-/// ```
-/// let mut stack = Vec::new();
-///
-/// stack.push(1);
-/// stack.push(2);
-/// stack.push(3);
-///
-/// while let Some(top) = stack.pop() {
-/// // Prints 3, 2, 1
-/// println!("{top}");
-/// }
-/// ```
-///
-/// # Indexing
-///
-/// The `Vec` type allows access to values by index, because it implements the
-/// [`Index`] trait. An example will be more explicit:
-///
-/// ```
-/// let v = vec![0, 2, 4, 6];
-/// println!("{}", v[1]); // it will display '2'
-/// ```
-///
-/// However be careful: if you try to access an index which isn't in the `Vec`,
-/// your software will panic! You cannot do this:
-///
-/// ```should_panic
-/// let v = vec![0, 2, 4, 6];
-/// println!("{}", v[6]); // it will panic!
-/// ```
-///
-/// Use [`get`] and [`get_mut`] if you want to check whether the index is in
-/// the `Vec`.
-///
-/// # Slicing
-///
-/// A `Vec` can be mutable. A slice, on the other hand, is a read-only view
-/// into a vector's (or another container's) contents. To get a
-/// [slice][prim@slice], use [`&`]. Example:
-///
-/// ```
-/// fn read_slice(slice: &[usize]) {
-/// // ...
-/// }
-///
-/// let v = vec![0, 1];
-/// read_slice(&v);
-///
-/// // ... and that's all!
-/// // you can also do it like this:
-/// let u: &[usize] = &v;
-/// // or like this:
-/// let u: &[_] = &v;
-/// ```
-///
-/// In Rust, it's more common to pass slices as arguments rather than vectors
-/// when you just want to provide read access. The same goes for [`String`] and
-/// [`&str`].
-///
-/// # Capacity and reallocation
-///
-/// The capacity of a vector is the amount of space allocated for any future
-/// elements that will be added onto the vector. This is not to be confused with
-/// the *length* of a vector, which specifies the number of actual elements
-/// within the vector. If a vector's length would exceed its capacity, its
-/// capacity is automatically increased, but its elements have to be
-/// reallocated.
-///
-/// For example, a vector with capacity 10 and length 0 would be an empty vector
-/// with space for 10 more elements. Pushing 10 or fewer elements onto the
-/// vector will not change its capacity or cause reallocation to occur. However,
-/// if the vector's length is increased to 11, it will have to reallocate, which
-/// can be slow. For this reason, it is recommended to use [`Vec::with_capacity`]
-/// whenever possible to specify how big the vector is expected to get.
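-///
-/// A short sketch of the difference, using only the methods documented
-/// below:
-///
-/// ```
-/// let mut v: Vec<i32> = Vec::with_capacity(10);
-/// assert_eq!(v.len(), 0);
-/// assert!(v.capacity() >= 10);
-///
-/// // Pushing within the reserved capacity changes the length only;
-/// // no reallocation can occur here.
-/// for i in 0..10 {
-/// v.push(i);
-/// }
-/// assert_eq!(v.len(), 10);
-/// ```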
-///
-/// # Guarantees
-///
-/// Due to its incredibly fundamental nature, `Vec` makes a lot of guarantees
-/// about its design. This ensures that it's as low-overhead as possible in
-/// the general case, and can be correctly manipulated in primitive ways
-/// by unsafe code. Note that these guarantees refer to an unqualified `Vec<T>`.
-/// If additional type parameters are added (e.g., to support custom allocators),
-/// overriding their defaults may change the behavior.
-///
-/// Most fundamentally, `Vec` is and always will be a (pointer, capacity, length)
-/// triplet. No more, no less. The order of these fields is completely
-/// unspecified, and you should use the appropriate methods to modify these.
-/// The pointer will never be null, so this type is null-pointer-optimized.
-///
-/// However, the pointer might not actually point to allocated memory. In particular,
-/// if you construct a `Vec` with capacity 0 via [`Vec::new`], [`vec![]`][`vec!`],
-/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit`]
-/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
-/// types inside a `Vec`, it will not allocate space for them. *Note that in this case
-/// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
-/// if <code>[mem::size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
-/// details are very subtle --- if you intend to allocate memory using a `Vec`
-/// and use it for something else (either to pass to unsafe code, or to build your
-/// own memory-backed collection), be sure to deallocate this memory by using
-/// `from_raw_parts` to recover the `Vec` and then dropping it.
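-///
-/// As a short sketch of the rule above:
-///
-/// ```
-/// // An empty `Vec` of a sized type has not allocated:
-/// let v: Vec<i32> = Vec::new();
-/// assert_eq!(v.capacity(), 0);
-///
-/// // A `Vec` of a zero-sized type never allocates, and reports a capacity
-/// // of `usize::MAX`:
-/// let z: Vec<()> = Vec::with_capacity(8);
-/// assert_eq!(z.capacity(), usize::MAX);
-/// ```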
-///
-/// If a `Vec` *has* allocated memory, then the memory it points to is on the heap
-/// (as defined by the allocator Rust is configured to use by default), and its
-/// pointer points to [`len`] initialized, contiguous elements in order (what
-/// you would see if you coerced it to a slice), followed by <code>[capacity] - [len]</code>
-/// logically uninitialized, contiguous elements.
-///
-/// A vector containing the elements `'a'` and `'b'` with capacity 4 can be
-/// visualized as below. The top part is the `Vec` struct, it contains a
-/// pointer to the head of the allocation in the heap, length and capacity.
-/// The bottom part is the allocation on the heap, a contiguous memory block.
-///
-/// ```text
-/// ptr len capacity
-/// +--------+--------+--------+
-/// | 0x0123 | 2 | 4 |
-/// +--------+--------+--------+
-/// |
-/// v
-/// Heap +--------+--------+--------+--------+
-/// | 'a' | 'b' | uninit | uninit |
-/// +--------+--------+--------+--------+
-/// ```
-///
-/// - **uninit** represents memory that is not initialized, see [`MaybeUninit`].
-/// - Note: the ABI is not stable and `Vec` makes no guarantees about its memory
-/// layout (including the order of fields).
-///
-/// `Vec` will never perform a "small optimization" where elements are actually
-/// stored on the stack for two reasons:
-///
-/// * It would make it more difficult for unsafe code to correctly manipulate
-/// a `Vec`. The contents of a `Vec` wouldn't have a stable address if it were
-/// only moved, and it would be more difficult to determine if a `Vec` had
-/// actually allocated memory.
-///
-/// * It would penalize the general case, incurring an additional branch
-/// on every access.
-///
-/// `Vec` will never automatically shrink itself, even if completely empty. This
-/// ensures no unnecessary allocations or deallocations occur. Emptying a `Vec`
-/// and then filling it back up to the same [`len`] should incur no calls to
-/// the allocator. If you wish to free up unused memory, use
-/// [`shrink_to_fit`] or [`shrink_to`].
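-///
-/// For example, relying only on the guarantee just stated:
-///
-/// ```
-/// let mut v = vec![1, 2, 3];
-/// let cap = v.capacity();
-/// v.clear();
-/// // Emptying the vector does not release its buffer:
-/// assert_eq!(v.capacity(), cap);
-/// ```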
-///
-/// [`push`] and [`insert`] will never (re)allocate if the reported capacity is
-/// sufficient. [`push`] and [`insert`] *will* (re)allocate if
-/// <code>[len] == [capacity]</code>. That is, the reported capacity is completely
-/// accurate, and can be relied on. It can even be used to manually free the memory
-/// allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even
-/// when not necessary.
-///
-/// `Vec` does not guarantee any particular growth strategy when reallocating
-/// when full, nor when [`reserve`] is called. The current strategy is basic
-/// and it may prove desirable to use a non-constant growth factor. Whatever
-/// strategy is used will of course guarantee *O*(1) amortized [`push`].
-///
-/// `vec![x; n]`, `vec![a, b, c, d]`, and
-/// [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec`
-/// with exactly the requested capacity. If <code>[len] == [capacity]</code>
-/// (as is the case for the [`vec!`] macro), then a `Vec<T>` can be converted to
-/// and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements.
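-///
-/// A round trip illustrating this, using [`Vec::into_boxed_slice`]:
-///
-/// ```
-/// let v = vec![1, 2, 3]; // len == capacity == 3
-/// let b: Box<[i32]> = v.into_boxed_slice(); // no reallocation needed
-/// let v = b.into_vec(); // and none on the way back
-/// assert_eq!(v.capacity(), 3);
-/// ```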
-///
-/// `Vec` will not specifically overwrite any data that is removed from it,
-/// but also won't specifically preserve it. Its uninitialized memory is
-/// scratch space that it may use however it wants. It will generally just do
-/// whatever is most efficient or otherwise easy to implement. Do not rely on
-/// removed data to be erased for security purposes. Even if you drop a `Vec`, its
-/// buffer may simply be reused by another allocation. Even if you zero a `Vec`'s memory
-/// first, that might not actually happen because the optimizer does not consider
-/// this a side-effect that must be preserved. There is one case which we will
-/// not break, however: using `unsafe` code to write to the excess capacity,
-/// and then increasing the length to match, is always valid.
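-///
-/// A sketch of that one supported pattern (see also the documentation of
-/// `set_len` below):
-///
-/// ```
-/// let mut v: Vec<u8> = Vec::with_capacity(4);
-/// unsafe {
-/// // Initialize the excess capacity in place...
-/// for i in 0..4 {
-/// v.as_mut_ptr().add(i).write(i as u8);
-/// }
-/// // ...then increase the length to cover what was written.
-/// v.set_len(4);
-/// }
-/// assert_eq!(v, [0, 1, 2, 3]);
-/// ```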
-///
-/// Currently, `Vec` does not guarantee the order in which elements are dropped.
-/// The order has changed in the past and may change again.
-///
-/// [`get`]: slice::get
-/// [`get_mut`]: slice::get_mut
-/// [`String`]: crate::string::String
-/// [`&str`]: type@str
-/// [`shrink_to_fit`]: Vec::shrink_to_fit
-/// [`shrink_to`]: Vec::shrink_to
-/// [capacity]: Vec::capacity
-/// [`capacity`]: Vec::capacity
-/// [mem::size_of::\<T>]: core::mem::size_of
-/// [len]: Vec::len
-/// [`len`]: Vec::len
-/// [`push`]: Vec::push
-/// [`insert`]: Vec::insert
-/// [`reserve`]: Vec::reserve
-/// [`MaybeUninit`]: core::mem::MaybeUninit
-/// [owned slice]: Box
-#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(test), rustc_diagnostic_item = "Vec")]
-#[rustc_insignificant_dtor]
-pub struct Vec<T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global> {
- buf: RawVec<T, A>,
- len: usize,
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Inherent methods
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T> Vec<T> {
- /// Constructs a new, empty `Vec<T>`.
- ///
- /// The vector will not allocate until elements are pushed onto it.
- ///
- /// # Examples
- ///
- /// ```
- /// # #![allow(unused_mut)]
- /// let mut vec: Vec<i32> = Vec::new();
- /// ```
- #[inline]
- #[rustc_const_stable(feature = "const_vec_new", since = "1.39.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- pub const fn new() -> Self {
- Vec { buf: RawVec::NEW, len: 0 }
- }
-
- /// Constructs a new, empty `Vec<T>` with at least the specified capacity.
- ///
- /// The vector will be able to hold at least `capacity` elements without
- /// reallocating. This method is allowed to allocate for more elements than
- /// `capacity`. If `capacity` is 0, the vector will not allocate.
- ///
- /// It is important to note that although the returned vector has the
- /// minimum *capacity* specified, the vector will have a zero *length*. For
- /// an explanation of the difference between length and capacity, see
- /// *[Capacity and reallocation]*.
- ///
- /// If it is important to know the exact allocated capacity of a `Vec`,
- /// always use the [`capacity`] method after construction.
- ///
- /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
- /// and the capacity will always be `usize::MAX`.
- ///
- /// [Capacity and reallocation]: #capacity-and-reallocation
- /// [`capacity`]: Vec::capacity
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = Vec::with_capacity(10);
- ///
- /// // The vector contains no items, even though it has capacity for more
- /// assert_eq!(vec.len(), 0);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // These are all done without reallocating...
- /// for i in 0..10 {
- /// vec.push(i);
- /// }
- /// assert_eq!(vec.len(), 10);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // ...but this may make the vector reallocate
- /// vec.push(11);
- /// assert_eq!(vec.len(), 11);
- /// assert!(vec.capacity() >= 11);
- ///
- /// // A vector of a zero-sized type will always over-allocate, since no
- /// // allocation is necessary
- /// let vec_units = Vec::<()>::with_capacity(10);
- /// assert_eq!(vec_units.capacity(), usize::MAX);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- pub fn with_capacity(capacity: usize) -> Self {
- Self::with_capacity_in(capacity, Global)
- }
-
- /// Tries to construct a new, empty `Vec<T>` with at least the specified capacity.
- ///
- /// The vector will be able to hold at least `capacity` elements without
- /// reallocating. This method is allowed to allocate for more elements than
- /// `capacity`. If `capacity` is 0, the vector will not allocate.
- ///
- /// It is important to note that although the returned vector has the
- /// minimum *capacity* specified, the vector will have a zero *length*. For
- /// an explanation of the difference between length and capacity, see
- /// *[Capacity and reallocation]*.
- ///
- /// If it is important to know the exact allocated capacity of a `Vec`,
- /// always use the [`capacity`] method after construction.
- ///
- /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
- /// and the capacity will always be `usize::MAX`.
- ///
- /// [Capacity and reallocation]: #capacity-and-reallocation
- /// [`capacity`]: Vec::capacity
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = Vec::try_with_capacity(10).unwrap();
- ///
- /// // The vector contains no items, even though it has capacity for more
- /// assert_eq!(vec.len(), 0);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // These are all done without reallocating...
- /// for i in 0..10 {
- /// vec.push(i);
- /// }
- /// assert_eq!(vec.len(), 10);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // ...but this may make the vector reallocate
- /// vec.push(11);
- /// assert_eq!(vec.len(), 11);
- /// assert!(vec.capacity() >= 11);
- ///
- /// let mut result = Vec::try_with_capacity(usize::MAX);
- /// assert!(result.is_err());
- ///
- /// // A vector of a zero-sized type will always over-allocate, since no
- /// // allocation is necessary
- /// let vec_units = Vec::<()>::try_with_capacity(10).unwrap();
- /// assert_eq!(vec_units.capacity(), usize::MAX);
- /// ```
- #[inline]
- #[stable(feature = "kernel", since = "1.0.0")]
- pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
- Self::try_with_capacity_in(capacity, Global)
- }
-
- /// Creates a `Vec<T>` directly from a pointer, a capacity, and a length.
- ///
- /// # Safety
- ///
- /// This is highly unsafe, due to the number of invariants that aren't
- /// checked:
- ///
- /// * `ptr` must have been allocated using the global allocator, such as via
- /// the [`alloc::alloc`] function.
- /// * `T` needs to have the same alignment as what `ptr` was allocated with.
- /// (`T` having a less strict alignment is not sufficient, the alignment really
- /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
- /// allocated and deallocated with the same layout.)
- /// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
- /// to be the same size as the pointer was allocated with. (Because similar to
- /// alignment, [`dealloc`] must be called with the same layout `size`.)
- /// * `length` needs to be less than or equal to `capacity`.
- /// * The first `length` values must be properly initialized values of type `T`.
- /// * `capacity` needs to be the capacity that the pointer was allocated with.
- /// * The allocated size in bytes must be no larger than `isize::MAX`.
- /// See the safety documentation of [`pointer::offset`].
- ///
- /// These requirements are always upheld by any `ptr` that has been allocated
- /// via `Vec<T>`. Other allocation sources are allowed if the invariants are
- /// upheld.
- ///
- /// Violating these may cause problems like corrupting the allocator's
- /// internal data structures. For example it is normally **not** safe
- /// to build a `Vec<u8>` from a pointer to a C `char` array with length
- /// `size_t`, doing so is only safe if the array was initially allocated by
- /// a `Vec` or `String`.
- /// It's also not safe to build one from a `Vec<u16>` and its length, because
- /// the allocator cares about the alignment, and these two types have different
- /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
- /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1. To avoid
- /// these issues, it is often preferable to do casting/transmuting using
- /// [`slice::from_raw_parts`] instead.
- ///
- /// The ownership of `ptr` is effectively transferred to the
- /// `Vec<T>` which may then deallocate, reallocate or change the
- /// contents of memory pointed to by the pointer at will. Ensure
- /// that nothing else uses the pointer after calling this
- /// function.
- ///
- /// [`String`]: crate::string::String
- /// [`alloc::alloc`]: crate::alloc::alloc
- /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
- ///
- /// # Examples
- ///
- /// ```
- /// use std::ptr;
- /// use std::mem;
- ///
- /// let v = vec![1, 2, 3];
- ///
- // FIXME Update this when vec_into_raw_parts is stabilized
- /// // Prevent running `v`'s destructor so we are in complete control
- /// // of the allocation.
- /// let mut v = mem::ManuallyDrop::new(v);
- ///
- /// // Pull out the various important pieces of information about `v`
- /// let p = v.as_mut_ptr();
- /// let len = v.len();
- /// let cap = v.capacity();
- ///
- /// unsafe {
- /// // Overwrite memory with 4, 5, 6
- /// for i in 0..len {
- /// ptr::write(p.add(i), 4 + i);
- /// }
- ///
- /// // Put everything back together into a Vec
- /// let rebuilt = Vec::from_raw_parts(p, len, cap);
- /// assert_eq!(rebuilt, [4, 5, 6]);
- /// }
- /// ```
- ///
- /// Using memory that was allocated elsewhere:
- ///
- /// ```rust
- /// use std::alloc::{alloc, Layout};
- ///
- /// fn main() {
- /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
- ///
- /// let vec = unsafe {
- /// let mem = alloc(layout).cast::<u32>();
- /// if mem.is_null() {
- /// return;
- /// }
- ///
- /// mem.write(1_000_000);
- ///
- /// Vec::from_raw_parts(mem, 1, 16)
- /// };
- ///
- /// assert_eq!(vec, &[1_000_000]);
- /// assert_eq!(vec.capacity(), 16);
- /// }
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
- unsafe { Self::from_raw_parts_in(ptr, length, capacity, Global) }
- }
-}
-
-impl<T, A: Allocator> Vec<T, A> {
- /// Constructs a new, empty `Vec<T, A>`.
- ///
- /// The vector will not allocate until elements are pushed onto it.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// # #[allow(unused_mut)]
- /// let mut vec: Vec<i32, _> = Vec::new_in(System);
- /// ```
- #[inline]
- #[unstable(feature = "allocator_api", issue = "32838")]
- pub const fn new_in(alloc: A) -> Self {
- Vec { buf: RawVec::new_in(alloc), len: 0 }
- }
-
- /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
- /// with the provided allocator.
- ///
- /// The vector will be able to hold at least `capacity` elements without
- /// reallocating. This method is allowed to allocate for more elements than
- /// `capacity`. If `capacity` is 0, the vector will not allocate.
- ///
- /// It is important to note that although the returned vector has the
- /// minimum *capacity* specified, the vector will have a zero *length*. For
- /// an explanation of the difference between length and capacity, see
- /// *[Capacity and reallocation]*.
- ///
- /// If it is important to know the exact allocated capacity of a `Vec`,
- /// always use the [`capacity`] method after construction.
- ///
- /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
- /// and the capacity will always be `usize::MAX`.
- ///
- /// [Capacity and reallocation]: #capacity-and-reallocation
- /// [`capacity`]: Vec::capacity
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let mut vec = Vec::with_capacity_in(10, System);
- ///
- /// // The vector contains no items, even though it has capacity for more
- /// assert_eq!(vec.len(), 0);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // These are all done without reallocating...
- /// for i in 0..10 {
- /// vec.push(i);
- /// }
- /// assert_eq!(vec.len(), 10);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // ...but this may make the vector reallocate
- /// vec.push(11);
- /// assert_eq!(vec.len(), 11);
- /// assert!(vec.capacity() >= 11);
- ///
- /// // A vector of a zero-sized type will always over-allocate, since no
- /// // allocation is necessary
- /// let vec_units = Vec::<(), System>::with_capacity_in(10, System);
- /// assert_eq!(vec_units.capacity(), usize::MAX);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- #[unstable(feature = "allocator_api", issue = "32838")]
- pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
- Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
- }
-
- /// Tries to construct a new, empty `Vec<T, A>` with at least the specified capacity
- /// with the provided allocator.
- ///
- /// The vector will be able to hold at least `capacity` elements without
- /// reallocating. This method is allowed to allocate for more elements than
- /// `capacity`. If `capacity` is 0, the vector will not allocate.
- ///
- /// It is important to note that although the returned vector has the
- /// minimum *capacity* specified, the vector will have a zero *length*. For
- /// an explanation of the difference between length and capacity, see
- /// *[Capacity and reallocation]*.
- ///
- /// If it is important to know the exact allocated capacity of a `Vec`,
- /// always use the [`capacity`] method after construction.
- ///
- /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
- /// and the capacity will always be `usize::MAX`.
- ///
- /// [Capacity and reallocation]: #capacity-and-reallocation
- /// [`capacity`]: Vec::capacity
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let mut vec = Vec::try_with_capacity_in(10, System).unwrap();
- ///
- /// // The vector contains no items, even though it has capacity for more
- /// assert_eq!(vec.len(), 0);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // These are all done without reallocating...
- /// for i in 0..10 {
- /// vec.push(i);
- /// }
- /// assert_eq!(vec.len(), 10);
- /// assert!(vec.capacity() >= 10);
- ///
- /// // ...but this may make the vector reallocate
- /// vec.push(11);
- /// assert_eq!(vec.len(), 11);
- /// assert!(vec.capacity() >= 11);
- ///
- /// let mut result = Vec::try_with_capacity_in(usize::MAX, System);
- /// assert!(result.is_err());
- ///
- /// // A vector of a zero-sized type will always over-allocate, since no
- /// // allocation is necessary
- /// let vec_units = Vec::<(), System>::try_with_capacity_in(10, System).unwrap();
- /// assert_eq!(vec_units.capacity(), usize::MAX);
- /// ```
- #[inline]
- #[stable(feature = "kernel", since = "1.0.0")]
- pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
- Ok(Vec { buf: RawVec::try_with_capacity_in(capacity, alloc)?, len: 0 })
- }
-
- /// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length,
- /// and an allocator.
- ///
- /// # Safety
- ///
- /// This is highly unsafe, due to the number of invariants that aren't
- /// checked:
- ///
- /// * `ptr` must be [*currently allocated*] via the given allocator `alloc`.
- /// * `T` needs to have the same alignment as what `ptr` was allocated with.
- /// (`T` having a less strict alignment is not sufficient, the alignment really
- /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
- /// allocated and deallocated with the same layout.)
- /// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
- /// to be the same size as the pointer was allocated with. (Because similar to
- /// alignment, [`dealloc`] must be called with the same layout `size`.)
- /// * `length` needs to be less than or equal to `capacity`.
- /// * The first `length` values must be properly initialized values of type `T`.
- /// * `capacity` needs to [*fit*] the layout size that the pointer was allocated with.
- /// * The allocated size in bytes must be no larger than `isize::MAX`.
- /// See the safety documentation of [`pointer::offset`].
- ///
- /// These requirements are always upheld by any `ptr` that has been allocated
- /// via `Vec<T, A>`. Other allocation sources are allowed if the invariants are
- /// upheld.
- ///
- /// Violating these may cause problems like corrupting the allocator's
- /// internal data structures. For example it is **not** safe
- /// to build a `Vec<u8>` from a pointer to a C `char` array with length `size_t`.
- /// It's also not safe to build one from a `Vec<u16>` and its length, because
- /// the allocator cares about the alignment, and these two types have different
- /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
- /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1.
- ///
- /// The ownership of `ptr` is effectively transferred to the
- /// `Vec<T>` which may then deallocate, reallocate or change the
- /// contents of memory pointed to by the pointer at will. Ensure
- /// that nothing else uses the pointer after calling this
- /// function.
- ///
- /// [`String`]: crate::string::String
- /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
- /// [*currently allocated*]: crate::alloc::Allocator#currently-allocated-memory
- /// [*fit*]: crate::alloc::Allocator#memory-fitting
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// use std::ptr;
- /// use std::mem;
- ///
- /// let mut v = Vec::with_capacity_in(3, System);
- /// v.push(1);
- /// v.push(2);
- /// v.push(3);
- ///
- // FIXME Update this when vec_into_raw_parts is stabilized
- /// // Prevent running `v`'s destructor so we are in complete control
- /// // of the allocation.
- /// let mut v = mem::ManuallyDrop::new(v);
- ///
- /// // Pull out the various important pieces of information about `v`
- /// let p = v.as_mut_ptr();
- /// let len = v.len();
- /// let cap = v.capacity();
- /// let alloc = v.allocator();
- ///
- /// unsafe {
- /// // Overwrite memory with 4, 5, 6
- /// for i in 0..len {
- /// ptr::write(p.add(i), 4 + i);
- /// }
- ///
- /// // Put everything back together into a Vec
- /// let rebuilt = Vec::from_raw_parts_in(p, len, cap, alloc.clone());
- /// assert_eq!(rebuilt, [4, 5, 6]);
- /// }
- /// ```
- ///
- /// Using memory that was allocated elsewhere:
- ///
- /// ```rust
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::{AllocError, Allocator, Global, Layout};
- ///
- /// fn main() {
- /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
- ///
- /// let vec = unsafe {
- /// let mem = match Global.allocate(layout) {
- /// Ok(mem) => mem.cast::<u32>().as_ptr(),
- /// Err(AllocError) => return,
- /// };
- ///
- /// mem.write(1_000_000);
- ///
- /// Vec::from_raw_parts_in(mem, 1, 16, Global)
- /// };
- ///
- /// assert_eq!(vec, &[1_000_000]);
- /// assert_eq!(vec.capacity(), 16);
- /// }
- /// ```
- #[inline]
- #[unstable(feature = "allocator_api", issue = "32838")]
- pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
- unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } }
- }
-
- /// Decomposes a `Vec<T>` into its raw components.
- ///
- /// Returns the raw pointer to the underlying data, the length of
- /// the vector (in elements), and the allocated capacity of the
- /// data (in elements). These are the same arguments in the same
- /// order as the arguments to [`from_raw_parts`].
- ///
- /// After calling this function, the caller is responsible for the
- /// memory previously managed by the `Vec`. The only way to do
- /// this is to convert the raw pointer, length, and capacity back
- /// into a `Vec` with the [`from_raw_parts`] function, allowing
- /// the destructor to perform the cleanup.
- ///
- /// [`from_raw_parts`]: Vec::from_raw_parts
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(vec_into_raw_parts)]
- /// let v: Vec<i32> = vec![-1, 0, 1];
- ///
- /// let (ptr, len, cap) = v.into_raw_parts();
- ///
- /// let rebuilt = unsafe {
- /// // We can now make changes to the components, such as
- /// // transmuting the raw pointer to a compatible type.
- /// let ptr = ptr as *mut u32;
- ///
- /// Vec::from_raw_parts(ptr, len, cap)
- /// };
- /// assert_eq!(rebuilt, [4294967295, 0, 1]);
- /// ```
- #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
- pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
- let mut me = ManuallyDrop::new(self);
- (me.as_mut_ptr(), me.len(), me.capacity())
- }
-
- /// Decomposes a `Vec<T>` into its raw components.
- ///
- /// Returns the raw pointer to the underlying data, the length of the vector (in elements),
- /// the allocated capacity of the data (in elements), and the allocator. These are the same
- /// arguments in the same order as the arguments to [`from_raw_parts_in`].
- ///
- /// After calling this function, the caller is responsible for the
- /// memory previously managed by the `Vec`. The only way to do
- /// this is to convert the raw pointer, length, and capacity back
- /// into a `Vec` with the [`from_raw_parts_in`] function, allowing
- /// the destructor to perform the cleanup.
- ///
- /// [`from_raw_parts_in`]: Vec::from_raw_parts_in
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(allocator_api, vec_into_raw_parts)]
- ///
- /// use std::alloc::System;
- ///
- /// let mut v: Vec<i32, System> = Vec::new_in(System);
- /// v.push(-1);
- /// v.push(0);
- /// v.push(1);
- ///
- /// let (ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
- ///
- /// let rebuilt = unsafe {
- /// // We can now make changes to the components, such as
- /// // transmuting the raw pointer to a compatible type.
- /// let ptr = ptr as *mut u32;
- ///
- /// Vec::from_raw_parts_in(ptr, len, cap, alloc)
- /// };
- /// assert_eq!(rebuilt, [4294967295, 0, 1]);
- /// ```
- #[unstable(feature = "allocator_api", issue = "32838")]
- // #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
- pub fn into_raw_parts_with_alloc(self) -> (*mut T, usize, usize, A) {
- let mut me = ManuallyDrop::new(self);
- let len = me.len();
- let capacity = me.capacity();
- let ptr = me.as_mut_ptr();
- let alloc = unsafe { ptr::read(me.allocator()) };
- (ptr, len, capacity, alloc)
- }
-
- /// Returns the total number of elements the vector can hold without
- /// reallocating.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec: Vec<i32> = Vec::with_capacity(10);
- /// vec.push(42);
- /// assert!(vec.capacity() >= 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn capacity(&self) -> usize {
- self.buf.capacity()
- }
-
- /// Reserves capacity for at least `additional` more elements to be inserted
- /// in the given `Vec<T>`. The collection may reserve more space to
- /// speculatively avoid frequent reallocations. After calling `reserve`,
- /// capacity will be greater than or equal to `self.len() + additional`.
- /// Does nothing if capacity is already sufficient.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1];
- /// vec.reserve(10);
- /// assert!(vec.capacity() >= 11);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve(&mut self, additional: usize) {
- self.buf.reserve(self.len, additional);
- }
-
- /// Reserves the minimum capacity for at least `additional` more elements to
- /// be inserted in the given `Vec<T>`. Unlike [`reserve`], this will not
- /// deliberately over-allocate to speculatively avoid frequent allocations.
- /// After calling `reserve_exact`, capacity will be greater than or equal to
- /// `self.len() + additional`. Does nothing if the capacity is already
- /// sufficient.
- ///
- /// Note that the allocator may give the collection more space than it
- /// requests. Therefore, capacity cannot be relied upon to be precisely
- /// minimal. Prefer [`reserve`] if future insertions are expected.
- ///
- /// [`reserve`]: Vec::reserve
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1];
- /// vec.reserve_exact(10);
- /// assert!(vec.capacity() >= 11);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve_exact(&mut self, additional: usize) {
- self.buf.reserve_exact(self.len, additional);
- }
-
- /// Tries to reserve capacity for at least `additional` more elements to be inserted
- /// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
- /// frequent reallocations. After calling `try_reserve`, capacity will be
- /// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient. This method
- /// preserves the contents even if an error occurs.
- ///
- /// # Errors
- ///
- /// If the capacity overflows, or the allocator reports a failure, then an error
- /// is returned.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::TryReserveError;
- ///
- /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
- /// let mut output = Vec::new();
- ///
- /// // Pre-reserve the memory, exiting if we can't
- /// output.try_reserve(data.len())?;
- ///
- /// // Now we know this can't OOM in the middle of our complex work
- /// output.extend(data.iter().map(|&val| {
- /// val * 2 + 5 // very complicated
- /// }));
- ///
- /// Ok(output)
- /// }
- /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
- /// ```
- #[stable(feature = "try_reserve", since = "1.57.0")]
- pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.buf.try_reserve(self.len, additional)
- }
-
- /// Tries to reserve the minimum capacity for at least `additional`
- /// elements to be inserted in the given `Vec<T>`. Unlike [`try_reserve`],
- /// this will not deliberately over-allocate to speculatively avoid frequent
- /// allocations. After calling `try_reserve_exact`, capacity will be greater
- /// than or equal to `self.len() + additional` if it returns `Ok(())`.
- /// Does nothing if the capacity is already sufficient.
- ///
- /// Note that the allocator may give the collection more space than it
- /// requests. Therefore, capacity cannot be relied upon to be precisely
- /// minimal. Prefer [`try_reserve`] if future insertions are expected.
- ///
- /// [`try_reserve`]: Vec::try_reserve
- ///
- /// # Errors
- ///
- /// If the capacity overflows, or the allocator reports a failure, then an error
- /// is returned.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::TryReserveError;
- ///
- /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
- /// let mut output = Vec::new();
- ///
- /// // Pre-reserve the memory, exiting if we can't
- /// output.try_reserve_exact(data.len())?;
- ///
- /// // Now we know this can't OOM in the middle of our complex work
- /// output.extend(data.iter().map(|&val| {
- /// val * 2 + 5 // very complicated
- /// }));
- ///
- /// Ok(output)
- /// }
- /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
- /// ```
- #[stable(feature = "try_reserve", since = "1.57.0")]
- pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.buf.try_reserve_exact(self.len, additional)
- }
-
- /// Shrinks the capacity of the vector as much as possible.
- ///
- /// It will drop down as close as possible to the length but the allocator
- /// may still inform the vector that there is space for a few more elements.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = Vec::with_capacity(10);
- /// vec.extend([1, 2, 3]);
- /// assert!(vec.capacity() >= 10);
- /// vec.shrink_to_fit();
- /// assert!(vec.capacity() >= 3);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn shrink_to_fit(&mut self) {
- // The capacity is never less than the length, and there's nothing to do when
- // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
- // by only calling it with a greater capacity.
- if self.capacity() > self.len {
- self.buf.shrink_to_fit(self.len);
- }
- }
-
- /// Shrinks the capacity of the vector with a lower bound.
- ///
- /// The capacity will remain at least as large as both the length
- /// and the supplied value.
- ///
- /// If the current capacity is less than the lower limit, this is a no-op.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = Vec::with_capacity(10);
- /// vec.extend([1, 2, 3]);
- /// assert!(vec.capacity() >= 10);
- /// vec.shrink_to(4);
- /// assert!(vec.capacity() >= 4);
- /// vec.shrink_to(0);
- /// assert!(vec.capacity() >= 3);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "shrink_to", since = "1.56.0")]
- pub fn shrink_to(&mut self, min_capacity: usize) {
- if self.capacity() > min_capacity {
- self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
- }
- }
-
- /// Converts the vector into [`Box<[T]>`][owned slice].
- ///
- /// If the vector has excess capacity, its items will be moved into a
- /// newly-allocated buffer with exactly the right capacity.
- ///
- /// [owned slice]: Box
- ///
- /// # Examples
- ///
- /// ```
- /// let v = vec![1, 2, 3];
- ///
- /// let slice = v.into_boxed_slice();
- /// ```
- ///
- /// Any excess capacity is removed:
- ///
- /// ```
- /// let mut vec = Vec::with_capacity(10);
- /// vec.extend([1, 2, 3]);
- ///
- /// assert!(vec.capacity() >= 10);
- /// let slice = vec.into_boxed_slice();
- /// assert_eq!(slice.into_vec().capacity(), 3);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn into_boxed_slice(mut self) -> Box<[T], A> {
- unsafe {
- self.shrink_to_fit();
- let me = ManuallyDrop::new(self);
- let buf = ptr::read(&me.buf);
- let len = me.len();
- buf.into_box(len).assume_init()
- }
- }
-
- /// Shortens the vector, keeping the first `len` elements and dropping
- /// the rest.
- ///
- /// If `len` is greater than or equal to the vector's current length, this has
- /// no effect.
- ///
- /// The [`drain`] method can emulate `truncate`, but causes the excess
- /// elements to be returned instead of dropped.
- ///
- /// Note that this method has no effect on the allocated capacity
- /// of the vector.
- ///
- /// # Examples
- ///
- /// Truncating a five element vector to two elements:
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3, 4, 5];
- /// vec.truncate(2);
- /// assert_eq!(vec, [1, 2]);
- /// ```
- ///
- /// No truncation occurs when `len` is greater than the vector's current
- /// length:
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3];
- /// vec.truncate(8);
- /// assert_eq!(vec, [1, 2, 3]);
- /// ```
- ///
- /// Truncating when `len == 0` is equivalent to calling the [`clear`]
- /// method.
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3];
- /// vec.truncate(0);
- /// assert_eq!(vec, []);
- /// ```
- ///
- /// [`clear`]: Vec::clear
- /// [`drain`]: Vec::drain
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn truncate(&mut self, len: usize) {
- // This is safe because:
- //
- // * the slice passed to `drop_in_place` is valid; the `len > self.len`
- // case avoids creating an invalid slice, and
- // * the `len` of the vector is shrunk before calling `drop_in_place`,
- // such that no value will be dropped twice in case `drop_in_place`
- // were to panic once (if it panics twice, the program aborts).
- unsafe {
- // Note: It's intentional that this is `>` and not `>=`.
- // Changing it to `>=` has negative performance
- // implications in some cases. See #78884 for more.
- if len > self.len {
- return;
- }
- let remaining_len = self.len - len;
- let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
- self.len = len;
- ptr::drop_in_place(s);
- }
- }
-
- /// Extracts a slice containing the entire vector.
- ///
- /// Equivalent to `&s[..]`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::io::{self, Write};
- /// let buffer = vec![1, 2, 3, 5, 8];
- /// io::sink().write(buffer.as_slice()).unwrap();
- /// ```
- #[inline]
- #[stable(feature = "vec_as_slice", since = "1.7.0")]
- pub fn as_slice(&self) -> &[T] {
- self
- }
-
- /// Extracts a mutable slice of the entire vector.
- ///
- /// Equivalent to `&mut s[..]`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::io::{self, Read};
- /// let mut buffer = vec![0; 3];
- /// io::repeat(0b101).read_exact(buffer.as_mut_slice()).unwrap();
- /// ```
- #[inline]
- #[stable(feature = "vec_as_slice", since = "1.7.0")]
- pub fn as_mut_slice(&mut self) -> &mut [T] {
- self
- }
-
- /// Returns a raw pointer to the vector's buffer, or a dangling raw pointer
- /// valid for zero sized reads if the vector didn't allocate.
- ///
- /// The caller must ensure that the vector outlives the pointer this
- /// function returns, or else it will end up pointing to garbage.
- /// Modifying the vector may cause its buffer to be reallocated,
- /// which would also make any pointers to it invalid.
- ///
- /// The caller must also ensure that the memory the pointer (non-transitively) points to
- /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
- /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
- ///
- /// This method guarantees that for the purpose of the aliasing model, this method
- /// does not materialize a reference to the underlying slice, and thus the returned pointer
- /// will remain valid when mixed with other calls to [`as_ptr`] and [`as_mut_ptr`].
- /// Note that calling other methods that materialize mutable references to the slice,
- /// or mutable references to specific elements you are planning on accessing through this pointer,
- /// as well as writing to those elements, may still invalidate this pointer.
- /// See the second example below for how this guarantee can be used.
- ///
- /// # Examples
- ///
- /// ```
- /// let x = vec![1, 2, 4];
- /// let x_ptr = x.as_ptr();
- ///
- /// unsafe {
- /// for i in 0..x.len() {
- /// assert_eq!(*x_ptr.add(i), 1 << i);
- /// }
- /// }
- /// ```
- ///
- /// Due to the aliasing guarantee, the following code is legal:
- ///
- /// ```rust
- /// unsafe {
- /// let mut v = vec![0, 1, 2];
- /// let ptr1 = v.as_ptr();
- /// let _ = ptr1.read();
- /// let ptr2 = v.as_mut_ptr().offset(2);
- /// ptr2.write(2);
- /// // Notably, the write to `ptr2` did *not* invalidate `ptr1`
- /// // because it mutated a different element:
- /// let _ = ptr1.read();
- /// }
- /// ```
- ///
- /// [`as_mut_ptr`]: Vec::as_mut_ptr
- /// [`as_ptr`]: Vec::as_ptr
- #[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[rustc_never_returns_null_ptr]
- #[inline]
- pub fn as_ptr(&self) -> *const T {
- // We shadow the slice method of the same name to avoid going through
- // `deref`, which creates an intermediate reference.
- self.buf.ptr()
- }
-
- /// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
- /// raw pointer valid for zero sized reads if the vector didn't allocate.
- ///
- /// The caller must ensure that the vector outlives the pointer this
- /// function returns, or else it will end up pointing to garbage.
- /// Modifying the vector may cause its buffer to be reallocated,
- /// which would also make any pointers to it invalid.
- ///
- /// This method guarantees that for the purpose of the aliasing model, this method
- /// does not materialize a reference to the underlying slice, and thus the returned pointer
- /// will remain valid when mixed with other calls to [`as_ptr`] and [`as_mut_ptr`].
- /// Note that calling other methods that materialize references to the slice,
- /// or references to specific elements you are planning on accessing through this pointer,
- /// may still invalidate this pointer.
- /// See the second example below for how this guarantee can be used.
- ///
- /// # Examples
- ///
- /// ```
- /// // Allocate vector big enough for 4 elements.
- /// let size = 4;
- /// let mut x: Vec<i32> = Vec::with_capacity(size);
- /// let x_ptr = x.as_mut_ptr();
- ///
- /// // Initialize elements via raw pointer writes, then set length.
- /// unsafe {
- /// for i in 0..size {
- /// *x_ptr.add(i) = i as i32;
- /// }
- /// x.set_len(size);
- /// }
- /// assert_eq!(&*x, &[0, 1, 2, 3]);
- /// ```
- ///
- /// Due to the aliasing guarantee, the following code is legal:
- ///
- /// ```rust
- /// unsafe {
- /// let mut v = vec![0];
- /// let ptr1 = v.as_mut_ptr();
- /// ptr1.write(1);
- /// let ptr2 = v.as_mut_ptr();
- /// ptr2.write(2);
- /// // Notably, the write to `ptr2` did *not* invalidate `ptr1`:
- /// ptr1.write(3);
- /// }
- /// ```
- ///
- /// [`as_mut_ptr`]: Vec::as_mut_ptr
- /// [`as_ptr`]: Vec::as_ptr
- #[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[rustc_never_returns_null_ptr]
- #[inline]
- pub fn as_mut_ptr(&mut self) -> *mut T {
- // We shadow the slice method of the same name to avoid going through
- // `deref_mut`, which creates an intermediate reference.
- self.buf.ptr()
- }
-
- /// Returns a reference to the underlying allocator.
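- ///
- /// # Examples
- ///
- /// A minimal sketch, assuming the unstable `allocator_api` feature:
- ///
- /// ```
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::System;
- ///
- /// let vec: Vec<i32, System> = Vec::new_in(System);
- /// let _alloc: &System = vec.allocator();
- /// ```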
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn allocator(&self) -> &A {
- self.buf.allocator()
- }
-
- /// Forces the length of the vector to `new_len`.
- ///
- /// This is a low-level operation that maintains none of the normal
- /// invariants of the type. Normally changing the length of a vector
- /// is done using one of the safe operations instead, such as
- /// [`truncate`], [`resize`], [`extend`], or [`clear`].
- ///
- /// [`truncate`]: Vec::truncate
- /// [`resize`]: Vec::resize
- /// [`extend`]: Extend::extend
- /// [`clear`]: Vec::clear
- ///
- /// # Safety
- ///
- /// - `new_len` must be less than or equal to [`capacity()`].
- /// - The elements at `old_len..new_len` must be initialized.
- ///
- /// [`capacity()`]: Vec::capacity
- ///
- /// # Examples
- ///
- /// This method can be useful for situations in which the vector
- /// is serving as a buffer for other code, particularly over FFI:
- ///
- /// ```no_run
- /// # #![allow(dead_code)]
- /// # // This is just a minimal skeleton for the doc example;
- /// # // don't use this as a starting point for a real library.
- /// # pub struct StreamWrapper { strm: *mut std::ffi::c_void }
- /// # const Z_OK: i32 = 0;
- /// # extern "C" {
- /// # fn deflateGetDictionary(
- /// # strm: *mut std::ffi::c_void,
- /// # dictionary: *mut u8,
- /// # dictLength: *mut usize,
- /// # ) -> i32;
- /// # }
- /// # impl StreamWrapper {
- /// pub fn get_dictionary(&self) -> Option<Vec<u8>> {
- /// // Per the FFI method's docs, "32768 bytes is always enough".
- /// let mut dict = Vec::with_capacity(32_768);
- /// let mut dict_length = 0;
- /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that:
- /// // 1. `dict_length` elements were initialized.
- /// // 2. `dict_length` <= the capacity (32_768)
- /// // which makes `set_len` safe to call.
- /// unsafe {
- /// // Make the FFI call...
- /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length);
- /// if r == Z_OK {
- /// // ...and update the length to what was initialized.
- /// dict.set_len(dict_length);
- /// Some(dict)
- /// } else {
- /// None
- /// }
- /// }
- /// }
- /// # }
- /// ```
- ///
- /// While the following example is sound, there is a memory leak since
- /// the inner vectors were not freed prior to the `set_len` call:
- ///
- /// ```
- /// let mut vec = vec![vec![1, 0, 0],
- /// vec![0, 1, 0],
- /// vec![0, 0, 1]];
- /// // SAFETY:
- /// // 1. `old_len..0` is empty so no elements need to be initialized.
- /// // 2. `0 <= capacity` always holds whatever `capacity` is.
- /// unsafe {
- /// vec.set_len(0);
- /// }
- /// ```
- ///
- /// Normally, here, one would use [`clear`] instead to correctly drop
- /// the contents and thus not leak memory.
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub unsafe fn set_len(&mut self, new_len: usize) {
- debug_assert!(new_len <= self.capacity());
-
- self.len = new_len;
- }
-
- /// Removes an element from the vector and returns it.
- ///
- /// The removed element is replaced by the last element of the vector.
- ///
- /// This does not preserve ordering, but is *O*(1).
- /// If you need to preserve the element order, use [`remove`] instead.
- ///
- /// [`remove`]: Vec::remove
- ///
- /// # Panics
- ///
- /// Panics if `index` is out of bounds.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = vec!["foo", "bar", "baz", "qux"];
- ///
- /// assert_eq!(v.swap_remove(1), "bar");
- /// assert_eq!(v, ["foo", "qux", "baz"]);
- ///
- /// assert_eq!(v.swap_remove(0), "foo");
- /// assert_eq!(v, ["baz", "qux"]);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn swap_remove(&mut self, index: usize) -> T {
- #[cold]
- #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
- #[track_caller]
- fn assert_failed(index: usize, len: usize) -> ! {
- panic!("swap_remove index (is {index}) should be < len (is {len})");
- }
-
- let len = self.len();
- if index >= len {
- assert_failed(index, len);
- }
- unsafe {
- // We replace self[index] with the last element. Note that if the
- // bounds check above succeeds there must be a last element (which
- // can be self[index] itself).
- let value = ptr::read(self.as_ptr().add(index));
- let base_ptr = self.as_mut_ptr();
- ptr::copy(base_ptr.add(len - 1), base_ptr.add(index), 1);
- self.set_len(len - 1);
- value
- }
- }
-
- /// Inserts an element at position `index` within the vector, shifting all
- /// elements after it to the right.
- ///
- /// # Panics
- ///
- /// Panics if `index > len`.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3];
- /// vec.insert(1, 4);
- /// assert_eq!(vec, [1, 4, 2, 3]);
- /// vec.insert(4, 5);
- /// assert_eq!(vec, [1, 4, 2, 3, 5]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn insert(&mut self, index: usize, element: T) {
- #[cold]
- #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
- #[track_caller]
- fn assert_failed(index: usize, len: usize) -> ! {
- panic!("insertion index (is {index}) should be <= len (is {len})");
- }
-
- let len = self.len();
-
- // space for the new element
- if len == self.buf.capacity() {
- self.reserve(1);
- }
-
- unsafe {
- // infallible
- // The spot to put the new value
- {
- let p = self.as_mut_ptr().add(index);
- if index < len {
- // Shift everything over to make space. (Duplicating the
- // `index`th element into two consecutive places.)
- ptr::copy(p, p.add(1), len - index);
- } else if index == len {
- // No elements need shifting.
- } else {
- assert_failed(index, len);
- }
- // Write it in, overwriting the first copy of the `index`th
- // element.
- ptr::write(p, element);
- }
- self.set_len(len + 1);
- }
- }
-
- /// Removes and returns the element at position `index` within the vector,
- /// shifting all elements after it to the left.
- ///
- /// Note: Because this shifts over the remaining elements, it has a
- /// worst-case performance of *O*(*n*). If you don't need the order of elements
- /// to be preserved, use [`swap_remove`] instead. If you'd like to remove
- /// elements from the beginning of the `Vec`, consider using
- /// [`VecDeque::pop_front`] instead.
- ///
- /// [`swap_remove`]: Vec::swap_remove
- /// [`VecDeque::pop_front`]: crate::collections::VecDeque::pop_front
- ///
- /// # Panics
- ///
- /// Panics if `index` is out of bounds.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = vec![1, 2, 3];
- /// assert_eq!(v.remove(1), 2);
- /// assert_eq!(v, [1, 3]);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[track_caller]
- pub fn remove(&mut self, index: usize) -> T {
- #[cold]
- #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
- #[track_caller]
- fn assert_failed(index: usize, len: usize) -> ! {
- panic!("removal index (is {index}) should be < len (is {len})");
- }
-
- let len = self.len();
- if index >= len {
- assert_failed(index, len);
- }
- unsafe {
- // infallible
- let ret;
- {
- // the place we are taking from.
- let ptr = self.as_mut_ptr().add(index);
- // copy it out, unsafely having a copy of the value on
- // the stack and in the vector at the same time.
- ret = ptr::read(ptr);
-
- // Shift everything down to fill in that spot.
- ptr::copy(ptr.add(1), ptr, len - index - 1);
- }
- self.set_len(len - 1);
- ret
- }
- }
-
- /// Retains only the elements specified by the predicate.
- ///
- /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
- /// This method operates in place, visiting each element exactly once in the
- /// original order, and preserves the order of the retained elements.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3, 4];
- /// vec.retain(|&x| x % 2 == 0);
- /// assert_eq!(vec, [2, 4]);
- /// ```
- ///
- /// Because the elements are visited exactly once in the original order,
- /// external state may be used to decide which elements to keep.
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3, 4, 5];
- /// let keep = [false, true, true, false, true];
- /// let mut iter = keep.iter();
- /// vec.retain(|_| *iter.next().unwrap());
- /// assert_eq!(vec, [2, 3, 5]);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn retain<F>(&mut self, mut f: F)
- where
- F: FnMut(&T) -> bool,
- {
- self.retain_mut(|elem| f(elem));
- }
-
- /// Retains only the elements specified by the predicate, passing a mutable reference to it.
- ///
- /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
- /// This method operates in place, visiting each element exactly once in the
- /// original order, and preserves the order of the retained elements.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3, 4];
- /// vec.retain_mut(|x| if *x <= 3 {
- /// *x += 1;
- /// true
- /// } else {
- /// false
- /// });
- /// assert_eq!(vec, [2, 3, 4]);
- /// ```
- #[stable(feature = "vec_retain_mut", since = "1.61.0")]
- pub fn retain_mut<F>(&mut self, mut f: F)
- where
- F: FnMut(&mut T) -> bool,
- {
- let original_len = self.len();
- // Avoid double drop if the drop guard is not executed,
- // since we may make some holes during the process.
- unsafe { self.set_len(0) };
-
- // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
- // |<- processed len ->| ^- next to check
- // |<- deleted cnt ->|
- // |<- original_len ->|
-        // Kept: Elements for which the predicate returned true.
-        // Hole: A slot whose element was moved out or dropped.
-        // Unchecked: Valid elements not visited yet.
-        //
-        // This drop guard is invoked if the predicate or an element's `drop` panics.
-        // It shifts the unchecked elements over the holes and calls `set_len` with the
-        // correct length. In cases where neither the predicate nor `drop` ever panics,
-        // it is optimized out.
- struct BackshiftOnDrop<'a, T, A: Allocator> {
- v: &'a mut Vec<T, A>,
- processed_len: usize,
- deleted_cnt: usize,
- original_len: usize,
- }
-
- impl<T, A: Allocator> Drop for BackshiftOnDrop<'_, T, A> {
- fn drop(&mut self) {
- if self.deleted_cnt > 0 {
- // SAFETY: Trailing unchecked items must be valid since we never touch them.
- unsafe {
- ptr::copy(
- self.v.as_ptr().add(self.processed_len),
- self.v.as_mut_ptr().add(self.processed_len - self.deleted_cnt),
- self.original_len - self.processed_len,
- );
- }
- }
- // SAFETY: After filling holes, all items are in contiguous memory.
- unsafe {
- self.v.set_len(self.original_len - self.deleted_cnt);
- }
- }
- }
-
- let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len };
-
- fn process_loop<F, T, A: Allocator, const DELETED: bool>(
- original_len: usize,
- f: &mut F,
- g: &mut BackshiftOnDrop<'_, T, A>,
- ) where
- F: FnMut(&mut T) -> bool,
- {
- while g.processed_len != original_len {
- // SAFETY: Unchecked element must be valid.
- let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
- if !f(cur) {
-                    // Advance early to avoid a double drop if `drop_in_place` panics.
- g.processed_len += 1;
- g.deleted_cnt += 1;
-                    // SAFETY: We never touch this element again after it is dropped.
- unsafe { ptr::drop_in_place(cur) };
- // We already advanced the counter.
- if DELETED {
- continue;
- } else {
- break;
- }
- }
- if DELETED {
-                    // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with the current element.
- // We use copy for move, and never touch this element again.
- unsafe {
- let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
- ptr::copy_nonoverlapping(cur, hole_slot, 1);
- }
- }
- g.processed_len += 1;
- }
- }
-
-        // Stage 1: Nothing was deleted yet, so no elements need shifting; this
-        // loop exits as soon as the predicate deletes its first element.
-        process_loop::<F, T, A, false>(original_len, &mut f, &mut g);
-
-        // Stage 2: Some elements were deleted, so every kept element must be
-        // shifted back over the holes.
-        process_loop::<F, T, A, true>(original_len, &mut f, &mut g);
-
-        // All items are processed. This can be optimized to `set_len` by LLVM.
- drop(g);
- }
-
- /// Removes all but the first of consecutive elements in the vector that resolve to the same
- /// key.
- ///
- /// If the vector is sorted, this removes all duplicates.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![10, 20, 21, 30, 20];
- ///
- /// vec.dedup_by_key(|i| *i / 10);
- ///
- /// assert_eq!(vec, [10, 20, 30, 20]);
- /// ```
- #[stable(feature = "dedup_by", since = "1.16.0")]
- #[inline]
- pub fn dedup_by_key<F, K>(&mut self, mut key: F)
- where
- F: FnMut(&mut T) -> K,
- K: PartialEq,
- {
- self.dedup_by(|a, b| key(a) == key(b))
- }
-
- /// Removes all but the first of consecutive elements in the vector satisfying a given equality
- /// relation.
- ///
- /// The `same_bucket` function is passed references to two elements from the vector and
- /// must determine if the elements compare equal. The elements are passed in opposite order
- /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is removed.
- ///
- /// If the vector is sorted, this removes all duplicates.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];
- ///
- /// vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
- ///
- /// assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
- /// ```
- #[stable(feature = "dedup_by", since = "1.16.0")]
- pub fn dedup_by<F>(&mut self, mut same_bucket: F)
- where
- F: FnMut(&mut T, &mut T) -> bool,
- {
- let len = self.len();
- if len <= 1 {
- return;
- }
-
-        // Check if we ever want to remove anything.
-        // This allows us to use `copy_nonoverlapping` in the next cycle,
-        // and avoids any memory writes if we don't need to remove anything.
- let mut first_duplicate_idx: usize = 1;
- let start = self.as_mut_ptr();
- while first_duplicate_idx != len {
- let found_duplicate = unsafe {
-                // SAFETY: `first_duplicate_idx` is always in the range [1..len).
-                // Note that we start iteration from 1, so we never overflow.
- let prev = start.add(first_duplicate_idx.wrapping_sub(1));
- let current = start.add(first_duplicate_idx);
- // We explicitly say in docs that references are reversed.
- same_bucket(&mut *current, &mut *prev)
- };
- if found_duplicate {
- break;
- }
- first_duplicate_idx += 1;
- }
-        // No duplicates were found: nothing needs to be removed.
-        // (`first_duplicate_idx` cannot get bigger than `len`.)
- if first_duplicate_idx == len {
- return;
- }
-
- /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
- struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
- /* Offset of the element we want to check if it is duplicate */
- read: usize,
-
- /* Offset of the place where we want to place the non-duplicate
- * when we find it. */
- write: usize,
-
- /* The Vec that would need correction if `same_bucket` panicked */
- vec: &'a mut Vec<T, A>,
- }
-
- impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
- fn drop(&mut self) {
- /* This code gets executed when `same_bucket` panics */
-
- /* SAFETY: invariant guarantees that `read - write`
- * and `len - read` never overflow and that the copy is always
- * in-bounds. */
- unsafe {
- let ptr = self.vec.as_mut_ptr();
- let len = self.vec.len();
-
- /* How many items were left when `same_bucket` panicked.
- * Basically vec[read..].len() */
- let items_left = len.wrapping_sub(self.read);
-
- /* Pointer to first item in vec[write..write+items_left] slice */
- let dropped_ptr = ptr.add(self.write);
- /* Pointer to first item in vec[read..] slice */
- let valid_ptr = ptr.add(self.read);
-
- /* Copy `vec[read..]` to `vec[write..write+items_left]`.
- * The slices can overlap, so `copy_nonoverlapping` cannot be used */
- ptr::copy(valid_ptr, dropped_ptr, items_left);
-
- /* How many items have been already dropped
- * Basically vec[read..write].len() */
- let dropped = self.read.wrapping_sub(self.write);
-
- self.vec.set_len(len - dropped);
- }
- }
- }
-
-        /* Drop items while going through the Vec; this should be more efficient
-         * than doing slice partition_dedup + truncate */
-
- // Construct gap first and then drop item to avoid memory corruption if `T::drop` panics.
- let mut gap =
- FillGapOnDrop { read: first_duplicate_idx + 1, write: first_duplicate_idx, vec: self };
- unsafe {
-            // SAFETY: we checked above that `first_duplicate_idx` is in bounds.
-            // If `drop` panics, `gap` will fill the hole without dropping this item again.
- ptr::drop_in_place(start.add(first_duplicate_idx));
- }
-
- /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
- * are always in-bounds and read_ptr never aliases prev_ptr */
- unsafe {
- while gap.read < len {
- let read_ptr = start.add(gap.read);
- let prev_ptr = start.add(gap.write.wrapping_sub(1));
-
- // We explicitly say in docs that references are reversed.
- let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
- if found_duplicate {
- // Increase `gap.read` now since the drop may panic.
- gap.read += 1;
- /* We have found duplicate, drop it in-place */
- ptr::drop_in_place(read_ptr);
- } else {
- let write_ptr = start.add(gap.write);
-
-                    /* read_ptr cannot be equal to write_ptr because at this point
-                     * we are guaranteed to have skipped at least one element
-                     * (before the loop starts). */
- ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);
-
- /* We have filled that place, so go further */
- gap.write += 1;
- gap.read += 1;
- }
- }
-
-            /* Technically we could let `gap` clean up with its Drop, but when
-             * `same_bucket` is guaranteed not to panic, that bloats the codegen
-             * a little, so we just do it manually */
- gap.vec.set_len(gap.write);
- mem::forget(gap);
- }
- }
-
- /// Appends an element to the back of a collection.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2];
- /// vec.push(3);
- /// assert_eq!(vec, [1, 2, 3]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn push(&mut self, value: T) {
- // This will panic or abort if we would allocate > isize::MAX bytes
- // or if the length increment would overflow for zero-sized types.
- if self.len == self.buf.capacity() {
- self.buf.reserve_for_push(self.len);
- }
- unsafe {
- let end = self.as_mut_ptr().add(self.len);
- ptr::write(end, value);
- self.len += 1;
- }
- }
-
-    /// Tries to append an element to the back of a collection,
-    /// returning an error if the backing buffer cannot be grown.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2];
- /// vec.try_push(3).unwrap();
- /// assert_eq!(vec, [1, 2, 3]);
- /// ```
- #[inline]
- #[stable(feature = "kernel", since = "1.0.0")]
- pub fn try_push(&mut self, value: T) -> Result<(), TryReserveError> {
- if self.len == self.buf.capacity() {
- self.buf.try_reserve_for_push(self.len)?;
- }
- unsafe {
- let end = self.as_mut_ptr().add(self.len);
- ptr::write(end, value);
- self.len += 1;
- }
- Ok(())
- }
-
-    /// Appends an element if there is sufficient spare capacity; otherwise,
-    /// an error is returned containing the element.
- ///
- /// Unlike [`push`] this method will not reallocate when there's insufficient capacity.
- /// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity.
- ///
- /// [`push`]: Vec::push
- /// [`reserve`]: Vec::reserve
- /// [`try_reserve`]: Vec::try_reserve
- ///
- /// # Examples
- ///
- /// A manual, panic-free alternative to [`FromIterator`]:
- ///
- /// ```
- /// #![feature(vec_push_within_capacity)]
- ///
- /// use std::collections::TryReserveError;
- /// fn from_iter_fallible<T>(iter: impl Iterator<Item=T>) -> Result<Vec<T>, TryReserveError> {
- /// let mut vec = Vec::new();
- /// for value in iter {
- /// if let Err(value) = vec.push_within_capacity(value) {
- /// vec.try_reserve(1)?;
-    ///             // this cannot fail: the previous line either returned or added at least 1 free slot
- /// let _ = vec.push_within_capacity(value);
- /// }
- /// }
- /// Ok(vec)
- /// }
- /// assert_eq!(from_iter_fallible(0..100), Ok(Vec::from_iter(0..100)));
- /// ```
- #[inline]
- #[unstable(feature = "vec_push_within_capacity", issue = "100486")]
- pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> {
- if self.len == self.buf.capacity() {
- return Err(value);
- }
- unsafe {
- let end = self.as_mut_ptr().add(self.len);
- ptr::write(end, value);
- self.len += 1;
- }
- Ok(())
- }
-
- /// Removes the last element from a vector and returns it, or [`None`] if it
- /// is empty.
- ///
- /// If you'd like to pop the first element, consider using
- /// [`VecDeque::pop_front`] instead.
- ///
- /// [`VecDeque::pop_front`]: crate::collections::VecDeque::pop_front
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3];
- /// assert_eq!(vec.pop(), Some(3));
- /// assert_eq!(vec, [1, 2]);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn pop(&mut self) -> Option<T> {
- if self.len == 0 {
- None
- } else {
- unsafe {
- self.len -= 1;
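-                // Inform the optimizer that the decremented length is still below
-                // the capacity; this trivially holds and can enable better codegen.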
- core::intrinsics::assume(self.len < self.capacity());
- Some(ptr::read(self.as_ptr().add(self.len())))
- }
- }
- }
-
- /// Moves all the elements of `other` into `self`, leaving `other` empty.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3];
- /// let mut vec2 = vec![4, 5, 6];
- /// vec.append(&mut vec2);
- /// assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
- /// assert_eq!(vec2, []);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- #[stable(feature = "append", since = "1.4.0")]
- pub fn append(&mut self, other: &mut Self) {
- unsafe {
- self.append_elements(other.as_slice() as _);
- other.set_len(0);
- }
- }
-
-    /// Appends elements to `self` from another buffer.
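-    ///
-    /// # Safety
-    ///
-    /// `other` must be valid for reads of `(*other).len()` elements. The
-    /// elements are bitwise-copied out, so the caller must ensure they are not
-    /// dropped through `other` afterwards (`append` resets the source's length
-    /// to zero for exactly this reason).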
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- unsafe fn append_elements(&mut self, other: *const [T]) {
- let count = unsafe { (*other).len() };
- self.reserve(count);
- let len = self.len();
- unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
- self.len += count;
- }
-
-    /// Tries to append elements to `self` from another buffer.
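-    ///
-    /// # Safety
-    ///
-    /// Same contract as `append_elements`: `other` must be valid for reads of
-    /// `(*other).len()` elements, and the copied-out elements must not be
-    /// dropped through `other` afterwards.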
- #[inline]
- unsafe fn try_append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
- let count = unsafe { (*other).len() };
- self.try_reserve(count)?;
- let len = self.len();
- unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
- self.len += count;
- Ok(())
- }
-
- /// Removes the specified range from the vector in bulk, returning all
- /// removed elements as an iterator. If the iterator is dropped before
- /// being fully consumed, it drops the remaining removed elements.
- ///
- /// The returned iterator keeps a mutable borrow on the vector to optimize
- /// its implementation.
- ///
- /// # Panics
- ///
- /// Panics if the starting point is greater than the end point or if
- /// the end point is greater than the length of the vector.
- ///
- /// # Leaking
- ///
- /// If the returned iterator goes out of scope without being dropped (due to
- /// [`mem::forget`], for example), the vector may have lost and leaked
- /// elements arbitrarily, including elements outside the range.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = vec![1, 2, 3];
- /// let u: Vec<_> = v.drain(1..).collect();
- /// assert_eq!(v, &[1]);
- /// assert_eq!(u, &[2, 3]);
- ///
- /// // A full range clears the vector, like `clear()` does
- /// v.drain(..);
- /// assert_eq!(v, &[]);
- /// ```
- #[stable(feature = "drain", since = "1.6.0")]
- pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
- where
- R: RangeBounds<usize>,
- {
- // Memory safety
- //
- // When the Drain is first created, it shortens the length of
- // the source vector to make sure no uninitialized or moved-from elements
- // are accessible at all if the Drain's destructor never gets to run.
- //
- // Drain will ptr::read out the values to remove.
-        // When finished, the remaining tail of the vec is copied back to cover
- // the hole, and the vector length is restored to the new length.
- //
- let len = self.len();
- let Range { start, end } = slice::range(range, ..len);
-
- unsafe {
-            // Set the length of `self` to `start`, to be safe in case the `Drain` is leaked.
- self.set_len(start);
- let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
- Drain {
- tail_start: end,
- tail_len: len - end,
- iter: range_slice.iter(),
- vec: NonNull::from(self),
- }
- }
- }
-
- /// Clears the vector, removing all values.
- ///
- /// Note that this method has no effect on the allocated capacity
- /// of the vector.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = vec![1, 2, 3];
- ///
- /// v.clear();
- ///
- /// assert!(v.is_empty());
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn clear(&mut self) {
- let elems: *mut [T] = self.as_mut_slice();
-
- // SAFETY:
- // - `elems` comes directly from `as_mut_slice` and is therefore valid.
- // - Setting `self.len` before calling `drop_in_place` means that,
- // if an element's `Drop` impl panics, the vector's `Drop` impl will
- // do nothing (leaking the rest of the elements) instead of dropping
- // some twice.
- unsafe {
- self.len = 0;
- ptr::drop_in_place(elems);
- }
- }
-
- /// Returns the number of elements in the vector, also referred to
- /// as its 'length'.
- ///
- /// # Examples
- ///
- /// ```
- /// let a = vec![1, 2, 3];
- /// assert_eq!(a.len(), 3);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn len(&self) -> usize {
- self.len
- }
-
- /// Returns `true` if the vector contains no elements.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = Vec::new();
- /// assert!(v.is_empty());
- ///
- /// v.push(1);
- /// assert!(!v.is_empty());
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn is_empty(&self) -> bool {
- self.len() == 0
- }
-
- /// Splits the collection into two at the given index.
- ///
- /// Returns a newly allocated vector containing the elements in the range
- /// `[at, len)`. After the call, the original vector will be left containing
- /// the elements `[0, at)` with its previous capacity unchanged.
- ///
- /// # Panics
- ///
- /// Panics if `at > len`.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3];
- /// let vec2 = vec.split_off(1);
- /// assert_eq!(vec, [1]);
- /// assert_eq!(vec2, [2, 3]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- #[must_use = "use `.truncate()` if you don't need the other half"]
- #[stable(feature = "split_off", since = "1.4.0")]
- pub fn split_off(&mut self, at: usize) -> Self
- where
- A: Clone,
- {
- #[cold]
- #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
- #[track_caller]
- fn assert_failed(at: usize, len: usize) -> ! {
- panic!("`at` split index (is {at}) should be <= len (is {len})");
- }
-
- if at > self.len() {
- assert_failed(at, self.len());
- }
-
- if at == 0 {
- // the new vector can take over the original buffer and avoid the copy
- return mem::replace(
- self,
- Vec::with_capacity_in(self.capacity(), self.allocator().clone()),
- );
- }
-
- let other_len = self.len - at;
- let mut other = Vec::with_capacity_in(other_len, self.allocator().clone());
-
- // Unsafely `set_len` and copy items to `other`.
- unsafe {
- self.set_len(at);
- other.set_len(other_len);
-
- ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
- }
- other
- }
-
- /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
- ///
- /// If `new_len` is greater than `len`, the `Vec` is extended by the
- /// difference, with each additional slot filled with the result of
- /// calling the closure `f`. The return values from `f` will end up
- /// in the `Vec` in the order they have been generated.
- ///
- /// If `new_len` is less than `len`, the `Vec` is simply truncated.
- ///
- /// This method uses a closure to create new values on every push. If
- /// you'd rather [`Clone`] a given value, use [`Vec::resize`]. If you
- /// want to use the [`Default`] trait to generate values, you can
- /// pass [`Default::default`] as the second argument.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 3];
- /// vec.resize_with(5, Default::default);
- /// assert_eq!(vec, [1, 2, 3, 0, 0]);
- ///
- /// let mut vec = vec![];
- /// let mut p = 1;
- /// vec.resize_with(4, || { p *= 2; p });
- /// assert_eq!(vec, [2, 4, 8, 16]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "vec_resize_with", since = "1.33.0")]
- pub fn resize_with<F>(&mut self, new_len: usize, f: F)
- where
- F: FnMut() -> T,
- {
- let len = self.len();
- if new_len > len {
- self.extend_trusted(iter::repeat_with(f).take(new_len - len));
- } else {
- self.truncate(new_len);
- }
- }
-
- /// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
- /// `&'a mut [T]`. Note that the type `T` must outlive the chosen lifetime
- /// `'a`. If the type has only static references, or none at all, then this
- /// may be chosen to be `'static`.
- ///
- /// As of Rust 1.57, this method does not reallocate or shrink the `Vec`,
- /// so the leaked allocation may include unused capacity that is not part
- /// of the returned slice.
- ///
- /// This function is mainly useful for data that lives for the remainder of
- /// the program's life. Dropping the returned reference will cause a memory
- /// leak.
- ///
- /// # Examples
- ///
- /// Simple usage:
- ///
- /// ```
- /// let x = vec![1, 2, 3];
- /// let static_ref: &'static mut [usize] = x.leak();
- /// static_ref[0] += 1;
- /// assert_eq!(static_ref, &[2, 2, 3]);
- /// ```
- #[stable(feature = "vec_leak", since = "1.47.0")]
- #[inline]
- pub fn leak<'a>(self) -> &'a mut [T]
- where
- A: 'a,
- {
- let mut me = ManuallyDrop::new(self);
- unsafe { slice::from_raw_parts_mut(me.as_mut_ptr(), me.len) }
- }
-
- /// Returns the remaining spare capacity of the vector as a slice of
- /// `MaybeUninit<T>`.
- ///
- /// The returned slice can be used to fill the vector with data (e.g. by
- /// reading from a file) before marking the data as initialized using the
- /// [`set_len`] method.
- ///
- /// [`set_len`]: Vec::set_len
- ///
- /// # Examples
- ///
- /// ```
- /// // Allocate vector big enough for 10 elements.
- /// let mut v = Vec::with_capacity(10);
- ///
- /// // Fill in the first 3 elements.
- /// let uninit = v.spare_capacity_mut();
- /// uninit[0].write(0);
- /// uninit[1].write(1);
- /// uninit[2].write(2);
- ///
- /// // Mark the first 3 elements of the vector as being initialized.
- /// unsafe {
- /// v.set_len(3);
- /// }
- ///
- /// assert_eq!(&v, &[0, 1, 2]);
- /// ```
- #[stable(feature = "vec_spare_capacity", since = "1.60.0")]
- #[inline]
- pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
- // Note:
- // This method is not implemented in terms of `split_at_spare_mut`,
- // to prevent invalidation of pointers to the buffer.
- unsafe {
- slice::from_raw_parts_mut(
- self.as_mut_ptr().add(self.len) as *mut MaybeUninit<T>,
- self.buf.capacity() - self.len,
- )
- }
- }
-
- /// Returns vector content as a slice of `T`, along with the remaining spare
- /// capacity of the vector as a slice of `MaybeUninit<T>`.
- ///
- /// The returned spare capacity slice can be used to fill the vector with data
- /// (e.g. by reading from a file) before marking the data as initialized using
- /// the [`set_len`] method.
- ///
- /// [`set_len`]: Vec::set_len
- ///
- /// Note that this is a low-level API, which should be used with care for
- /// optimization purposes. If you need to append data to a `Vec`
- /// you can use [`push`], [`extend`], [`extend_from_slice`],
- /// [`extend_from_within`], [`insert`], [`append`], [`resize`] or
- /// [`resize_with`], depending on your exact needs.
- ///
- /// [`push`]: Vec::push
- /// [`extend`]: Vec::extend
- /// [`extend_from_slice`]: Vec::extend_from_slice
- /// [`extend_from_within`]: Vec::extend_from_within
- /// [`insert`]: Vec::insert
- /// [`append`]: Vec::append
- /// [`resize`]: Vec::resize
- /// [`resize_with`]: Vec::resize_with
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(vec_split_at_spare)]
- ///
- /// let mut v = vec![1, 1, 2];
- ///
- /// // Reserve additional space big enough for 10 elements.
- /// v.reserve(10);
- ///
- /// let (init, uninit) = v.split_at_spare_mut();
- /// let sum = init.iter().copied().sum::<u32>();
- ///
- /// // Fill in the next 4 elements.
- /// uninit[0].write(sum);
- /// uninit[1].write(sum * 2);
- /// uninit[2].write(sum * 3);
- /// uninit[3].write(sum * 4);
- ///
- /// // Mark the 4 elements of the vector as being initialized.
- /// unsafe {
- /// let len = v.len();
- /// v.set_len(len + 4);
- /// }
- ///
- /// assert_eq!(&v, &[1, 1, 2, 4, 8, 12, 16]);
- /// ```
- #[unstable(feature = "vec_split_at_spare", issue = "81944")]
- #[inline]
- pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [MaybeUninit<T>]) {
- // SAFETY:
- // - len is ignored and so never changed
- let (init, spare, _) = unsafe { self.split_at_spare_mut_with_len() };
- (init, spare)
- }
-
-    /// This method provides unique access to all parts of the vec at once in
-    /// `extend_from_within`.
-    ///
-    /// # Safety
-    ///
-    /// Changing the returned `.2` (`&mut usize`) is considered the same as
-    /// calling `.set_len(_)`.
- unsafe fn split_at_spare_mut_with_len(
- &mut self,
- ) -> (&mut [T], &mut [MaybeUninit<T>], &mut usize) {
- let ptr = self.as_mut_ptr();
- // SAFETY:
- // - `ptr` is guaranteed to be valid for `self.len` elements
- // - but the allocation extends out to `self.buf.capacity()` elements, possibly
- // uninitialized
- let spare_ptr = unsafe { ptr.add(self.len) };
- let spare_ptr = spare_ptr.cast::<MaybeUninit<T>>();
- let spare_len = self.buf.capacity() - self.len;
-
- // SAFETY:
- // - `ptr` is guaranteed to be valid for `self.len` elements
-        // - `spare_ptr` points just past the initialized elements, so it doesn't overlap with `initialized`
- unsafe {
- let initialized = slice::from_raw_parts_mut(ptr, self.len);
- let spare = slice::from_raw_parts_mut(spare_ptr, spare_len);
-
- (initialized, spare, &mut self.len)
- }
- }
-}
-
-impl<T: Clone, A: Allocator> Vec<T, A> {
- /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
- ///
- /// If `new_len` is greater than `len`, the `Vec` is extended by the
- /// difference, with each additional slot filled with `value`.
- /// If `new_len` is less than `len`, the `Vec` is simply truncated.
- ///
- /// This method requires `T` to implement [`Clone`],
- /// in order to be able to clone the passed value.
- /// If you need more flexibility (or want to rely on [`Default`] instead of
- /// [`Clone`]), use [`Vec::resize_with`].
- /// If you only need to resize to a smaller size, use [`Vec::truncate`].
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec!["hello"];
- /// vec.resize(3, "world");
- /// assert_eq!(vec, ["hello", "world", "world"]);
- ///
- /// let mut vec = vec![1, 2, 3, 4];
- /// vec.resize(2, 0);
- /// assert_eq!(vec, [1, 2]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "vec_resize", since = "1.5.0")]
- pub fn resize(&mut self, new_len: usize, value: T) {
- let len = self.len();
-
- if new_len > len {
- self.extend_with(new_len - len, value)
- } else {
- self.truncate(new_len);
- }
- }
-
- /// Tries to resize the `Vec` in-place so that `len` is equal to `new_len`.
- ///
- /// If `new_len` is greater than `len`, the `Vec` is extended by the
- /// difference, with each additional slot filled with `value`.
- /// If `new_len` is less than `len`, the `Vec` is simply truncated.
- ///
- /// This method requires `T` to implement [`Clone`],
- /// in order to be able to clone the passed value.
- /// If you need more flexibility (or want to rely on [`Default`] instead of
- /// [`Clone`]), use [`Vec::resize_with`].
- /// If you only need to resize to a smaller size, use [`Vec::truncate`].
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec!["hello"];
- /// vec.try_resize(3, "world").unwrap();
- /// assert_eq!(vec, ["hello", "world", "world"]);
- ///
- /// let mut vec = vec![1, 2, 3, 4];
- /// vec.try_resize(2, 0).unwrap();
- /// assert_eq!(vec, [1, 2]);
- ///
- /// let mut vec = vec![42];
- /// let result = vec.try_resize(usize::MAX, 0);
- /// assert!(result.is_err());
- /// ```
- #[stable(feature = "kernel", since = "1.0.0")]
- pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), TryReserveError> {
- let len = self.len();
-
- if new_len > len {
- self.try_extend_with(new_len - len, value)
- } else {
- self.truncate(new_len);
- Ok(())
- }
- }
-
- /// Clones and appends all elements in a slice to the `Vec`.
- ///
- /// Iterates over the slice `other`, clones each element, and then appends
- /// it to this `Vec`. The `other` slice is traversed in-order.
- ///
-    /// Note that this function is the same as [`extend`], except that it is
- /// specialized to work with slices instead. If and when Rust gets
- /// specialization this function will likely be deprecated (but still
- /// available).
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1];
- /// vec.extend_from_slice(&[2, 3, 4]);
- /// assert_eq!(vec, [1, 2, 3, 4]);
- /// ```
- ///
- /// [`extend`]: Vec::extend
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
- pub fn extend_from_slice(&mut self, other: &[T]) {
- self.spec_extend(other.iter())
- }
-
- /// Tries to clone and append all elements in a slice to the `Vec`.
- ///
- /// Iterates over the slice `other`, clones each element, and then appends
- /// it to this `Vec`. The `other` slice is traversed in-order.
- ///
-    /// Note that this function is the same as [`extend`], except that it is
- /// specialized to work with slices instead. If and when Rust gets
- /// specialization this function will likely be deprecated (but still
- /// available).
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1];
- /// vec.try_extend_from_slice(&[2, 3, 4]).unwrap();
- /// assert_eq!(vec, [1, 2, 3, 4]);
- /// ```
- ///
- /// [`extend`]: Vec::extend
- #[stable(feature = "kernel", since = "1.0.0")]
- pub fn try_extend_from_slice(&mut self, other: &[T]) -> Result<(), TryReserveError> {
- self.try_spec_extend(other.iter())
- }
-
-    /// Copies elements from the `src` range to the end of the vector.
- ///
- /// # Panics
- ///
- /// Panics if the starting point is greater than the end point or if
- /// the end point is greater than the length of the vector.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![0, 1, 2, 3, 4];
- ///
- /// vec.extend_from_within(2..);
- /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4]);
- ///
- /// vec.extend_from_within(..2);
- /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1]);
- ///
- /// vec.extend_from_within(4..8);
- /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1, 4, 2, 3, 4]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[stable(feature = "vec_extend_from_within", since = "1.53.0")]
- pub fn extend_from_within<R>(&mut self, src: R)
- where
- R: RangeBounds<usize>,
- {
- let range = slice::range(src, ..self.len());
- self.reserve(range.len());
-
- // SAFETY:
- // - `slice::range` guarantees that the given range is valid for indexing self
- unsafe {
- self.spec_extend_from_within(range);
- }
- }
-}
-
-impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
- /// Takes a `Vec<[T; N]>` and flattens it into a `Vec<T>`.
- ///
- /// # Panics
- ///
- /// Panics if the length of the resulting vector would overflow a `usize`.
- ///
- /// This is only possible when flattening a vector of arrays of zero-sized
- /// types, and thus tends to be irrelevant in practice. If
- /// `size_of::<T>() > 0`, this will never panic.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(slice_flatten)]
- ///
- /// let mut vec = vec![[1, 2, 3], [4, 5, 6], [7, 8, 9]];
- /// assert_eq!(vec.pop(), Some([7, 8, 9]));
- ///
- /// let mut flattened = vec.into_flattened();
- /// assert_eq!(flattened.pop(), Some(6));
- /// ```
- #[unstable(feature = "slice_flatten", issue = "95629")]
- pub fn into_flattened(self) -> Vec<T, A> {
- let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
- let (new_len, new_cap) = if T::IS_ZST {
- (len.checked_mul(N).expect("vec len overflow"), usize::MAX)
- } else {
- // SAFETY:
- // - `cap * N` cannot overflow because the allocation is already in
- // the address space.
- // - Each `[T; N]` has `N` valid elements, so there are `len * N`
- // valid elements in the allocation.
- unsafe { (len.unchecked_mul(N), cap.unchecked_mul(N)) }
- };
- // SAFETY:
- // - `ptr` was allocated by `self`
- // - `ptr` is well-aligned because `[T; N]` has the same alignment as `T`.
- // - `new_cap` refers to the same sized allocation as `cap` because
- // `new_cap * size_of::<T>()` == `cap * size_of::<[T; N]>()`
- // - `len` <= `cap`, so `len * N` <= `cap * N`.
- unsafe { Vec::<T, A>::from_raw_parts_in(ptr.cast(), new_len, new_cap, alloc) }
- }
-}
-
-impl<T: Clone, A: Allocator> Vec<T, A> {
- #[cfg(not(no_global_oom_handling))]
-    /// Extend the vector by `n` clones of `value`.
- fn extend_with(&mut self, n: usize, value: T) {
- self.reserve(n);
-
- unsafe {
- let mut ptr = self.as_mut_ptr().add(self.len());
-            // Use SetLenOnDrop to work around a compiler bug: the compiler might
-            // not realize that the stores through `ptr` and the length update via
-            // `set_len` don't alias.
- let mut local_len = SetLenOnDrop::new(&mut self.len);
-
- // Write all elements except the last one
- for _ in 1..n {
- ptr::write(ptr, value.clone());
- ptr = ptr.add(1);
- // Increment the length in every step in case clone() panics
- local_len.increment_len(1);
- }
-
- if n > 0 {
- // We can write the last element directly without cloning needlessly
- ptr::write(ptr, value);
- local_len.increment_len(1);
- }
-
- // len set by scope guard
- }
- }
-
-    /// Try to extend the vector by `n` clones of `value`.
- fn try_extend_with(&mut self, n: usize, value: T) -> Result<(), TryReserveError> {
- self.try_reserve(n)?;
-
- unsafe {
- let mut ptr = self.as_mut_ptr().add(self.len());
-            // Use SetLenOnDrop to work around a compiler bug: the compiler might
-            // not realize that the stores through `ptr` and the length update via
-            // `set_len` don't alias.
- let mut local_len = SetLenOnDrop::new(&mut self.len);
-
- // Write all elements except the last one
- for _ in 1..n {
- ptr::write(ptr, value.clone());
- ptr = ptr.add(1);
- // Increment the length in every step in case clone() panics
- local_len.increment_len(1);
- }
-
- if n > 0 {
- // We can write the last element directly without cloning needlessly
- ptr::write(ptr, value);
- local_len.increment_len(1);
- }
-
- // len set by scope guard
- Ok(())
- }
- }
-}
-
-impl<T: PartialEq, A: Allocator> Vec<T, A> {
- /// Removes consecutive repeated elements in the vector according to the
- /// [`PartialEq`] trait implementation.
- ///
- /// If the vector is sorted, this removes all duplicates.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut vec = vec![1, 2, 2, 3, 2];
- ///
- /// vec.dedup();
- ///
- /// assert_eq!(vec, [1, 2, 3, 2]);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[inline]
- pub fn dedup(&mut self) {
- self.dedup_by(|a, b| a == b)
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Internal methods and functions
-////////////////////////////////////////////////////////////////////////////////
-
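-// `from_elem` backs the `vec![elem; n]` macro form: `vec![0u8; 4]` lowers to
-// `from_elem(0u8, 4)`, which dispatches through `SpecFromElem` so that, for
-// example, zeroed numeric values can take an optimized fill path.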
-#[doc(hidden)]
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
- <T as SpecFromElem>::from_elem(elem, n, Global)
-}
-
-#[doc(hidden)]
-#[cfg(not(no_global_oom_handling))]
-#[unstable(feature = "allocator_api", issue = "32838")]
-pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
- <T as SpecFromElem>::from_elem(elem, n, alloc)
-}
-
-#[cfg(not(no_global_oom_handling))]
-trait ExtendFromWithinSpec {
- /// # Safety
- ///
-    /// - `src` needs to be a valid index
- /// - `self.capacity() - self.len()` must be `>= src.len()`
- unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
- default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
- // SAFETY:
- // - len is increased only after initializing elements
- let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
-
- // SAFETY:
- // - caller guarantees that src is a valid index
- let to_clone = unsafe { this.get_unchecked(src) };
-
- iter::zip(to_clone, spare)
- .map(|(src, dst)| dst.write(src.clone()))
- // Note:
- // - Element was just initialized with `MaybeUninit::write`, so it's ok to increase len
- // - len is increased after each element to prevent leaks (see issue #82533)
- .for_each(|_| *len += 1);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
- unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
- let count = src.len();
- {
- let (init, spare) = self.split_at_spare_mut();
-
- // SAFETY:
- // - caller guarantees that `src` is a valid index
- let source = unsafe { init.get_unchecked(src) };
-
- // SAFETY:
- // - Both pointers are created from unique slice references (`&mut [_]`)
- // so they are valid and do not overlap.
-            // - Elements are `Copy`, so it's OK to copy them without doing
-            //   anything to the original values
- // - `count` is equal to the len of `source`, so source is valid for
- // `count` reads
- // - `.reserve(count)` guarantees that `spare.len() >= count` so spare
- // is valid for `count` writes
- unsafe { ptr::copy_nonoverlapping(source.as_ptr(), spare.as_mut_ptr() as _, count) };
- }
-
- // SAFETY:
- // - The elements were just initialized by `copy_nonoverlapping`
- self.len += count;
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Common trait implementations for Vec
-////////////////////////////////////////////////////////////////////////////////
-
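-/// `Vec<T>` dereferences to `[T]`, which is what makes every slice method
-/// callable directly on a vector:
-///
-/// ```
-/// let v = vec![3, 1, 2];
-/// // `first` and `contains` are `[T]` methods, reached through `Deref`.
-/// assert_eq!(v.first(), Some(&3));
-/// assert!(v.contains(&2));
-/// ```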
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> ops::Deref for Vec<T, A> {
- type Target = [T];
-
- #[inline]
- fn deref(&self) -> &[T] {
- unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
- #[inline]
- fn deref_mut(&mut self) -> &mut [T] {
- unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
- #[cfg(not(test))]
- fn clone(&self) -> Self {
- let alloc = self.allocator().clone();
- <[T]>::to_vec_in(&**self, alloc)
- }
-
- // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
- // required for this method definition, is not available. Instead use the
- // `slice::to_vec` function which is only available with cfg(test)
- // NB see the slice::hack module in slice.rs for more information
- #[cfg(test)]
- fn clone(&self) -> Self {
- let alloc = self.allocator().clone();
- crate::slice::to_vec(&**self, alloc)
- }
-
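-    /// Reuses `self`'s existing allocation where possible, instead of
-    /// allocating a fresh buffer as `other.clone()` would:
-    ///
-    /// ```
-    /// let mut target = vec![0; 8];
-    /// target.clone_from(&vec![1, 2, 3]);
-    /// assert_eq!(target, [1, 2, 3]);
-    /// ```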
- fn clone_from(&mut self, other: &Self) {
- crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self);
- }
-}
-
-/// The hash of a vector is the same as that of the corresponding slice,
-/// as required by the `core::borrow::Borrow` implementation.
-///
-/// ```
-/// use std::hash::BuildHasher;
-///
-/// let b = std::hash::RandomState::new();
-/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
-/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
-/// assert_eq!(b.hash_one(v), b.hash_one(s));
-/// ```
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Hash, A: Allocator> Hash for Vec<T, A> {
- #[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
- Hash::hash(&**self, state)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented(
- message = "vector indices are of type `usize` or ranges of `usize`",
- label = "vector indices are of type `usize` or ranges of `usize`"
-)]
-impl<T, I: SliceIndex<[T]>, A: Allocator> Index<I> for Vec<T, A> {
- type Output = I::Output;
-
- #[inline]
- fn index(&self, index: I) -> &Self::Output {
- Index::index(&**self, index)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented(
- message = "vector indices are of type `usize` or ranges of `usize`",
- label = "vector indices are of type `usize` or ranges of `usize`"
-)]
-impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
- #[inline]
- fn index_mut(&mut self, index: I) -> &mut Self::Output {
- IndexMut::index_mut(&mut **self, index)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> FromIterator<T> for Vec<T> {
- #[inline]
- fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
- <Self as SpecFromIter<T, I::IntoIter>>::from_iter(iter.into_iter())
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> IntoIterator for Vec<T, A> {
- type Item = T;
- type IntoIter = IntoIter<T, A>;
-
- /// Creates a consuming iterator, that is, one that moves each value out of
- /// the vector (from start to end). The vector cannot be used after calling
- /// this.
- ///
- /// # Examples
- ///
- /// ```
- /// let v = vec!["a".to_string(), "b".to_string()];
- /// let mut v_iter = v.into_iter();
- ///
- /// let first_element: Option<String> = v_iter.next();
- ///
- /// assert_eq!(first_element, Some("a".to_string()));
- /// assert_eq!(v_iter.next(), Some("b".to_string()));
- /// assert_eq!(v_iter.next(), None);
- /// ```
- #[inline]
- fn into_iter(self) -> Self::IntoIter {
- unsafe {
- let mut me = ManuallyDrop::new(self);
- let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
- let begin = me.as_mut_ptr();
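-            // For zero-sized types the `end` pointer encodes the remaining
-            // length in its address, since every ZST element is read from the
-            // same dangling pointer.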
- let end = if T::IS_ZST {
- begin.wrapping_byte_add(me.len())
- } else {
- begin.add(me.len()) as *const T
- };
- let cap = me.buf.capacity();
- IntoIter {
- buf: NonNull::new_unchecked(begin),
- phantom: PhantomData,
- cap,
- alloc,
- ptr: begin,
- end,
- }
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
- type Item = &'a T;
- type IntoIter = slice::Iter<'a, T>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.iter()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
- type Item = &'a mut T;
- type IntoIter = slice::IterMut<'a, T>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.iter_mut()
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> Extend<T> for Vec<T, A> {
- #[inline]
- fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
- }
-
- #[inline]
- fn extend_one(&mut self, item: T) {
- self.push(item);
- }
-
- #[inline]
- fn extend_reserve(&mut self, additional: usize) {
- self.reserve(additional);
- }
-}
-
-impl<T, A: Allocator> Vec<T, A> {
- // leaf method to which various SpecFrom/SpecExtend implementations delegate when
- // they have no further optimizations to apply
- #[cfg(not(no_global_oom_handling))]
- fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
- // This is the case for a general iterator.
- //
- // This function should be the moral equivalent of:
- //
- // for item in iterator {
- // self.push(item);
- // }
- while let Some(element) = iterator.next() {
- let len = self.len();
- if len == self.capacity() {
- let (lower, _) = iterator.size_hint();
- self.reserve(lower.saturating_add(1));
- }
- unsafe {
- ptr::write(self.as_mut_ptr().add(len), element);
- // Since next() executes user code which can panic we have to bump the length
- // after each step.
- // NB can't overflow since we would have had to alloc the address space
- self.set_len(len + 1);
- }
- }
- }
-
- // leaf method to which various SpecFrom/SpecExtend implementations delegate when
- // they have no further optimizations to apply
- fn try_extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) -> Result<(), TryReserveError> {
- // This is the case for a general iterator.
- //
- // This function should be the moral equivalent of:
- //
- // for item in iterator {
- // self.push(item);
- // }
- while let Some(element) = iterator.next() {
- let len = self.len();
- if len == self.capacity() {
- let (lower, _) = iterator.size_hint();
- self.try_reserve(lower.saturating_add(1))?;
- }
- unsafe {
- ptr::write(self.as_mut_ptr().add(len), element);
- // Since next() executes user code which can panic we have to bump the length
- // after each step.
- // NB can't overflow since we would have had to alloc the address space
- self.set_len(len + 1);
- }
- }
-
- Ok(())
- }
-
- // specific extend for `TrustedLen` iterators, called both by the specializations
- // and internal places where resolving specialization makes compilation slower
- #[cfg(not(no_global_oom_handling))]
- fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
- let (low, high) = iterator.size_hint();
- if let Some(additional) = high {
- debug_assert_eq!(
- low,
- additional,
- "TrustedLen iterator's size hint is not exact: {:?}",
- (low, high)
- );
- self.reserve(additional);
- unsafe {
- let ptr = self.as_mut_ptr();
- let mut local_len = SetLenOnDrop::new(&mut self.len);
- iterator.for_each(move |element| {
- ptr::write(ptr.add(local_len.current_len()), element);
- // Since the loop executes user code which can panic we have to update
- // the length every step to correctly drop what we've written.
- // NB can't overflow since we would have had to alloc the address space
- local_len.increment_len(1);
- });
- }
- } else {
-            // Per the TrustedLen contract, a `None` upper bound means that the iterator length
- // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
- // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
- // This avoids additional codegen for a fallback code path which would eventually
- // panic anyway.
- panic!("capacity overflow");
- }
- }
-
- // specific extend for `TrustedLen` iterators, called both by the specializations
- // and internal places where resolving specialization makes compilation slower
- fn try_extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) -> Result<(), TryReserveError> {
- let (low, high) = iterator.size_hint();
- if let Some(additional) = high {
- debug_assert_eq!(
- low,
- additional,
- "TrustedLen iterator's size hint is not exact: {:?}",
- (low, high)
- );
- self.try_reserve(additional)?;
- unsafe {
- let ptr = self.as_mut_ptr();
- let mut local_len = SetLenOnDrop::new(&mut self.len);
- iterator.for_each(move |element| {
- ptr::write(ptr.add(local_len.current_len()), element);
- // Since the loop executes user code which can panic we have to update
- // the length every step to correctly drop what we've written.
- // NB can't overflow since we would have had to alloc the address space
- local_len.increment_len(1);
- });
- }
- Ok(())
- } else {
- Err(TryReserveErrorKind::CapacityOverflow.into())
- }
- }
-
- /// Creates a splicing iterator that replaces the specified range in the vector
- /// with the given `replace_with` iterator and yields the removed items.
- /// `replace_with` does not need to be the same length as `range`.
- ///
- /// `range` is removed even if the iterator is not consumed until the end.
- ///
- /// It is unspecified how many elements are removed from the vector
- /// if the `Splice` value is leaked.
- ///
- /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
- ///
- /// This is optimal if:
- ///
- /// * The tail (elements in the vector after `range`) is empty,
-    /// * or `replace_with` yields no more elements than `range`’s length
- /// * or the lower bound of its `size_hint()` is exact.
- ///
- /// Otherwise, a temporary vector is allocated and the tail is moved twice.
- ///
- /// # Panics
- ///
- /// Panics if the starting point is greater than the end point or if
- /// the end point is greater than the length of the vector.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut v = vec![1, 2, 3, 4];
- /// let new = [7, 8, 9];
- /// let u: Vec<_> = v.splice(1..3, new).collect();
- /// assert_eq!(v, &[1, 7, 8, 9, 4]);
- /// assert_eq!(u, &[2, 3]);
- /// ```
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- #[stable(feature = "vec_splice", since = "1.21.0")]
- pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, A>
- where
- R: RangeBounds<usize>,
- I: IntoIterator<Item = T>,
- {
- Splice { drain: self.drain(range), replace_with: replace_with.into_iter() }
- }
-
- /// Creates an iterator which uses a closure to determine if an element should be removed.
- ///
- /// If the closure returns true, then the element is removed and yielded.
- /// If the closure returns false, the element will remain in the vector and will not be yielded
- /// by the iterator.
- ///
- /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
- /// or the iteration short-circuits, then the remaining elements will be retained.
- /// Use [`retain`] with a negated predicate if you do not need the returned iterator.
- ///
- /// [`retain`]: Vec::retain
- ///
- /// Using this method is equivalent to the following code:
- ///
- /// ```
- /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 };
- /// # let mut vec = vec![1, 2, 3, 4, 5, 6];
- /// let mut i = 0;
- /// while i < vec.len() {
- /// if some_predicate(&mut vec[i]) {
- /// let val = vec.remove(i);
- /// // your code here
- /// } else {
- /// i += 1;
- /// }
- /// }
- ///
- /// # assert_eq!(vec, vec![1, 4, 5]);
- /// ```
- ///
- /// But `extract_if` is easier to use. `extract_if` is also more efficient,
- /// because it can backshift the elements of the array in bulk.
- ///
- /// Note that `extract_if` also lets you mutate every element in the filter closure,
- /// regardless of whether you choose to keep or remove it.
- ///
- /// # Examples
- ///
- /// Splitting an array into evens and odds, reusing the original allocation:
- ///
- /// ```
- /// #![feature(extract_if)]
- /// let mut numbers = vec![1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15];
- ///
- /// let evens = numbers.extract_if(|x| *x % 2 == 0).collect::<Vec<_>>();
- /// let odds = numbers;
- ///
- /// assert_eq!(evens, vec![2, 4, 6, 8, 14]);
- /// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]);
- /// ```
- #[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
- pub fn extract_if<F>(&mut self, filter: F) -> ExtractIf<'_, T, F, A>
- where
- F: FnMut(&mut T) -> bool,
- {
- let old_len = self.len();
-
- // Guard against us getting leaked (leak amplification)
- unsafe {
- self.set_len(0);
- }
-
- ExtractIf { vec: self, idx: 0, del: 0, old_len, pred: filter }
- }
-}
-
-/// Extend implementation that copies elements out of references before pushing them onto the Vec.
-///
-/// This implementation is specialized for slice iterators, where it uses [`copy_from_slice`] to
-/// append the entire slice at once.
-///
-/// [`copy_from_slice`]: slice::copy_from_slice
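-///
-/// ```
-/// let mut v = vec![1, 2, 3];
-/// v.extend(&[4, 5]); // the referenced elements are copied, not moved
-/// assert_eq!(v, [1, 2, 3, 4, 5]);
-/// ```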
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "extend_ref", since = "1.2.0")]
-impl<'a, T: Copy + 'a, A: Allocator> Extend<&'a T> for Vec<T, A> {
- fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
- self.spec_extend(iter.into_iter())
- }
-
- #[inline]
- fn extend_one(&mut self, &item: &'a T) {
- self.push(item);
- }
-
- #[inline]
- fn extend_reserve(&mut self, additional: usize) {
- self.reserve(additional);
- }
-}
-
-/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A1, A2> PartialOrd<Vec<T, A2>> for Vec<T, A1>
-where
- T: PartialOrd,
- A1: Allocator,
- A2: Allocator,
-{
- #[inline]
- fn partial_cmp(&self, other: &Vec<T, A2>) -> Option<Ordering> {
- PartialOrd::partial_cmp(&**self, &**other)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
-
-/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- Ord::cmp(&**self, &**other)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
- fn drop(&mut self) {
- unsafe {
- // use drop for [T]
-            // use a raw slice to refer to the elements of the vector as the weakest
-            // necessary type; this avoids questions of validity in certain cases
- ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len))
- }
- // RawVec handles deallocation
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Default for Vec<T> {
- /// Creates an empty `Vec<T>`.
- ///
- /// The vector will not allocate until elements are pushed onto it.
- fn default() -> Vec<T> {
- Vec::new()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&**self, f)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> AsRef<Vec<T, A>> for Vec<T, A> {
- fn as_ref(&self) -> &Vec<T, A> {
- self
- }
-}
-
-#[stable(feature = "vec_as_mut", since = "1.5.0")]
-impl<T, A: Allocator> AsMut<Vec<T, A>> for Vec<T, A> {
- fn as_mut(&mut self) -> &mut Vec<T, A> {
- self
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> AsRef<[T]> for Vec<T, A> {
- fn as_ref(&self) -> &[T] {
- self
- }
-}
-
-#[stable(feature = "vec_as_mut", since = "1.5.0")]
-impl<T, A: Allocator> AsMut<[T]> for Vec<T, A> {
- fn as_mut(&mut self) -> &mut [T] {
- self
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone> From<&[T]> for Vec<T> {
- /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(Vec::from(&[1, 2, 3][..]), vec![1, 2, 3]);
- /// ```
- #[cfg(not(test))]
- fn from(s: &[T]) -> Vec<T> {
- s.to_vec()
- }
- #[cfg(test)]
- fn from(s: &[T]) -> Vec<T> {
- crate::slice::to_vec(s, Global)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "vec_from_mut", since = "1.19.0")]
-impl<T: Clone> From<&mut [T]> for Vec<T> {
- /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(Vec::from(&mut [1, 2, 3][..]), vec![1, 2, 3]);
- /// ```
- #[cfg(not(test))]
- fn from(s: &mut [T]) -> Vec<T> {
- s.to_vec()
- }
- #[cfg(test)]
- fn from(s: &mut [T]) -> Vec<T> {
- crate::slice::to_vec(s, Global)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "vec_from_array_ref", since = "1.74.0")]
-impl<T: Clone, const N: usize> From<&[T; N]> for Vec<T> {
- /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(Vec::from(&[1, 2, 3]), vec![1, 2, 3]);
- /// ```
- fn from(s: &[T; N]) -> Vec<T> {
- Self::from(s.as_slice())
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "vec_from_array_ref", since = "1.74.0")]
-impl<T: Clone, const N: usize> From<&mut [T; N]> for Vec<T> {
- /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(Vec::from(&mut [1, 2, 3]), vec![1, 2, 3]);
- /// ```
- fn from(s: &mut [T; N]) -> Vec<T> {
- Self::from(s.as_mut_slice())
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "vec_from_array", since = "1.44.0")]
-impl<T, const N: usize> From<[T; N]> for Vec<T> {
- /// Allocate a `Vec<T>` and move `s`'s items into it.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(Vec::from([1, 2, 3]), vec![1, 2, 3]);
- /// ```
- #[cfg(not(test))]
- fn from(s: [T; N]) -> Vec<T> {
- <[T]>::into_vec(Box::new(s))
- }
-
- #[cfg(test)]
- fn from(s: [T; N]) -> Vec<T> {
- crate::slice::into_vec(Box::new(s))
- }
-}
-
-#[cfg(not(no_borrow))]
-#[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
-impl<'a, T> From<Cow<'a, [T]>> for Vec<T>
-where
- [T]: ToOwned<Owned = Vec<T>>,
-{
- /// Convert a clone-on-write slice into a vector.
- ///
- /// If `s` already owns a `Vec<T>`, it will be returned directly.
- /// If `s` is borrowing a slice, a new `Vec<T>` will be allocated and
- /// filled by cloning `s`'s items into it.
- ///
- /// # Examples
- ///
- /// ```
- /// # use std::borrow::Cow;
- /// let o: Cow<'_, [i32]> = Cow::Owned(vec![1, 2, 3]);
- /// let b: Cow<'_, [i32]> = Cow::Borrowed(&[1, 2, 3]);
- /// assert_eq!(Vec::from(o), Vec::from(b));
- /// ```
- fn from(s: Cow<'a, [T]>) -> Vec<T> {
- s.into_owned()
- }
-}
-
-// note: test pulls in std, which causes errors here
-#[cfg(not(test))]
-#[stable(feature = "vec_from_box", since = "1.18.0")]
-impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
- /// Convert a boxed slice into a vector by transferring ownership of
- /// the existing heap allocation.
- ///
- /// # Examples
- ///
- /// ```
- /// let b: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
- /// assert_eq!(Vec::from(b), vec![1, 2, 3]);
- /// ```
- fn from(s: Box<[T], A>) -> Self {
- s.into_vec()
- }
-}
-
-// note: test pulls in std, which causes errors here
-#[cfg(not(no_global_oom_handling))]
-#[cfg(not(test))]
-#[stable(feature = "box_from_vec", since = "1.20.0")]
-impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
- /// Convert a vector into a boxed slice.
- ///
- /// If `v` has excess capacity, its items will be moved into a
- /// newly-allocated buffer with exactly the right capacity.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
- /// ```
- ///
- /// Any excess capacity is removed:
- /// ```
- /// let mut vec = Vec::with_capacity(10);
- /// vec.extend([1, 2, 3]);
- ///
- /// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
- /// ```
- fn from(v: Vec<T, A>) -> Self {
- v.into_boxed_slice()
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl From<&str> for Vec<u8> {
- /// Allocate a `Vec<u8>` and fill it with a UTF-8 string.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(Vec::from("123"), vec![b'1', b'2', b'3']);
- /// ```
- fn from(s: &str) -> Vec<u8> {
- From::from(s.as_bytes())
- }
-}
-
-#[stable(feature = "array_try_from_vec", since = "1.48.0")]
-impl<T, A: Allocator, const N: usize> TryFrom<Vec<T, A>> for [T; N] {
- type Error = Vec<T, A>;
-
- /// Gets the entire contents of the `Vec<T>` as an array,
- /// if its size exactly matches that of the requested array.
- ///
- /// # Examples
- ///
- /// ```
- /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
- /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
- /// ```
- ///
- /// If the length doesn't match, the input comes back in `Err`:
- /// ```
- /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
- /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
- /// ```
- ///
- /// If you're fine with just getting a prefix of the `Vec<T>`,
- /// you can call [`.truncate(N)`](Vec::truncate) first.
- /// ```
- /// let mut v = String::from("hello world").into_bytes();
- /// v.sort();
- /// v.truncate(2);
- /// let [a, b]: [_; 2] = v.try_into().unwrap();
- /// assert_eq!(a, b' ');
- /// assert_eq!(b, b'd');
- /// ```
- fn try_from(mut vec: Vec<T, A>) -> Result<[T; N], Vec<T, A>> {
- if vec.len() != N {
- return Err(vec);
- }
-
- // SAFETY: `.set_len(0)` is always sound.
- unsafe { vec.set_len(0) };
-
- // SAFETY: A `Vec`'s pointer is always aligned properly, and
- // the alignment the array needs is the same as the items.
- // We checked earlier that we have sufficient items.
- // The items will not double-drop as the `set_len`
- // tells the `Vec` not to also drop them.
- let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
- Ok(array)
- }
-}
diff --git a/rust/alloc/vec/partial_eq.rs b/rust/alloc/vec/partial_eq.rs
deleted file mode 100644
index 10ad4e492287..000000000000
--- a/rust/alloc/vec/partial_eq.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-use crate::alloc::Allocator;
-#[cfg(not(no_global_oom_handling))]
-use crate::borrow::Cow;
-
-use super::Vec;
-
-macro_rules! __impl_slice_eq1 {
- ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
- #[$stability]
- impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
- where
- T: PartialEq<U>,
- $($ty: $bound)?
- {
- #[inline]
- fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
- #[inline]
- fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
- }
- }
-}
-
-__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>, #[stable(feature = "rust1", since = "1.0.0")] }
-__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
-__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
-__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
-__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
-__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
-__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
-#[cfg(not(no_global_oom_handling))]
-__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
-#[cfg(not(no_global_oom_handling))]
-__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
-#[cfg(not(no_global_oom_handling))]
-__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
-__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] }
-__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] }
-
-// NOTE: some less important impls are omitted to reduce code bloat
-// FIXME(Centril): Reconsider this?
-//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
-//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
-//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
-//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
-//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
-//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
-//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
diff --git a/rust/alloc/vec/set_len_on_drop.rs b/rust/alloc/vec/set_len_on_drop.rs
deleted file mode 100644
index d3c7297b80ec..000000000000
--- a/rust/alloc/vec/set_len_on_drop.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
-//
-// The idea is: The length field in SetLenOnDrop is a local variable
-// that the optimizer will see does not alias with any stores through the Vec's data
-// pointer. This is a workaround for alias analysis issue #32155
-pub(super) struct SetLenOnDrop<'a> {
- len: &'a mut usize,
- local_len: usize,
-}
-
-impl<'a> SetLenOnDrop<'a> {
- #[inline]
- pub(super) fn new(len: &'a mut usize) -> Self {
- SetLenOnDrop { local_len: *len, len }
- }
-
- #[inline]
- pub(super) fn increment_len(&mut self, increment: usize) {
- self.local_len += increment;
- }
-
- #[inline]
- pub(super) fn current_len(&self) -> usize {
- self.local_len
- }
-}
-
-impl Drop for SetLenOnDrop<'_> {
- #[inline]
- fn drop(&mut self) {
- *self.len = self.local_len;
- }
-}
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
deleted file mode 100644
index ada919537446..000000000000
--- a/rust/alloc/vec/spec_extend.rs
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-use crate::alloc::Allocator;
-use crate::collections::TryReserveError;
-use core::iter::TrustedLen;
-use core::slice::{self};
-
-use super::{IntoIter, Vec};
-
-// Specialization trait used for Vec::extend
-#[cfg(not(no_global_oom_handling))]
-pub(super) trait SpecExtend<T, I> {
- fn spec_extend(&mut self, iter: I);
-}
-
-// Specialization trait used for Vec::try_extend
-pub(super) trait TrySpecExtend<T, I> {
- fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
-where
- I: Iterator<Item = T>,
-{
- default fn spec_extend(&mut self, iter: I) {
- self.extend_desugared(iter)
- }
-}
-
-impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
-where
- I: Iterator<Item = T>,
-{
- default fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
- self.try_extend_desugared(iter)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
-where
- I: TrustedLen<Item = T>,
-{
- default fn spec_extend(&mut self, iterator: I) {
- self.extend_trusted(iterator)
- }
-}
-
-impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
-where
- I: TrustedLen<Item = T>,
-{
- default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
- self.try_extend_trusted(iterator)
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
- fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
- unsafe {
- self.append_elements(iterator.as_slice() as _);
- }
- iterator.forget_remaining_elements();
- }
-}
-
-impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
- fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> {
- unsafe {
- self.try_append_elements(iterator.as_slice() as _)?;
- }
- iterator.forget_remaining_elements();
- Ok(())
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for Vec<T, A>
-where
- I: Iterator<Item = &'a T>,
- T: Clone,
-{
- default fn spec_extend(&mut self, iterator: I) {
- self.spec_extend(iterator.cloned())
- }
-}
-
-impl<'a, T: 'a, I, A: Allocator> TrySpecExtend<&'a T, I> for Vec<T, A>
-where
- I: Iterator<Item = &'a T>,
- T: Clone,
-{
- default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
- self.try_spec_extend(iterator.cloned())
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
-where
- T: Copy,
-{
- fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
- let slice = iterator.as_slice();
- unsafe { self.append_elements(slice) };
- }
-}
-
-impl<'a, T: 'a, A: Allocator> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
-where
- T: Copy,
-{
- fn try_spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
- let slice = iterator.as_slice();
- unsafe { self.try_append_elements(slice) }
- }
-}
diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters
index a721d466bee4..0f96af8b9a7f 100644
--- a/rust/bindgen_parameters
+++ b/rust/bindgen_parameters
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
+# We want to map these types to `isize`/`usize` manually, instead of
+# defining them as `int`/`long` depending on the platform's bitwidth.
+--blocklist-type __kernel_s?size_t
+--blocklist-type __kernel_ptrdiff_t
+
--opaque-type xregs_state
--opaque-type desc_struct
--opaque-type arch_lbr_state
@@ -24,3 +29,8 @@
# These functions use the `__preserve_most` calling convention, which neither bindgen
# nor Rust currently understand, and which Clang currently declares to be unstable.
--blocklist-function __list_.*_report
+
+# These constants are sometimes not recognized by bindgen, depending on the config.
+# We use const helpers to aid bindgen; to avoid conflicts when the constants are
+# recognized, we block generation of the non-helper constants.
+--blocklist-item ARCH_SLAB_MINALIGN
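+# For example, `ARCH_SLAB_MINALIGN` is instead exposed to Rust through the
+# `RUST_CONST_HELPER_ARCH_SLAB_MINALIGN` constant in `bindings_helper.h`.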
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 65b98831b975..a5a6fb45d405 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -6,19 +6,64 @@
* Sorted alphabetically.
*/
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
#include <kunit/test.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/blk-mq.h>
+#include <linux/blk_types.h>
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/configfs.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cred.h>
+#include <linux/device/faux.h>
+#include <linux/dma-mapping.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
+#include <linux/file.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
#include <linux/jiffies.h>
+#include <linux/jump_label.h>
#include <linux/mdio.h>
+#include <linux/miscdevice.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
#include <linux/phy.h>
+#include <linux/pid_namespace.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/poll.h>
+#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/sched.h>
+#include <linux/security.h>
#include <linux/slab.h>
+#include <linux/tracepoint.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <trace/events/rust_sample.h>
+
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+// Used by `#[export]` in `drivers/gpu/drm/drm_panic_qr.rs`.
+#include <drm/drm_panic.h>
+#endif
/* `bindgen` gets confused at certain things. */
const size_t RUST_CONST_HELPER_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
+const size_t RUST_CONST_HELPER_PAGE_SIZE = PAGE_SIZE;
+const gfp_t RUST_CONST_HELPER_GFP_ATOMIC = GFP_ATOMIC;
const gfp_t RUST_CONST_HELPER_GFP_KERNEL = GFP_KERNEL;
+const gfp_t RUST_CONST_HELPER_GFP_KERNEL_ACCOUNT = GFP_KERNEL_ACCOUNT;
+const gfp_t RUST_CONST_HELPER_GFP_NOWAIT = GFP_NOWAIT;
const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
+const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
+const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN;
+const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
+const fop_flags_t RUST_CONST_HELPER_FOP_UNSIGNED_OFFSET = FOP_UNSIGNED_OFFSET;
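+
+/*
+ * Note: the `RUST_CONST_HELPER_` prefix is stripped when the Rust bindings are
+ * generated, so Rust code refers to these constants as e.g.
+ * `bindings::GFP_KERNEL` (see the uses in `rust/kernel/alloc.rs`).
+ */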
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index 40ddaee50d8b..a08eb5518cac 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -24,7 +24,15 @@
unsafe_op_in_unsafe_fn
)]
+#[allow(dead_code)]
+#[allow(clippy::undocumented_unsafe_blocks)]
+#[cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))]
mod bindings_raw {
+ // Manual definition for blocklisted types.
+ type __kernel_size_t = usize;
+ type __kernel_ssize_t = isize;
+ type __kernel_ptrdiff_t = isize;
+
// Use glob import here to expose all helpers.
// Symbols defined within the module will take precedence to the glob import.
pub use super::bindings_helper::*;
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index bba2922c6ef7..dd16c1dc899c 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -40,16 +40,19 @@ macro_rules! define_panicking_intrinsics(
define_panicking_intrinsics!("`f32` should not be used", {
__addsf3,
__eqsf2,
+ __extendsfdf2,
__gesf2,
__lesf2,
__ltsf2,
__mulsf3,
__nesf2,
+ __truncdfsf2,
__unordsf2,
});
define_panicking_intrinsics!("`f64` should not be used", {
__adddf3,
+ __eqdf2,
__ledf2,
__ltdf2,
__muldf3,
@@ -70,5 +73,29 @@ define_panicking_intrinsics!("`u128` should not be used", {
__umodti3,
});
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f32` should not be used", {
+ __aeabi_fadd,
+ __aeabi_fmul,
+ __aeabi_fcmpeq,
+ __aeabi_fcmple,
+ __aeabi_fcmplt,
+ __aeabi_fcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f64` should not be used", {
+ __aeabi_dadd,
+ __aeabi_dmul,
+ __aeabi_dcmple,
+ __aeabi_dcmplt,
+ __aeabi_dcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`u64` division/modulo should not be used", {
+ __aeabi_uldivmod,
+});
+
// NOTE: if you are adding a new intrinsic here, you should also add it to
// `redirect-intrinsics` in `rust/Makefile`.
diff --git a/rust/exports.c b/rust/exports.c
index 3803c21d1403..587f0e776aba 100644
--- a/rust/exports.c
+++ b/rust/exports.c
@@ -3,9 +3,9 @@
* A hack to export Rust symbols for loadable modules without having to redo
* the entire `include/linux/export.h` logic in Rust.
*
- * This requires the Rust's new/future `v0` mangling scheme because the default
- * one ("legacy") uses invalid characters for C identifiers (thus we cannot use
- * the `EXPORT_SYMBOL_*` macros).
+ * This requires Rust's new/future `v0` mangling scheme because the default one
+ * ("legacy") uses invalid characters for C identifiers (thus we cannot use the
+ * `EXPORT_SYMBOL_*` macros).
*
* All symbols are exported as GPL-only to guarantee no GPL-only feature is
* accidentally exposed.
@@ -16,7 +16,7 @@
#define EXPORT_SYMBOL_RUST_GPL(sym) extern int sym; EXPORT_SYMBOL_GPL(sym)
#include "exports_core_generated.h"
-#include "exports_alloc_generated.h"
+#include "exports_helpers_generated.h"
#include "exports_bindings_generated.h"
#include "exports_kernel_generated.h"
diff --git a/rust/ffi.rs b/rust/ffi.rs
new file mode 100644
index 000000000000..584f75b49862
--- /dev/null
+++ b/rust/ffi.rs
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Foreign function interface (FFI) types.
+//!
+//! This crate provides a mapping from C primitive types to Rust ones.
+//!
+//! The Rust [`core`] crate provides [`core::ffi`], which maps integer types to the platform's
+//! default C ABI. The kernel does not use [`core::ffi`], so that it can customise the mapping
+//! where it deviates from the platform default.
+
+#![no_std]
+
+macro_rules! alias {
+ ($($name:ident = $ty:ty;)*) => {$(
+ #[allow(non_camel_case_types, missing_docs)]
+ pub type $name = $ty;
+
+ // Check size compatibility with `core`.
+ const _: () = assert!(
+ core::mem::size_of::<$name>() == core::mem::size_of::<core::ffi::$name>()
+ );
+ )*}
+}
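+
+// For instance, `alias! { c_int = i32; }` expands to (roughly):
+//
+//     #[allow(non_camel_case_types, missing_docs)]
+//     pub type c_int = i32;
+//     const _: () = assert!(
+//         core::mem::size_of::<c_int>() == core::mem::size_of::<core::ffi::c_int>()
+//     );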
+
+alias! {
+ // `core::ffi::c_char` is either `i8` or `u8` depending on architecture. In the kernel, we use
+ // `-funsigned-char` so it's always mapped to `u8`.
+ c_char = u8;
+
+ c_schar = i8;
+ c_uchar = u8;
+
+ c_short = i16;
+ c_ushort = u16;
+
+ c_int = i32;
+ c_uint = u32;
+
+ // In the kernel, `intptr_t` is defined to be `long` on all platforms, so we can map the type to
+ // `isize`.
+ c_long = isize;
+ c_ulong = usize;
+
+ c_longlong = i64;
+ c_ulonglong = u64;
+}
+
+pub use core::ffi::c_void;
diff --git a/rust/helpers.c b/rust/helpers.c
deleted file mode 100644
index 70e59efd92bc..000000000000
--- a/rust/helpers.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
- * cannot be called either. This file explicitly creates functions ("helpers")
- * that wrap those so that they can be called from Rust.
- *
- * Even though Rust kernel modules should never use directly the bindings, some
- * of these helpers need to be exported because Rust generics and inlined
- * functions may not get their code generated in the crate where they are
- * defined. Other helpers, called from non-inline functions, may not be
- * exported, in principle. However, in general, the Rust compiler does not
- * guarantee codegen will be performed for a non-inline function either.
- * Therefore, this file exports all the helpers. In the future, this may be
- * revisited to reduce the number of exports after the compiler is informed
- * about the places codegen is required.
- *
- * All symbols are exported as GPL-only to guarantee no GPL-only feature is
- * accidentally exposed.
- *
- * Sorted alphabetically.
- */
-
-#include <kunit/test-bug.h>
-#include <linux/bug.h>
-#include <linux/build_bug.h>
-#include <linux/err.h>
-#include <linux/errname.h>
-#include <linux/mutex.h>
-#include <linux/refcount.h>
-#include <linux/sched/signal.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-__noreturn void rust_helper_BUG(void)
-{
- BUG();
-}
-EXPORT_SYMBOL_GPL(rust_helper_BUG);
-
-void rust_helper_mutex_lock(struct mutex *lock)
-{
- mutex_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
-
-void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
-#else
- spin_lock_init(lock);
-#endif
-}
-EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);
-
-void rust_helper_spin_lock(spinlock_t *lock)
-{
- spin_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_lock);
-
-void rust_helper_spin_unlock(spinlock_t *lock)
-{
- spin_unlock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
-
-void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
-{
- init_wait(wq_entry);
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_wait);
-
-int rust_helper_signal_pending(struct task_struct *t)
-{
- return signal_pending(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
-
-refcount_t rust_helper_REFCOUNT_INIT(int n)
-{
- return (refcount_t)REFCOUNT_INIT(n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);
-
-void rust_helper_refcount_inc(refcount_t *r)
-{
- refcount_inc(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
-
-bool rust_helper_refcount_dec_and_test(refcount_t *r)
-{
- return refcount_dec_and_test(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
-
-__force void *rust_helper_ERR_PTR(long err)
-{
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_ERR_PTR);
-
-bool rust_helper_IS_ERR(__force const void *ptr)
-{
- return IS_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);
-
-long rust_helper_PTR_ERR(__force const void *ptr)
-{
- return PTR_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
-
-const char *rust_helper_errname(int err)
-{
- return errname(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_errname);
-
-struct task_struct *rust_helper_get_current(void)
-{
- return current;
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_current);
-
-void rust_helper_get_task_struct(struct task_struct *t)
-{
- get_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
-
-void rust_helper_put_task_struct(struct task_struct *t)
-{
- put_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
-
-struct kunit *rust_helper_kunit_get_current_test(void)
-{
- return kunit_get_current_test();
-}
-EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
-
-void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
- bool onstack, const char *name,
- struct lock_class_key *key)
-{
- __init_work(work, onstack);
- work->data = (atomic_long_t)WORK_DATA_INIT();
- lockdep_init_map(&work->lockdep_map, name, key, 0);
- INIT_LIST_HEAD(&work->entry);
- work->func = func;
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_work_with_key);
-
-/*
- * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
- * use it in contexts where Rust expects a `usize` like slice (array) indices.
- * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
- * pointer) but not necessarily the same as `size_t` (can hold the size of any
- * single object). Most modern platforms use the same concrete integer type for
- * both of them, but in case we find ourselves on a platform where
- * that's not true, fail early instead of risking ABI or
- * integer-overflow issues.
- *
- * If your platform fails this assertion, it means that you are in
- * danger of integer-overflow bugs (even if you attempt to add
- * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
- * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
- * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
- */
-static_assert(
- sizeof(size_t) == sizeof(uintptr_t) &&
- __alignof__(size_t) == __alignof__(uintptr_t),
- "Rust code expects C `size_t` to match Rust `usize`"
-);
diff --git a/rust/helpers/auxiliary.c b/rust/helpers/auxiliary.c
new file mode 100644
index 000000000000..0db3860d774e
--- /dev/null
+++ b/rust/helpers/auxiliary.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/auxiliary_bus.h>
+
+void rust_helper_auxiliary_set_drvdata(struct auxiliary_device *adev, void *data)
+{
+ auxiliary_set_drvdata(adev, data);
+}
+
+void *rust_helper_auxiliary_get_drvdata(struct auxiliary_device *adev)
+{
+ return auxiliary_get_drvdata(adev);
+}
+
+void rust_helper_auxiliary_device_uninit(struct auxiliary_device *adev)
+{
+ return auxiliary_device_uninit(adev);
+}
+
+void rust_helper_auxiliary_device_delete(struct auxiliary_device *adev)
+{
+ return auxiliary_device_delete(adev);
+}
diff --git a/rust/helpers/blk.c b/rust/helpers/blk.c
new file mode 100644
index 000000000000..cc9f4e6a2d23
--- /dev/null
+++ b/rust/helpers/blk.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-mq.h>
+#include <linux/blkdev.h>
+
+void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
+{
+ return blk_mq_rq_from_pdu(pdu);
+}
diff --git a/rust/helpers/bug.c b/rust/helpers/bug.c
new file mode 100644
index 000000000000..e2d13babc737
--- /dev/null
+++ b/rust/helpers/bug.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+
+__noreturn void rust_helper_BUG(void)
+{
+ BUG();
+}
diff --git a/rust/helpers/build_assert.c b/rust/helpers/build_assert.c
new file mode 100644
index 000000000000..6a54b2680b14
--- /dev/null
+++ b/rust/helpers/build_assert.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/build_bug.h>
+
+/*
+ * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
+ * use it in contexts where Rust expects a `usize` like slice (array) indices.
+ * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
+ * pointer) but not necessarily the same as `size_t` (can hold the size of any
+ * single object). Most modern platforms use the same concrete integer type for
+ * both of them, but in case we find ourselves on a platform where
+ * that's not true, fail early instead of risking ABI or
+ * integer-overflow issues.
+ *
+ * If your platform fails this assertion, it means that you are in
+ * danger of integer-overflow bugs (even if you attempt to add
+ * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
+ * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
+ */
+static_assert(
+ sizeof(size_t) == sizeof(uintptr_t) &&
+ __alignof__(size_t) == __alignof__(uintptr_t),
+ "Rust code expects C `size_t` to match Rust `usize`"
+);
diff --git a/rust/helpers/build_bug.c b/rust/helpers/build_bug.c
new file mode 100644
index 000000000000..44e579488037
--- /dev/null
+++ b/rust/helpers/build_bug.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/errname.h>
+
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
diff --git a/rust/helpers/clk.c b/rust/helpers/clk.c
new file mode 100644
index 000000000000..6d04372c9f3b
--- /dev/null
+++ b/rust/helpers/clk.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk.h>
+
+/*
+ * The "inline" implementations of the helpers below are only available when
+ * CONFIG_HAVE_CLK or CONFIG_HAVE_CLK_PREPARE, respectively, is not set.
+ */
+#ifndef CONFIG_HAVE_CLK
+struct clk *rust_helper_clk_get(struct device *dev, const char *id)
+{
+ return clk_get(dev, id);
+}
+
+void rust_helper_clk_put(struct clk *clk)
+{
+ clk_put(clk);
+}
+
+int rust_helper_clk_enable(struct clk *clk)
+{
+ return clk_enable(clk);
+}
+
+void rust_helper_clk_disable(struct clk *clk)
+{
+ clk_disable(clk);
+}
+
+unsigned long rust_helper_clk_get_rate(struct clk *clk)
+{
+ return clk_get_rate(clk);
+}
+
+int rust_helper_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_set_rate(clk, rate);
+}
+#endif
+
+#ifndef CONFIG_HAVE_CLK_PREPARE
+int rust_helper_clk_prepare(struct clk *clk)
+{
+ return clk_prepare(clk);
+}
+
+void rust_helper_clk_unprepare(struct clk *clk)
+{
+ clk_unprepare(clk);
+}
+#endif
+
+struct clk *rust_helper_clk_get_optional(struct device *dev, const char *id)
+{
+ return clk_get_optional(dev, id);
+}
+
+int rust_helper_clk_prepare_enable(struct clk *clk)
+{
+ return clk_prepare_enable(clk);
+}
+
+void rust_helper_clk_disable_unprepare(struct clk *clk)
+{
+ clk_disable_unprepare(clk);
+}
diff --git a/rust/helpers/cpufreq.c b/rust/helpers/cpufreq.c
new file mode 100644
index 000000000000..7c1343c4d65e
--- /dev/null
+++ b/rust/helpers/cpufreq.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/cpufreq.h>
+
+#ifdef CONFIG_CPU_FREQ
+void rust_helper_cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
+{
+ cpufreq_register_em_with_opp(policy);
+}
+#endif
diff --git a/rust/helpers/cpumask.c b/rust/helpers/cpumask.c
new file mode 100644
index 000000000000..eb10598a0242
--- /dev/null
+++ b/rust/helpers/cpumask.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/cpumask.h>
+
+void rust_helper_cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+ cpumask_set_cpu(cpu, dstp);
+}
+
+void rust_helper___cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+ __cpumask_set_cpu(cpu, dstp);
+}
+
+void rust_helper_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+ cpumask_clear_cpu(cpu, dstp);
+}
+
+void rust_helper___cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+ __cpumask_clear_cpu(cpu, dstp);
+}
+
+bool rust_helper_cpumask_test_cpu(int cpu, struct cpumask *srcp)
+{
+ return cpumask_test_cpu(cpu, srcp);
+}
+
+void rust_helper_cpumask_setall(struct cpumask *dstp)
+{
+ cpumask_setall(dstp);
+}
+
+bool rust_helper_cpumask_empty(struct cpumask *srcp)
+{
+ return cpumask_empty(srcp);
+}
+
+bool rust_helper_cpumask_full(struct cpumask *srcp)
+{
+ return cpumask_full(srcp);
+}
+
+unsigned int rust_helper_cpumask_weight(struct cpumask *srcp)
+{
+ return cpumask_weight(srcp);
+}
+
+void rust_helper_cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
+{
+ cpumask_copy(dstp, srcp);
+}
+
+bool rust_helper_alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags);
+}
+
+bool rust_helper_zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return zalloc_cpumask_var(mask, flags);
+}
+
+#ifndef CONFIG_CPUMASK_OFFSTACK
+void rust_helper_free_cpumask_var(cpumask_var_t mask)
+{
+ free_cpumask_var(mask);
+}
+#endif
diff --git a/rust/helpers/cred.c b/rust/helpers/cred.c
new file mode 100644
index 000000000000..fde7ae20cdd1
--- /dev/null
+++ b/rust/helpers/cred.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/cred.h>
+
+const struct cred *rust_helper_get_cred(const struct cred *cred)
+{
+ return get_cred(cred);
+}
+
+void rust_helper_put_cred(const struct cred *cred)
+{
+ put_cred(cred);
+}
diff --git a/rust/helpers/device.c b/rust/helpers/device.c
new file mode 100644
index 000000000000..b2135c6686b0
--- /dev/null
+++ b/rust/helpers/device.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/device.h>
+
+int rust_helper_devm_add_action(struct device *dev,
+ void (*action)(void *),
+ void *data)
+{
+ return devm_add_action(dev, action, data);
+}
diff --git a/rust/helpers/dma.c b/rust/helpers/dma.c
new file mode 100644
index 000000000000..df8b8a77355a
--- /dev/null
+++ b/rust/helpers/dma.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-mapping.h>
+
+void *rust_helper_dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ unsigned long attrs)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, attrs);
+}
+
+void rust_helper_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, attrs);
+}
diff --git a/rust/helpers/drm.c b/rust/helpers/drm.c
new file mode 100644
index 000000000000..450b406c6f27
--- /dev/null
+++ b/rust/helpers/drm.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_gem.h>
+#include <drm/drm_vma_manager.h>
+
+#ifdef CONFIG_DRM
+
+void rust_helper_drm_gem_object_get(struct drm_gem_object *obj)
+{
+ drm_gem_object_get(obj);
+}
+
+void rust_helper_drm_gem_object_put(struct drm_gem_object *obj)
+{
+ drm_gem_object_put(obj);
+}
+
+__u64 rust_helper_drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+ return drm_vma_node_offset_addr(node);
+}
+
+#endif
diff --git a/rust/helpers/err.c b/rust/helpers/err.c
new file mode 100644
index 000000000000..544c7cb86632
--- /dev/null
+++ b/rust/helpers/err.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/err.h>
+
+__force void *rust_helper_ERR_PTR(long err)
+{
+ return ERR_PTR(err);
+}
+
+bool rust_helper_IS_ERR(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+
+long rust_helper_PTR_ERR(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
diff --git a/rust/helpers/fs.c b/rust/helpers/fs.c
new file mode 100644
index 000000000000..a75c96763372
--- /dev/null
+++ b/rust/helpers/fs.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2024 Google LLC.
+ */
+
+#include <linux/fs.h>
+
+struct file *rust_helper_get_file(struct file *f)
+{
+ return get_file(f);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
new file mode 100644
index 000000000000..805307018f0e
--- /dev/null
+++ b/rust/helpers/helpers.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
+ * cannot be called either. This file explicitly creates functions ("helpers")
+ * that wrap those so that they can be called from Rust.
+ *
+ * Sorted alphabetically.
+ */
+
+#include "auxiliary.c"
+#include "blk.c"
+#include "bug.c"
+#include "build_assert.c"
+#include "build_bug.c"
+#include "clk.c"
+#include "cpufreq.c"
+#include "cpumask.c"
+#include "cred.c"
+#include "device.c"
+#include "dma.c"
+#include "drm.c"
+#include "err.c"
+#include "fs.c"
+#include "io.c"
+#include "jump_label.c"
+#include "kunit.c"
+#include "mm.c"
+#include "mutex.c"
+#include "page.c"
+#include "platform.c"
+#include "pci.c"
+#include "pid_namespace.c"
+#include "rbtree.c"
+#include "rcu.c"
+#include "refcount.c"
+#include "security.c"
+#include "signal.c"
+#include "slab.c"
+#include "spinlock.c"
+#include "sync.c"
+#include "task.c"
+#include "uaccess.c"
+#include "vmalloc.c"
+#include "wait.c"
+#include "workqueue.c"
diff --git a/rust/helpers/io.c b/rust/helpers/io.c
new file mode 100644
index 000000000000..15ea187c5466
--- /dev/null
+++ b/rust/helpers/io.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/io.h>
+
+void __iomem *rust_helper_ioremap(phys_addr_t offset, size_t size)
+{
+ return ioremap(offset, size);
+}
+
+void rust_helper_iounmap(void __iomem *addr)
+{
+ iounmap(addr);
+}
+
+u8 rust_helper_readb(const void __iomem *addr)
+{
+ return readb(addr);
+}
+
+u16 rust_helper_readw(const void __iomem *addr)
+{
+ return readw(addr);
+}
+
+u32 rust_helper_readl(const void __iomem *addr)
+{
+ return readl(addr);
+}
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq(const void __iomem *addr)
+{
+ return readq(addr);
+}
+#endif
+
+void rust_helper_writeb(u8 value, void __iomem *addr)
+{
+ writeb(value, addr);
+}
+
+void rust_helper_writew(u16 value, void __iomem *addr)
+{
+ writew(value, addr);
+}
+
+void rust_helper_writel(u32 value, void __iomem *addr)
+{
+ writel(value, addr);
+}
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq(u64 value, void __iomem *addr)
+{
+ writeq(value, addr);
+}
+#endif
+
+u8 rust_helper_readb_relaxed(const void __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+
+u16 rust_helper_readw_relaxed(const void __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+
+u32 rust_helper_readl_relaxed(const void __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq_relaxed(const void __iomem *addr)
+{
+ return readq_relaxed(addr);
+}
+#endif
+
+void rust_helper_writeb_relaxed(u8 value, void __iomem *addr)
+{
+ writeb_relaxed(value, addr);
+}
+
+void rust_helper_writew_relaxed(u16 value, void __iomem *addr)
+{
+ writew_relaxed(value, addr);
+}
+
+void rust_helper_writel_relaxed(u32 value, void __iomem *addr)
+{
+ writel_relaxed(value, addr);
+}
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq_relaxed(u64 value, void __iomem *addr)
+{
+ writeq_relaxed(value, addr);
+}
+#endif
diff --git a/rust/helpers/jump_label.c b/rust/helpers/jump_label.c
new file mode 100644
index 000000000000..fc1f1e0df08e
--- /dev/null
+++ b/rust/helpers/jump_label.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2024 Google LLC.
+ */
+
+#include <linux/jump_label.h>
+
+#ifndef CONFIG_JUMP_LABEL
+int rust_helper_static_key_count(struct static_key *key)
+{
+ return static_key_count(key);
+}
+#endif
diff --git a/rust/helpers/kunit.c b/rust/helpers/kunit.c
new file mode 100644
index 000000000000..b85a4d394c11
--- /dev/null
+++ b/rust/helpers/kunit.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test-bug.h>
+
+struct kunit *rust_helper_kunit_get_current_test(void)
+{
+ return kunit_get_current_test();
+}
diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c
new file mode 100644
index 000000000000..81b510c96fd2
--- /dev/null
+++ b/rust/helpers/mm.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+
+void rust_helper_mmgrab(struct mm_struct *mm)
+{
+ mmgrab(mm);
+}
+
+void rust_helper_mmdrop(struct mm_struct *mm)
+{
+ mmdrop(mm);
+}
+
+void rust_helper_mmget(struct mm_struct *mm)
+{
+ mmget(mm);
+}
+
+bool rust_helper_mmget_not_zero(struct mm_struct *mm)
+{
+ return mmget_not_zero(mm);
+}
+
+void rust_helper_mmap_read_lock(struct mm_struct *mm)
+{
+ mmap_read_lock(mm);
+}
+
+bool rust_helper_mmap_read_trylock(struct mm_struct *mm)
+{
+ return mmap_read_trylock(mm);
+}
+
+void rust_helper_mmap_read_unlock(struct mm_struct *mm)
+{
+ mmap_read_unlock(mm);
+}
+
+struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm,
+ unsigned long addr)
+{
+ return vma_lookup(mm, addr);
+}
+
+void rust_helper_vma_end_read(struct vm_area_struct *vma)
+{
+ vma_end_read(vma);
+}
diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
new file mode 100644
index 000000000000..3e9b910a88e9
--- /dev/null
+++ b/rust/helpers/mutex.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mutex.h>
+
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+ mutex_lock(lock);
+}
+
+void rust_helper___mutex_init(struct mutex *mutex, const char *name,
+ struct lock_class_key *key)
+{
+ __mutex_init(mutex, name, key);
+}
+
+void rust_helper_mutex_assert_is_held(struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+}
+
+void rust_helper_mutex_destroy(struct mutex *lock)
+{
+ mutex_destroy(lock);
+}
diff --git a/rust/helpers/page.c b/rust/helpers/page.c
new file mode 100644
index 000000000000..b3f2b8fbf87f
--- /dev/null
+++ b/rust/helpers/page.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+
+void *rust_helper_kmap_local_page(struct page *page)
+{
+ return kmap_local_page(page);
+}
+
+void rust_helper_kunmap_local(const void *addr)
+{
+ kunmap_local(addr);
+}
diff --git a/rust/helpers/pci.c b/rust/helpers/pci.c
new file mode 100644
index 000000000000..cd0e6bf2cc4d
--- /dev/null
+++ b/rust/helpers/pci.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pci.h>
+
+void rust_helper_pci_set_drvdata(struct pci_dev *pdev, void *data)
+{
+ pci_set_drvdata(pdev, data);
+}
+
+void *rust_helper_pci_get_drvdata(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+resource_size_t rust_helper_pci_resource_len(struct pci_dev *pdev, int bar)
+{
+ return pci_resource_len(pdev, bar);
+}
+
+bool rust_helper_dev_is_pci(const struct device *dev)
+{
+ return dev_is_pci(dev);
+}
diff --git a/rust/helpers/pid_namespace.c b/rust/helpers/pid_namespace.c
new file mode 100644
index 000000000000..f41482bdec9a
--- /dev/null
+++ b/rust/helpers/pid_namespace.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pid_namespace.h>
+#include <linux/cleanup.h>
+
+struct pid_namespace *rust_helper_get_pid_ns(struct pid_namespace *ns)
+{
+ return get_pid_ns(ns);
+}
+
+void rust_helper_put_pid_ns(struct pid_namespace *ns)
+{
+ put_pid_ns(ns);
+}
+
+/* Get a reference on a task's pid namespace. */
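+/*
+ * task_active_pid_ns() is only stable under RCU: guard(rcu)() holds the RCU
+ * read lock for the remainder of the scope, and get_pid_ns() takes a
+ * reference before that lock is dropped.
+ */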
+struct pid_namespace *rust_helper_task_get_pid_ns(struct task_struct *task)
+{
+ struct pid_namespace *pid_ns;
+
+ guard(rcu)();
+ pid_ns = task_active_pid_ns(task);
+ if (pid_ns)
+ get_pid_ns(pid_ns);
+ return pid_ns;
+}
diff --git a/rust/helpers/platform.c b/rust/helpers/platform.c
new file mode 100644
index 000000000000..82171233d12f
--- /dev/null
+++ b/rust/helpers/platform.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/platform_device.h>
+
+void *rust_helper_platform_get_drvdata(const struct platform_device *pdev)
+{
+ return platform_get_drvdata(pdev);
+}
+
+void rust_helper_platform_set_drvdata(struct platform_device *pdev, void *data)
+{
+ platform_set_drvdata(pdev, data);
+}
+
+bool rust_helper_dev_is_platform(const struct device *dev)
+{
+ return dev_is_platform(dev);
+}
diff --git a/rust/helpers/rbtree.c b/rust/helpers/rbtree.c
new file mode 100644
index 000000000000..6d404b84a9b5
--- /dev/null
+++ b/rust/helpers/rbtree.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/rbtree.h>
+
+void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ rb_link_node(node, parent, rb_link);
+}
diff --git a/rust/helpers/rcu.c b/rust/helpers/rcu.c
new file mode 100644
index 000000000000..f1cec6583513
--- /dev/null
+++ b/rust/helpers/rcu.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/rcupdate.h>
+
+void rust_helper_rcu_read_lock(void)
+{
+ rcu_read_lock();
+}
+
+void rust_helper_rcu_read_unlock(void)
+{
+ rcu_read_unlock();
+}
diff --git a/rust/helpers/refcount.c b/rust/helpers/refcount.c
new file mode 100644
index 000000000000..d6adbd2e45a1
--- /dev/null
+++ b/rust/helpers/refcount.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/refcount.h>
+
+refcount_t rust_helper_REFCOUNT_INIT(int n)
+{
+ return (refcount_t)REFCOUNT_INIT(n);
+}
+
+void rust_helper_refcount_inc(refcount_t *r)
+{
+ refcount_inc(r);
+}
+
+bool rust_helper_refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_dec_and_test(r);
+}
diff --git a/rust/helpers/security.c b/rust/helpers/security.c
new file mode 100644
index 000000000000..0c4c2065df28
--- /dev/null
+++ b/rust/helpers/security.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/security.h>
+
+#ifndef CONFIG_SECURITY
+void rust_helper_security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ security_cred_getsecid(c, secid);
+}
+
+int rust_helper_security_secid_to_secctx(u32 secid, struct lsm_context *cp)
+{
+ return security_secid_to_secctx(secid, cp);
+}
+
+void rust_helper_security_release_secctx(struct lsm_context *cp)
+{
+ security_release_secctx(cp);
+}
+#endif
diff --git a/rust/helpers/signal.c b/rust/helpers/signal.c
new file mode 100644
index 000000000000..1a6bbe9438e2
--- /dev/null
+++ b/rust/helpers/signal.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/sched/signal.h>
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
new file mode 100644
index 000000000000..a842bfbddcba
--- /dev/null
+++ b/rust/helpers/slab.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+void * __must_check __realloc_size(2)
+rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
+{
+ return krealloc(objp, new_size, flags);
+}
+
+void * __must_check __realloc_size(2)
+rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
+{
+ return kvrealloc(p, size, flags);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
new file mode 100644
index 000000000000..42c4bf01a23e
--- /dev/null
+++ b/rust/helpers/spinlock.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/spinlock.h>
+
+void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+# if defined(CONFIG_PREEMPT_RT)
+ __spin_lock_init(lock, name, key, false);
+# else /*!CONFIG_PREEMPT_RT */
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+# endif /* CONFIG_PREEMPT_RT */
+#else /* !CONFIG_DEBUG_SPINLOCK */
+ spin_lock_init(lock);
+#endif /* CONFIG_DEBUG_SPINLOCK */
+}
+
+void rust_helper_spin_lock(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+
+void rust_helper_spin_unlock(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
+
+int rust_helper_spin_trylock(spinlock_t *lock)
+{
+ return spin_trylock(lock);
+}
+
+void rust_helper_spin_assert_is_held(spinlock_t *lock)
+{
+ lockdep_assert_held(lock);
+}
diff --git a/rust/helpers/sync.c b/rust/helpers/sync.c
new file mode 100644
index 000000000000..ff7e68b48810
--- /dev/null
+++ b/rust/helpers/sync.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/lockdep.h>
+
+void rust_helper_lockdep_register_key(struct lock_class_key *k)
+{
+ lockdep_register_key(k);
+}
+
+void rust_helper_lockdep_unregister_key(struct lock_class_key *k)
+{
+ lockdep_unregister_key(k);
+}
diff --git a/rust/helpers/task.c b/rust/helpers/task.c
new file mode 100644
index 000000000000..31c33ea2dce6
--- /dev/null
+++ b/rust/helpers/task.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/sched/task.h>
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+
+void rust_helper_get_task_struct(struct task_struct *t)
+{
+ get_task_struct(t);
+}
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+ put_task_struct(t);
+}
+
+kuid_t rust_helper_task_uid(struct task_struct *task)
+{
+ return task_uid(task);
+}
+
+kuid_t rust_helper_task_euid(struct task_struct *task)
+{
+ return task_euid(task);
+}
+
+#ifndef CONFIG_USER_NS
+uid_t rust_helper_from_kuid(struct user_namespace *to, kuid_t uid)
+{
+ return from_kuid(to, uid);
+}
+#endif /* CONFIG_USER_NS */
+
+bool rust_helper_uid_eq(kuid_t left, kuid_t right)
+{
+ return uid_eq(left, right);
+}
+
+kuid_t rust_helper_current_euid(void)
+{
+ return current_euid();
+}
+
+struct user_namespace *rust_helper_current_user_ns(void)
+{
+ return current_user_ns();
+}
+
+pid_t rust_helper_task_tgid_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return task_tgid_nr_ns(tsk, ns);
+}
diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c
new file mode 100644
index 000000000000..f49076f813cd
--- /dev/null
+++ b/rust/helpers/uaccess.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/uaccess.h>
+
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
new file mode 100644
index 000000000000..80d34501bbc0
--- /dev/null
+++ b/rust/helpers/vmalloc.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/vmalloc.h>
+
+void * __must_check __realloc_size(2)
+rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
+{
+ return vrealloc(p, size, flags);
+}
diff --git a/rust/helpers/wait.c b/rust/helpers/wait.c
new file mode 100644
index 000000000000..ae48e33d9da3
--- /dev/null
+++ b/rust/helpers/wait.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/wait.h>
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
diff --git a/rust/helpers/workqueue.c b/rust/helpers/workqueue.c
new file mode 100644
index 000000000000..b2b82753509b
--- /dev/null
+++ b/rust/helpers/workqueue.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/workqueue.h>
+
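+/*
+ * Mirrors what INIT_WORK() expands to, but takes the lockdep class key as an
+ * explicit parameter so the Rust caller can supply it (Rust cannot expand the
+ * C macro, which creates the key itself).
+ */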
+void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
+ bool onstack, const char *name,
+ struct lock_class_key *key)
+{
+ __init_work(work, onstack);
+ work->data = (atomic_long_t)WORK_DATA_INIT();
+ lockdep_init_map(&work->lockdep_map, name, key, 0);
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
diff --git a/rust/kernel/.gitignore b/rust/kernel/.gitignore
new file mode 100644
index 000000000000..6ba39a178f30
--- /dev/null
+++ b/rust/kernel/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+/generated_arch_static_branch_asm.rs
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
new file mode 100644
index 000000000000..fc9c9c41cd79
--- /dev/null
+++ b/rust/kernel/alloc.rs
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Implementation of the kernel's memory allocation infrastructure.
+
+#[cfg(not(any(test, testlib)))]
+pub mod allocator;
+pub mod kbox;
+pub mod kvec;
+pub mod layout;
+
+#[cfg(any(test, testlib))]
+pub mod allocator_test;
+
+#[cfg(any(test, testlib))]
+pub use self::allocator_test as allocator;
+
+pub use self::kbox::Box;
+pub use self::kbox::KBox;
+pub use self::kbox::KVBox;
+pub use self::kbox::VBox;
+
+pub use self::kvec::IntoIter;
+pub use self::kvec::KVVec;
+pub use self::kvec::KVec;
+pub use self::kvec::VVec;
+pub use self::kvec::Vec;
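+
+// Naming convention (assumed from `allocator.rs`): the `K`, `V` and `KV`
+// prefixes select the backing allocator, i.e. `Kmalloc`, `Vmalloc` and
+// `KVmalloc` respectively (e.g. `KBox`, `VVec`, `KVVec`).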
+
+/// Indicates an allocation error.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct AllocError;
+use core::{alloc::Layout, ptr::NonNull};
+
+/// Flags to be used when allocating memory.
+///
+/// They can be combined with the operators `|`, `&`, and `!`.
+///
+/// Values can be used from the [`flags`] module.
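+///
+/// # Examples
+///
+/// A minimal sketch of combining and testing flags, using the constants
+/// defined in [`flags`]:
+///
+/// ```
+/// use kernel::alloc::flags;
+///
+/// let f = flags::GFP_KERNEL | flags::__GFP_ZERO;
+/// assert!(f.contains(flags::GFP_KERNEL));
+/// assert!(f.contains(flags::__GFP_ZERO));
+/// ```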
+#[derive(Clone, Copy, PartialEq)]
+pub struct Flags(u32);
+
+impl Flags {
+ /// Get the raw representation of this flag.
+ pub(crate) fn as_raw(self) -> u32 {
+ self.0
+ }
+
+ /// Check whether `flags` is contained in `self`.
+ pub fn contains(self, flags: Flags) -> bool {
+ (self & flags) == flags
+ }
+}
+
+impl core::ops::BitOr for Flags {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+}
+
+impl core::ops::BitAnd for Flags {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output {
+ Self(self.0 & rhs.0)
+ }
+}
+
+impl core::ops::Not for Flags {
+ type Output = Self;
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+}
+
+/// Allocation flags.
+///
+/// These are meant to be used in functions that can allocate memory.
+pub mod flags {
+ use super::Flags;
+
+ /// Zeroes out the allocated memory.
+ ///
+ /// This is normally or'd with other flags.
+ pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO);
+
+ /// Allow the allocation to be in high memory.
+ ///
+ /// Allocations in high memory may not be mapped into the kernel's address space, so this can't
+ /// be used with `kmalloc` and other similar methods.
+ ///
+ /// This is normally or'd with other flags.
+ pub const __GFP_HIGHMEM: Flags = Flags(bindings::__GFP_HIGHMEM);
+
+ /// Users cannot sleep and need the allocation to succeed.
+ ///
+ /// A lower watermark is applied to allow access to "atomic reserves". The current
+ /// implementation doesn't support NMI and a few other strict non-preemptive contexts (e.g.
+ /// raw_spin_lock). The same applies to [`GFP_NOWAIT`].
+ pub const GFP_ATOMIC: Flags = Flags(bindings::GFP_ATOMIC);
+
+ /// Typical for kernel-internal allocations. The caller requires ZONE_NORMAL or a lower zone
+ /// for direct access but can perform direct reclaim.
+ pub const GFP_KERNEL: Flags = Flags(bindings::GFP_KERNEL);
+
+ /// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg.
+ pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT);
+
+ /// For kernel allocations that should not stall for direct reclaim, start physical IO, or
+ /// use any filesystem callback. Such allocations are very likely to fail, even for very
+ /// small sizes.
+ pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);
+
+ /// Suppresses allocation failure reports.
+ ///
+ /// This is normally or'd with other flags.
+ pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN);
+}
+
+/// The kernel's [`Allocator`] trait.
+///
+/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described
+/// via [`Layout`].
+///
+/// [`Allocator`] is designed to be implemented as a ZST; [`Allocator`] functions do not operate on
+/// an object instance.
+///
+/// In order to be able to support `#[derive(CoercePointee)]` later on, we need to avoid a design
+/// that requires an `Allocator` to be instantiated, hence its functions must not contain any kind
+/// of `self` parameter.
+///
+/// # Safety
+///
+/// - A memory allocation returned from an allocator must remain valid until it is explicitly freed.
+///
+/// - Any pointer to a valid memory allocation must be valid to be passed to any other [`Allocator`]
+/// function of the same type.
+///
+/// - Implementers must ensure that all trait functions abide by the guarantees documented in the
+/// `# Guarantees` sections.
+pub unsafe trait Allocator {
+ /// Allocate memory based on `layout` and `flags`.
+ ///
+ /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
+ /// constraints (i.e. minimum size and alignment as specified by `layout`).
+ ///
+ /// This function is equivalent to `realloc` when called with `None`.
+ ///
+ /// # Guarantees
+ ///
+ /// When the return value is `Ok(ptr)`, then `ptr` is
+ /// - valid for reads and writes for `layout.size()` bytes, until it is passed to
+ /// [`Allocator::free`] or [`Allocator::realloc`],
+ /// - aligned to `layout.align()`.
+ ///
+ /// Additionally, `Flags` are honored as documented in
+ /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
+ fn alloc(layout: Layout, flags: Flags) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
+ // new memory allocation.
+ unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags) }
+ }
+
+ /// Re-allocate an existing memory allocation to satisfy the requested `layout`.
+ ///
+ /// If the requested size is zero, `realloc` behaves equivalently to `free`.
+ ///
+ /// If the requested size is larger than the size of the existing allocation, a successful call
+ /// to `realloc` guarantees that the new or grown buffer has at least `layout.size()` bytes, but
+ /// may also be larger.
+ ///
+ /// If the requested size is smaller than the size of the existing allocation, `realloc` may or
+ /// may not shrink the buffer; this is implementation specific to the allocator.
+ ///
+ /// On allocation failure, the existing buffer, if any, remains valid.
+ ///
+ /// The buffer is represented as `NonNull<[u8]>`.
+ ///
+ /// # Safety
+ ///
+ /// - If `ptr == Some(p)`, then `p` must point to an existing and valid memory allocation
+ /// created by this [`Allocator`]; if `old_layout` is zero-sized `p` does not need to be a
+ /// pointer returned by this [`Allocator`].
+ /// - `ptr` is allowed to be `None`; in this case a new memory allocation is created and
+ /// `old_layout` is ignored.
+ /// - `old_layout` must match the `Layout` the allocation has been created with.
+ ///
+ /// # Guarantees
+ ///
+ /// This function has the same guarantees as [`Allocator::alloc`]. When `ptr == Some(p)`, then
+ /// it additionally guarantees that:
+ /// - the contents of the memory pointed to by `p` are preserved up to the lesser of the new
+ /// and old size, i.e. `ret_ptr[0..min(layout.size(), old_layout.size())] ==
+ /// p[0..min(layout.size(), old_layout.size())]`.
+ /// - when the return value is `Err(AllocError)`, then `ptr` is still valid.
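+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of growing a buffer (illustrative only, not a doctest;
+ /// `ptr` is assumed to come from a prior `A::alloc` with layout `old`):
+ ///
+ /// ```ignore
+ /// let old = Layout::new::<[u8; 16]>();
+ /// let new = Layout::new::<[u8; 32]>();
+ /// // SAFETY: `ptr` was returned by `A::alloc(old, _)` and has not been freed.
+ /// let grown = unsafe { A::realloc(Some(ptr.cast()), new, old, flags::GFP_KERNEL)? };
+ /// ```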
+ unsafe fn realloc(
+ ptr: Option<NonNull<u8>>,
+ layout: Layout,
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError>;
+
+ /// Free an existing memory allocation.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must point to an existing and valid memory allocation created by this [`Allocator`];
+ /// if `layout` is zero-sized, `ptr` does not need to be a pointer returned by this
+ /// [`Allocator`].
+ /// - `layout` must match the `Layout` the allocation has been created with.
+ /// - The memory allocation at `ptr` must never again be read from or written to.
+ unsafe fn free(ptr: NonNull<u8>, layout: Layout) {
+ // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this
+ // allocator. We are passing a `Layout` with the smallest possible alignment, so it is
+ // smaller than or equal to the alignment previously used with this allocation.
+ let _ = unsafe { Self::realloc(Some(ptr), Layout::new::<()>(), layout, Flags(0)) };
+ }
+}
+
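+// A hedged, illustrative sketch (not part of the kernel API): using the trait
+// generically to allocate and free a buffer; `demo` is a hypothetical helper.
+//
+//     fn demo<A: Allocator>() -> Result<(), AllocError> {
+//         let layout = Layout::new::<u64>();
+//         let ptr = A::alloc(layout, flags::GFP_KERNEL)?;
+//         // SAFETY: `ptr` was returned by `A::alloc` with `layout` and is
+//         // freed exactly once.
+//         unsafe { A::free(ptr.cast(), layout) };
+//         Ok(())
+//     }
+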
+/// Returns a properly aligned dangling pointer from the given `layout`.
+pub(crate) fn dangling_from_layout(layout: Layout) -> NonNull<u8> {
+ let ptr = layout.align() as *mut u8;
+
+ // SAFETY: `layout.align()` (and hence `ptr`) is guaranteed to be non-zero.
+ unsafe { NonNull::new_unchecked(ptr) }
+}
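+
+// E.g. an 8-byte-aligned layout yields the dangling address `8`: non-null,
+// correctly aligned, and never dereferenced (used to represent zero-sized
+// allocations, see `ReallocFunc::call` in `allocator.rs`).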
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
new file mode 100644
index 000000000000..aa2dfa9dca4c
--- /dev/null
+++ b/rust/kernel/alloc/allocator.rs
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Allocator support.
+//!
+//! Documentation for the kernel's memory allocators can be found in the "Memory Allocation Guide"
+//! linked below. For instance, this includes the concept of "get free page" (GFP) flags and the
+//! typical application of the different kernel allocators.
+//!
+//! Reference: <https://docs.kernel.org/core-api/memory-allocation.html>
+
+use super::Flags;
+use core::alloc::Layout;
+use core::ptr;
+use core::ptr::NonNull;
+
+use crate::alloc::{AllocError, Allocator};
+use crate::bindings;
+use crate::pr_warn;
+
+/// The contiguous kernel allocator.
+///
+/// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also
+/// supports larger allocations up to `bindings::KMALLOC_MAX_SIZE`, which is hardware specific.
+///
+/// For more details see [self].
+pub struct Kmalloc;
+
+/// The virtually contiguous kernel allocator.
+///
+/// `Vmalloc` allocates pages from the page level allocator and maps them into the contiguous kernel
+/// virtual space. It is typically used for large allocations. The memory allocated with this
+/// allocator is not physically contiguous.
+///
+/// For more details see [self].
+pub struct Vmalloc;
+
+/// The kvmalloc kernel allocator.
+///
+/// `KVmalloc` attempts to allocate memory with `Kmalloc` first, but falls back to `Vmalloc` upon
+/// failure. This allocator is typically used when the size for the requested allocation is not
+/// known and may exceed the capabilities of `Kmalloc`.
+///
+/// For more details see [self].
+pub struct KVmalloc;
+
+/// Returns a proper size to allocate a new object aligned to `new_layout`'s alignment.
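+///
+/// For example, a layout created with `Layout::from_size_align(3, 8)` is padded to a size of 8,
+/// so 8 is the size that ends up being passed to the slab allocator.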
+fn aligned_size(new_layout: Layout) -> usize {
+ // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
+ let layout = new_layout.pad_to_align();
+
+ // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
+ // `layout.align()`, which, together with the slab guarantees, means that `krealloc` will
+ // return a properly aligned object (see comments in `kmalloc()` for more information).
+ layout.size()
+}
+
+/// # Invariants
+///
+/// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
+struct ReallocFunc(
+ unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
+);
+
+impl ReallocFunc {
+ // INVARIANT: `krealloc` satisfies the type invariants.
+ const KREALLOC: Self = Self(bindings::krealloc);
+
+ // INVARIANT: `vrealloc` satisfies the type invariants.
+ const VREALLOC: Self = Self(bindings::vrealloc);
+
+ // INVARIANT: `kvrealloc` satisfies the type invariants.
+ const KVREALLOC: Self = Self(bindings::kvrealloc);
+
+ /// # Safety
+ ///
+ /// This method has the same safety requirements as [`Allocator::realloc`].
+ ///
+ /// # Guarantees
+ ///
+ /// This method has the same guarantees as `Allocator::realloc`. Additionally:
+ /// - it accepts any pointer to a valid memory allocation allocated by this function.
+ /// - memory allocated by this function remains valid until it is passed to this function.
+ #[inline]
+ unsafe fn call(
+ &self,
+ ptr: Option<NonNull<u8>>,
+ layout: Layout,
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ let size = aligned_size(layout);
+ let ptr = match ptr {
+ Some(ptr) => {
+ if old_layout.size() == 0 {
+ ptr::null()
+ } else {
+ ptr.as_ptr()
+ }
+ }
+ None => ptr::null(),
+ };
+
+ // SAFETY:
+ // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc` and thus only requires that
+ // `ptr` is NULL or valid.
+ // - `ptr` is either NULL or valid by the safety requirements of this function.
+ //
+ // GUARANTEE:
+ // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc`.
+ // - Those functions provide the guarantees of this function.
+ let raw_ptr = unsafe {
+ // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
+ self.0(ptr.cast(), size, flags.0).cast()
+ };
+
+ let ptr = if size == 0 {
+ crate::alloc::dangling_from_layout(layout)
+ } else {
+ NonNull::new(raw_ptr).ok_or(AllocError)?
+ };
+
+ Ok(NonNull::slice_from_raw_parts(ptr, size))
+ }
+}
+
+// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
+// - memory remains valid until it is explicitly freed,
+// - passing a pointer to a valid memory allocation is OK,
+// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
+unsafe impl Allocator for Kmalloc {
+ #[inline]
+ unsafe fn realloc(
+ ptr: Option<NonNull<u8>>,
+ layout: Layout,
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
+ unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+ }
+}
+
+// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
+// - memory remains valid until it is explicitly freed,
+// - passing a pointer to a valid memory allocation is OK,
+// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
+unsafe impl Allocator for Vmalloc {
+ #[inline]
+ unsafe fn realloc(
+ ptr: Option<NonNull<u8>>,
+ layout: Layout,
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // TODO: Support alignments larger than PAGE_SIZE.
+ if layout.align() > bindings::PAGE_SIZE {
+ pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
+ return Err(AllocError);
+ }
+
+ // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
+ // allocated with this `Allocator`.
+ unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
+ }
+}
+
+// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
+// - memory remains valid until it is explicitly freed,
+// - passing a pointer to a valid memory allocation is OK,
+// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
+unsafe impl Allocator for KVmalloc {
+ #[inline]
+ unsafe fn realloc(
+ ptr: Option<NonNull<u8>>,
+ layout: Layout,
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // TODO: Support alignments larger than PAGE_SIZE.
+ if layout.align() > bindings::PAGE_SIZE {
+ pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
+ return Err(AllocError);
+ }
+
+ // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
+ // allocated with this `Allocator`.
+ unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
+ }
+}
diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
new file mode 100644
index 000000000000..c37d4c0c64e9
--- /dev/null
+++ b/rust/kernel/alloc/allocator_test.rs
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! So far the kernel's `Box` and `Vec` types can't be used by userspace test cases, since all users
+//! of those types (e.g. `CString`) use kernel allocators for instantiation.
+//!
+//! In order to allow userspace test cases to make use of such types as well, implement the
+//! `Cmalloc` allocator within the allocator_test module and type alias all kernel allocators to
+//! `Cmalloc`. The `Cmalloc` allocator uses libc's `realloc()` function as allocator backend.
+
+#![allow(missing_docs)]
+
+use super::{flags::*, AllocError, Allocator, Flags};
+use core::alloc::Layout;
+use core::cmp;
+use core::ptr;
+use core::ptr::NonNull;
+
+/// The userspace allocator based on libc.
+pub struct Cmalloc;
+
+pub type Kmalloc = Cmalloc;
+pub type Vmalloc = Kmalloc;
+pub type KVmalloc = Kmalloc;
+
+extern "C" {
+ #[link_name = "aligned_alloc"]
+ fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
+
+ #[link_name = "free"]
+ fn libc_free(ptr: *mut crate::ffi::c_void);
+}
+
+// SAFETY:
+// - memory remains valid until it is explicitly freed,
+// - passing a pointer to a valid memory allocation created by this `Allocator` is always OK,
+// - `realloc` provides the guarantees as provided in the `# Guarantees` section.
+unsafe impl Allocator for Cmalloc {
+ unsafe fn realloc(
+ ptr: Option<NonNull<u8>>,
+ layout: Layout,
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ let src = match ptr {
+ Some(src) => {
+ if old_layout.size() == 0 {
+ ptr::null_mut()
+ } else {
+ src.as_ptr()
+ }
+ }
+ None => ptr::null_mut(),
+ };
+
+ if layout.size() == 0 {
+ // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
+ unsafe { libc_free(src.cast()) };
+
+ return Ok(NonNull::slice_from_raw_parts(
+ crate::alloc::dangling_from_layout(layout),
+ 0,
+ ));
+ }
+
+ // ISO C (ISO/IEC 9899:2011) defines `aligned_alloc`:
+ //
+ // > The value of alignment shall be a valid alignment supported by the implementation
+ // [...].
+ //
+ // As an example of the "supported by the implementation" requirement, POSIX.1-2001 (IEEE
+ // 1003.1-2001) defines `posix_memalign`:
+ //
+ // > The value of alignment shall be a power of two multiple of sizeof (void *).
+ //
+ // and POSIX-based implementations of `aligned_alloc` inherit this requirement. At the time
+ // of writing, this is known to be the case on macOS (but not in glibc).
+ //
+ // Satisfy the stricter requirement to avoid spurious test failures on some platforms.
+ let min_align = core::mem::size_of::<*const crate::ffi::c_void>();
+ let layout = layout.align_to(min_align).map_err(|_| AllocError)?;
+ let layout = layout.pad_to_align();
+
+ // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or
+ // exceeds the given size and alignment requirements.
+ let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8;
+ let dst = NonNull::new(dst).ok_or(AllocError)?;
+
+ if flags.contains(__GFP_ZERO) {
+ // SAFETY: The preceding calls to `libc_aligned_alloc` and `NonNull::new`
+ // guarantee that `dst` points to memory of at least `layout.size()` bytes.
+ unsafe { dst.as_ptr().write_bytes(0, layout.size()) };
+ }
+
+ if !src.is_null() {
+ // SAFETY:
+ // - `src` has previously been allocated with this `Allocator`; `dst` has just been
+ // newly allocated, hence the memory regions do not overlap.
+ // - both `src` and `dst` are properly aligned and valid for reads and writes.
+ unsafe {
+ ptr::copy_nonoverlapping(
+ src,
+ dst.as_ptr(),
+ cmp::min(layout.size(), old_layout.size()),
+ )
+ };
+ }
+
+ // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
+ unsafe { libc_free(src.cast()) };
+
+ Ok(NonNull::slice_from_raw_parts(dst, layout.size()))
+ }
+}
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
new file mode 100644
index 000000000000..b77d32f3a58b
--- /dev/null
+++ b/rust/kernel/alloc/kbox.rs
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Implementation of [`Box`].
+
+#[allow(unused_imports)] // Used in doc comments.
+use super::allocator::{KVmalloc, Kmalloc, Vmalloc};
+use super::{AllocError, Allocator, Flags};
+use core::alloc::Layout;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem::ManuallyDrop;
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use core::pin::Pin;
+use core::ptr::NonNull;
+use core::result::Result;
+
+use crate::init::InPlaceInit;
+use crate::types::ForeignOwnable;
+use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption};
+
+/// The kernel's [`Box`] type -- a heap allocation for a single value of type `T`.
+///
+/// This is the kernel's version of the Rust stdlib's `Box`. There are several differences,
+/// for example no `noalias` attribute is emitted and partially moving out of a `Box` is not
+/// supported. There are also several API differences: e.g. `Box` always requires an [`Allocator`]
+/// implementation to be passed as a generic argument, page [`Flags`] must be supplied when
+/// allocating memory, and all functions that may allocate memory are fallible.
+///
+/// `Box` works with any of the kernel's allocators, e.g. [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`].
+/// There are aliases for `Box` with these allocators ([`KBox`], [`VBox`], [`KVBox`]).
+///
+/// When dropping a [`Box`], the value is also dropped and the heap memory is automatically freed.
+///
+/// # Examples
+///
+/// ```
+/// let b = KBox::<u64>::new(24_u64, GFP_KERNEL)?;
+///
+/// assert_eq!(*b, 24_u64);
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// ```
+/// # use kernel::bindings;
+/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1;
+/// struct Huge([u8; SIZE]);
+///
+/// assert!(KBox::<Huge>::new_uninit(GFP_KERNEL | __GFP_NOWARN).is_err());
+/// ```
+///
+/// ```
+/// # use kernel::bindings;
+/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1;
+/// struct Huge([u8; SIZE]);
+///
+/// assert!(KVBox::<Huge>::new_uninit(GFP_KERNEL).is_ok());
+/// ```
+///
+/// # Invariants
+///
+/// `self.0` is always properly aligned and either points to memory allocated with `A` or, for
+/// zero-sized types, is a dangling, well aligned pointer.
+#[repr(transparent)]
+pub struct Box<T: ?Sized, A: Allocator>(NonNull<T>, PhantomData<A>);
+
+/// Type alias for [`Box`] with a [`Kmalloc`] allocator.
+///
+/// # Examples
+///
+/// ```
+/// let b = KBox::new(24_u64, GFP_KERNEL)?;
+///
+/// assert_eq!(*b, 24_u64);
+/// # Ok::<(), Error>(())
+/// ```
+pub type KBox<T> = Box<T, super::allocator::Kmalloc>;
+
+/// Type alias for [`Box`] with a [`Vmalloc`] allocator.
+///
+/// # Examples
+///
+/// ```
+/// let b = VBox::new(24_u64, GFP_KERNEL)?;
+///
+/// assert_eq!(*b, 24_u64);
+/// # Ok::<(), Error>(())
+/// ```
+pub type VBox<T> = Box<T, super::allocator::Vmalloc>;
+
+/// Type alias for [`Box`] with a [`KVmalloc`] allocator.
+///
+/// # Examples
+///
+/// ```
+/// let b = KVBox::new(24_u64, GFP_KERNEL)?;
+///
+/// assert_eq!(*b, 24_u64);
+/// # Ok::<(), Error>(())
+/// ```
+pub type KVBox<T> = Box<T, super::allocator::KVmalloc>;
+
+// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee:
+// https://doc.rust-lang.org/stable/std/option/index.html#representation).
+unsafe impl<T, A: Allocator> ZeroableOption for Box<T, A> {}
+
+// SAFETY: `Box` is `Send` if `T` is `Send` because the `Box` owns a `T`.
+unsafe impl<T, A> Send for Box<T, A>
+where
+ T: Send + ?Sized,
+ A: Allocator,
+{
+}
+
+// SAFETY: `Box` is `Sync` if `T` is `Sync` because the `Box` owns a `T`.
+unsafe impl<T, A> Sync for Box<T, A>
+where
+ T: Sync + ?Sized,
+ A: Allocator,
+{
+}
+
+impl<T, A> Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ /// Creates a new `Box<T, A>` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// For non-ZSTs, `raw` must point at an allocation allocated with `A` that is sufficiently
+ /// aligned for and holds a valid `T`. The caller passes ownership of the allocation to the
+ /// `Box`.
+ ///
+ /// For ZSTs, `raw` must be a dangling, well aligned pointer.
+ #[inline]
+ pub const unsafe fn from_raw(raw: *mut T) -> Self {
+ // INVARIANT: Validity of `raw` is guaranteed by the safety preconditions of this function.
+ // SAFETY: By the safety preconditions of this function, `raw` is not a NULL pointer.
+ Self(unsafe { NonNull::new_unchecked(raw) }, PhantomData)
+ }
+
+ /// Consumes the `Box<T, A>` and returns a raw pointer.
+ ///
+ /// This will not run the destructor of `T` and for non-ZSTs the allocation will stay alive
+ /// indefinitely. Use [`Box::from_raw`] to recover the [`Box`], drop the value and free the
+ /// allocation, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = KBox::new(24, GFP_KERNEL)?;
+ /// let ptr = KBox::into_raw(x);
+ /// // SAFETY: `ptr` comes from a previous call to `KBox::into_raw`.
+ /// let x = unsafe { KBox::from_raw(ptr) };
+ ///
+ /// assert_eq!(*x, 24);
+ /// # Ok::<(), Error>(())
+ /// ```
+ #[inline]
+ pub fn into_raw(b: Self) -> *mut T {
+ ManuallyDrop::new(b).0.as_ptr()
+ }
+
+ /// Consumes and leaks the `Box<T, A>` and returns a mutable reference.
+ ///
+ /// See [`Box::into_raw`] for more details.
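+ ///
+ /// # Examples
+ ///
+ /// A short sketch; note that the allocation is intentionally leaked and never freed:
+ ///
+ /// ```
+ /// let b = KBox::new(7_u64, GFP_KERNEL)?;
+ /// let r: &'static mut u64 = KBox::leak(b);
+ ///
+ /// assert_eq!(*r, 7);
+ /// # Ok::<(), Error>(())
+ /// ```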
+ #[inline]
+ pub fn leak<'a>(b: Self) -> &'a mut T {
+ // SAFETY: `Box::into_raw` always returns a properly aligned and dereferenceable pointer
+ // which points to an initialized instance of `T`.
+ unsafe { &mut *Box::into_raw(b) }
+ }
+}
+
+impl<T, A> Box<MaybeUninit<T>, A>
+where
+ A: Allocator,
+{
+ /// Converts a `Box<MaybeUninit<T>, A>` to a `Box<T, A>`.
+ ///
+ /// It is undefined behavior to call this function while the value inside of `self` is not
+ /// yet fully initialized.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the value inside of `self` is in an initialized state.
+ pub unsafe fn assume_init(self) -> Box<T, A> {
+ let raw = Self::into_raw(self);
+
+ // SAFETY: `raw` comes from a previous call to `Box::into_raw`. By the safety requirements
+ // of this function, the value inside the `Box` is in an initialized state. Hence, it is
+ // safe to reconstruct the `Box` as `Box<T, A>`.
+ unsafe { Box::from_raw(raw.cast()) }
+ }
+
+ /// Writes the value and converts to `Box<T, A>`.
+ pub fn write(mut self, value: T) -> Box<T, A> {
+ (*self).write(value);
+
+ // SAFETY: We've just initialized `self`'s value.
+ unsafe { self.assume_init() }
+ }
+}
+
+impl<T, A> Box<T, A>
+where
+ A: Allocator,
+{
+ /// Creates a new `Box<T, A>` and initializes its contents with `x`.
+ ///
+ /// New memory is allocated with `A`. The allocation may fail, in which case an error is
+ /// returned. For ZSTs no memory is allocated.
+ pub fn new(x: T, flags: Flags) -> Result<Self, AllocError> {
+ let b = Self::new_uninit(flags)?;
+ Ok(Box::write(b, x))
+ }
+
+ /// Creates a new `Box<T, A>` with uninitialized contents.
+ ///
+ /// New memory is allocated with `A`. The allocation may fail, in which case an error is
+ /// returned. For ZSTs no memory is allocated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let b = KBox::<u64>::new_uninit(GFP_KERNEL)?;
+ /// let b = KBox::write(b, 24);
+ ///
+ /// assert_eq!(*b, 24_u64);
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>, A>, AllocError> {
+ let layout = Layout::new::<MaybeUninit<T>>();
+ let ptr = A::alloc(layout, flags)?;
+
+ // INVARIANT: `ptr` is either a dangling pointer or points to memory allocated with `A`,
+ // which is sufficient in size and alignment for storing a `T`.
+ Ok(Box(ptr.cast(), PhantomData))
+ }
+
+ /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then `x` will be
+ /// pinned in memory and can't be moved.
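+ ///
+ /// # Examples
+ ///
+ /// Pinning a value on the heap:
+ ///
+ /// ```
+ /// let b = KBox::pin(24_u64, GFP_KERNEL)?;
+ ///
+ /// assert_eq!(*b, 24_u64);
+ /// # Ok::<(), Error>(())
+ /// ```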
+ #[inline]
+ pub fn pin(x: T, flags: Flags) -> Result<Pin<Box<T, A>>, AllocError>
+ where
+ A: 'static,
+ {
+ Ok(Self::new(x, flags)?.into())
+ }
+
+ /// Converts a [`Box<T,A>`] to a [`Pin<Box<T,A>>`]. If `T` does not implement
+ /// [`Unpin`], then the value inside `this` will be pinned in memory and can't be moved.
+ pub fn into_pin(this: Self) -> Pin<Self> {
+ this.into()
+ }
+
+ /// Forgets the contents (does not run the destructor), but keeps the allocation.
+ fn forget_contents(this: Self) -> Box<MaybeUninit<T>, A> {
+ let ptr = Self::into_raw(this);
+
+ // SAFETY: `ptr` is valid, because it came from `Box::into_raw`.
+ unsafe { Box::from_raw(ptr.cast()) }
+ }
+
+ /// Drops the contents, but keeps the allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = KBox::new([0; 32], GFP_KERNEL)?;
+ /// assert_eq!(*value, [0; 32]);
+ /// let value = KBox::drop_contents(value);
+ /// // Now we can re-use `value`:
+ /// let value = KBox::write(value, [1; 32]);
+ /// assert_eq!(*value, [1; 32]);
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn drop_contents(this: Self) -> Box<MaybeUninit<T>, A> {
+ let ptr = this.0.as_ptr();
+
+ // SAFETY: `ptr` is valid, because it came from `this`. After this call we never access the
+ // value stored in `this` again.
+ unsafe { core::ptr::drop_in_place(ptr) };
+
+ Self::forget_contents(this)
+ }
+
+ /// Moves the `Box`'s value out of the `Box` and consumes the `Box`.
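+ ///
+ /// # Examples
+ ///
+ /// Moving the value back out of a `KBox`:
+ ///
+ /// ```
+ /// let b = KBox::new(5_u32, GFP_KERNEL)?;
+ ///
+ /// assert_eq!(KBox::into_inner(b), 5);
+ /// # Ok::<(), Error>(())
+ /// ```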
+ pub fn into_inner(b: Self) -> T {
+ // SAFETY: By the type invariant `&*b` is valid for `read`.
+ let value = unsafe { core::ptr::read(&*b) };
+ let _ = Self::forget_contents(b);
+ value
+ }
+}
+
+impl<T, A> From<Box<T, A>> for Pin<Box<T, A>>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ /// Converts a `Box<T, A>` into a `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then
+ /// `*b` will be pinned in memory and can't be moved.
+ ///
+ /// This moves `b` into `Pin` without moving `*b` or allocating and copying any memory.
+ fn from(b: Box<T, A>) -> Self {
+ // SAFETY: The value wrapped inside a `Pin<Box<T, A>>` cannot be moved or replaced as long
+ // as `T` does not implement `Unpin`.
+ unsafe { Pin::new_unchecked(b) }
+ }
+}
+
+impl<T, A> InPlaceWrite<T> for Box<MaybeUninit<T>, A>
+where
+ A: Allocator + 'static,
+{
+ type Initialized = Box<T, A>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { Box::assume_init(self) })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { Box::assume_init(self) }.into())
+ }
+}
+
+impl<T, A> InPlaceInit<T> for Box<T, A>
+where
+ A: Allocator + 'static,
+{
+ type PinnedSelf = Pin<Self>;
+
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ where
+ E: From<AllocError>,
+ {
+ Box::<_, A>::new_uninit(flags)?.write_pin_init(init)
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Box::<_, A>::new_uninit(flags)?.write_init(init)
+ }
+}
+
+impl<T: 'static, A> ForeignOwnable for Box<T, A>
+where
+ A: Allocator,
+{
+ type Borrowed<'a> = &'a T;
+ type BorrowedMut<'a> = &'a mut T;
+
+ fn into_foreign(self) -> *mut crate::ffi::c_void {
+ Box::into_raw(self).cast()
+ }
+
+ unsafe fn from_foreign(ptr: *mut crate::ffi::c_void) -> Self {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ unsafe { Box::from_raw(ptr.cast()) }
+ }
+
+ unsafe fn borrow<'a>(ptr: *mut crate::ffi::c_void) -> &'a T {
+ // SAFETY: The safety requirements of this method ensure that the object remains alive and
+ // immutable for the duration of 'a.
+ unsafe { &*ptr.cast() }
+ }
+
+ unsafe fn borrow_mut<'a>(ptr: *mut crate::ffi::c_void) -> &'a mut T {
+ let ptr = ptr.cast();
+ // SAFETY: The safety requirements of this method ensure that the pointer is valid and that
+ // nothing else will access the value for the duration of 'a.
+ unsafe { &mut *ptr }
+ }
+}
+
+impl<T: 'static, A> ForeignOwnable for Pin<Box<T, A>>
+where
+ A: Allocator,
+{
+ type Borrowed<'a> = Pin<&'a T>;
+ type BorrowedMut<'a> = Pin<&'a mut T>;
+
+ fn into_foreign(self) -> *mut crate::ffi::c_void {
+ // SAFETY: We are still treating the box as pinned.
+ Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }).cast()
+ }
+
+ unsafe fn from_foreign(ptr: *mut crate::ffi::c_void) -> Self {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ unsafe { Pin::new_unchecked(Box::from_raw(ptr.cast())) }
+ }
+
+ unsafe fn borrow<'a>(ptr: *mut crate::ffi::c_void) -> Pin<&'a T> {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements of `from_foreign` also ensure that the object remains alive for
+ // the lifetime of the returned value.
+ let r = unsafe { &*ptr.cast() };
+
+ // SAFETY: This pointer originates from a `Pin<Box<T>>`.
+ unsafe { Pin::new_unchecked(r) }
+ }
+
+ unsafe fn borrow_mut<'a>(ptr: *mut crate::ffi::c_void) -> Pin<&'a mut T> {
+ let ptr = ptr.cast();
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements of `from_foreign` also ensure that the object remains alive for
+ // the lifetime of the returned value.
+ let r = unsafe { &mut *ptr };
+
+ // SAFETY: This pointer originates from a `Pin<Box<T>>`.
+ unsafe { Pin::new_unchecked(r) }
+ }
+}
+
+impl<T, A> Deref for Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized
+ // instance of `T`.
+ unsafe { self.0.as_ref() }
+ }
+}
+
+impl<T, A> DerefMut for Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized
+ // instance of `T`.
+ unsafe { self.0.as_mut() }
+ }
+}
+
+impl<T, A> fmt::Display for Box<T, A>
+where
+ T: ?Sized + fmt::Display,
+ A: Allocator,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ <T as fmt::Display>::fmt(&**self, f)
+ }
+}
+
+impl<T, A> fmt::Debug for Box<T, A>
+where
+ T: ?Sized + fmt::Debug,
+ A: Allocator,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ <T as fmt::Debug>::fmt(&**self, f)
+ }
+}
+
+impl<T, A> Drop for Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ fn drop(&mut self) {
+ let layout = Layout::for_value::<T>(self);
+
+ // SAFETY: The pointer in `self.0` is guaranteed to be valid by the type invariant.
+ unsafe { core::ptr::drop_in_place::<T>(self.deref_mut()) };
+
+ // SAFETY:
+ // - `self.0` was previously allocated with `A`.
+ // - `layout` is equal to the `Layout` `self.0` was allocated with.
+ unsafe { A::free(self.0.cast(), layout) };
+ }
+}
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
new file mode 100644
index 000000000000..87a71fd40c3c
--- /dev/null
+++ b/rust/kernel/alloc/kvec.rs
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Implementation of [`Vec`].
+
+// May not be needed in Rust 1.87.0 (pending beta backport).
+#![allow(clippy::ptr_eq)]
+
+use super::{
+ allocator::{KVmalloc, Kmalloc, Vmalloc},
+ layout::ArrayLayout,
+ AllocError, Allocator, Box, Flags,
+};
+use core::{
+ fmt,
+ marker::PhantomData,
+ mem::{ManuallyDrop, MaybeUninit},
+ ops::Deref,
+ ops::DerefMut,
+ ops::Index,
+ ops::IndexMut,
+ ptr,
+ ptr::NonNull,
+ slice,
+ slice::SliceIndex,
+};
+
+/// Create a [`KVec`] containing the arguments.
+///
+/// New memory is allocated with `GFP_KERNEL`.
+///
+/// # Examples
+///
+/// ```
+/// let mut v = kernel::kvec![];
+/// v.push(1, GFP_KERNEL)?;
+/// assert_eq!(v, [1]);
+///
+/// let mut v = kernel::kvec![1; 3]?;
+/// v.push(4, GFP_KERNEL)?;
+/// assert_eq!(v, [1, 1, 1, 4]);
+///
+/// let mut v = kernel::kvec![1, 2, 3]?;
+/// v.push(4, GFP_KERNEL)?;
+/// assert_eq!(v, [1, 2, 3, 4]);
+///
+/// # Ok::<(), Error>(())
+/// ```
+#[macro_export]
+macro_rules! kvec {
+ () => (
+ $crate::alloc::KVec::new()
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::alloc::KVec::from_elem($elem, $n, GFP_KERNEL)
+ );
+ ($($x:expr),+ $(,)?) => (
+ match $crate::alloc::KBox::new_uninit(GFP_KERNEL) {
+ Ok(b) => Ok($crate::alloc::KVec::from($crate::alloc::KBox::write(b, [$($x),+]))),
+ Err(e) => Err(e),
+ }
+ );
+}
+
+/// The kernel's [`Vec`] type.
+///
+/// A contiguous growable array type with contents allocated with the kernel's allocators (e.g.
+/// [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`]), written `Vec<T, A>`.
+///
+/// For non-zero-sized values, a [`Vec`] will use the given allocator `A` for its allocation. For
+/// the most common allocators the type aliases [`KVec`], [`VVec`] and [`KVVec`] exist.
+///
+/// For zero-sized types the [`Vec`]'s pointer must be `dangling_mut::<T>`; no memory is allocated.
+///
+/// Generally, [`Vec`] consists of a pointer that represents the vector's backing buffer, the
+/// capacity of the vector (the number of elements that currently fit into the vector), its length
+/// (the number of elements that are currently stored in the vector) and the `Allocator` type used
+/// to allocate (and free) the backing buffer.
+///
+/// A [`Vec`] can be deconstructed into and (re-)constructed from its previously named raw parts
+/// and manually modified.
+///
+/// If required, [`Vec`]'s backing buffer is automatically increased (re-allocated) when elements
+/// are added to the vector.
+///
+/// # Invariants
+///
+/// - `self.ptr` is always properly aligned and either points to memory allocated with `A` or, for
+/// zero-sized types, is a dangling, well aligned pointer.
+///
+/// - `self.len` always represents the exact number of elements stored in the vector.
+///
+/// - `self.layout` represents the absolute number of elements that can be stored within the vector
+/// without re-allocation. For ZSTs `self.layout`'s capacity is zero. However, it is legal for the
+/// backing buffer to be larger than `layout`.
+///
+/// - The `Allocator` type `A` of the vector is the exact same `Allocator` type the backing buffer
+/// was allocated with (and must be freed with).
+pub struct Vec<T, A: Allocator> {
+ ptr: NonNull<T>,
+ /// Represents the actual buffer size as `cap` times `size_of::<T>` bytes.
+ ///
+ /// Note: This isn't quite the same as `Self::capacity`, which returns the total number of
+ /// elements that can be stored without reallocating; for ZSTs `Self::capacity` is
+ /// `usize::MAX`, whereas `self.layout` has zero capacity.
+ layout: ArrayLayout<T>,
+ len: usize,
+ _p: PhantomData<A>,
+}
+
+/// Type alias for [`Vec`] with a [`Kmalloc`] allocator.
+///
+/// # Examples
+///
+/// ```
+/// let mut v = KVec::new();
+/// v.push(1, GFP_KERNEL)?;
+/// assert_eq!(&v, &[1]);
+///
+/// # Ok::<(), Error>(())
+/// ```
+pub type KVec<T> = Vec<T, Kmalloc>;
+
+/// Type alias for [`Vec`] with a [`Vmalloc`] allocator.
+///
+/// # Examples
+///
+/// ```
+/// let mut v = VVec::new();
+/// v.push(1, GFP_KERNEL)?;
+/// assert_eq!(&v, &[1]);
+///
+/// # Ok::<(), Error>(())
+/// ```
+pub type VVec<T> = Vec<T, Vmalloc>;
+
+/// Type alias for [`Vec`] with a [`KVmalloc`] allocator.
+///
+/// # Examples
+///
+/// ```
+/// let mut v = KVVec::new();
+/// v.push(1, GFP_KERNEL)?;
+/// assert_eq!(&v, &[1]);
+///
+/// # Ok::<(), Error>(())
+/// ```
+pub type KVVec<T> = Vec<T, KVmalloc>;
+
+// SAFETY: `Vec` is `Send` if `T` is `Send` because `Vec` owns its elements.
+unsafe impl<T, A> Send for Vec<T, A>
+where
+ T: Send,
+ A: Allocator,
+{
+}
+
+// SAFETY: `Vec` is `Sync` if `T` is `Sync` because `Vec` owns its elements.
+unsafe impl<T, A> Sync for Vec<T, A>
+where
+ T: Sync,
+ A: Allocator,
+{
+}
+
+impl<T, A> Vec<T, A>
+where
+ A: Allocator,
+{
+ #[inline]
+ const fn is_zst() -> bool {
+ core::mem::size_of::<T>() == 0
+ }
+
+ /// Returns the number of elements that can be stored within the vector without allocating
+ /// additional memory.
+ pub fn capacity(&self) -> usize {
+ if const { Self::is_zst() } {
+ usize::MAX
+ } else {
+ self.layout.len()
+ }
+ }
+
+ /// Returns the number of elements stored within the vector.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Forcefully sets `self.len` to `new_len`.
+ ///
+ /// # Safety
+ ///
+ /// - `new_len` must be less than or equal to [`Self::capacity`].
+ /// - If `new_len` is greater than `self.len`, all elements within the interval
+ /// [`self.len`, `new_len`) must be initialized.
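+ ///
+ /// # Examples
+ ///
+ /// A sketch of manually initializing spare capacity before publishing it via `set_len`:
+ ///
+ /// ```
+ /// let mut v = KVec::with_capacity(2, GFP_KERNEL)?;
+ ///
+ /// v.spare_capacity_mut()[0].write(10);
+ ///
+ /// // SAFETY: The first element has been initialized above, and `1` does not exceed the
+ /// // capacity reserved above.
+ /// unsafe { v.set_len(1) };
+ ///
+ /// assert_eq!(&v, &[10]);
+ /// # Ok::<(), Error>(())
+ /// ```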
+ #[inline]
+ pub unsafe fn set_len(&mut self, new_len: usize) {
+ debug_assert!(new_len <= self.capacity());
+ self.len = new_len;
+ }
+
+ /// Returns a slice of the entire vector.
+ #[inline]
+ pub fn as_slice(&self) -> &[T] {
+ self
+ }
+
+ /// Returns a mutable slice of the entire vector.
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ self
+ }
+
+ /// Returns a mutable raw pointer to the vector's backing buffer, or, if `T` is a ZST, a
+ /// dangling raw pointer.
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ self.ptr.as_ptr()
+ }
+
+ /// Returns a raw pointer to the vector's backing buffer, or, if `T` is a ZST, a dangling raw
+ /// pointer.
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ self.ptr.as_ptr()
+ }
+
+ /// Returns `true` if the vector contains no elements, `false` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = KVec::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push(1, GFP_KERNEL)?;
+ /// assert!(!v.is_empty());
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Creates a new, empty `Vec<T, A>`.
+ ///
+ /// This method does not allocate by itself.
+ #[inline]
+ pub const fn new() -> Self {
+ // INVARIANT: Since this is a new, empty `Vec` with no backing memory yet,
+ // - `ptr` is a properly aligned dangling pointer for type `T`,
+ // - `layout` is an empty `ArrayLayout` (zero capacity)
+ // - `len` is zero, since no elements can be or have been stored,
+ // - `A` is always valid.
+ Self {
+ ptr: NonNull::dangling(),
+ layout: ArrayLayout::empty(),
+ len: 0,
+ _p: PhantomData::<A>,
+ }
+ }
+
+ /// Returns a slice of `MaybeUninit<T>` for the remaining spare capacity of the vector.
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
+ // SAFETY:
+ // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is
+ // guaranteed to be part of the same allocated object.
+ // - `self.len` can not overflow `isize`.
+ let ptr = unsafe { self.as_mut_ptr().add(self.len) } as *mut MaybeUninit<T>;
+
+ // SAFETY: The memory between `self.len` and `self.capacity` is guaranteed to be allocated
+ // and valid, but uninitialized.
+ unsafe { slice::from_raw_parts_mut(ptr, self.capacity() - self.len) }
+ }
+
+ /// Appends an element to the back of the [`Vec`] instance.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = KVec::new();
+ /// v.push(1, GFP_KERNEL)?;
+ /// assert_eq!(&v, &[1]);
+ ///
+ /// v.push(2, GFP_KERNEL)?;
+ /// assert_eq!(&v, &[1, 2]);
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> {
+ self.reserve(1, flags)?;
+
+ // SAFETY:
+ // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is
+ // guaranteed to be part of the same allocated object.
+ // - `self.len` can not overflow `isize`.
+ let ptr = unsafe { self.as_mut_ptr().add(self.len) };
+
+ // SAFETY:
+ // - `ptr` is properly aligned and valid for writes.
+ unsafe { core::ptr::write(ptr, v) };
+
+ // SAFETY: We just initialized the first spare entry, so it is safe to increase the length
+ // by 1. We also know that the new length is <= capacity because of the previous call to
+ // `reserve` above.
+ unsafe { self.set_len(self.len() + 1) };
+ Ok(())
+ }
+
+ /// Creates a new [`Vec`] instance with at least the given capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = KVec::<u32>::with_capacity(20, GFP_KERNEL)?;
+ ///
+ /// assert!(v.capacity() >= 20);
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError> {
+ let mut v = Vec::new();
+
+ v.reserve(capacity, flags)?;
+
+ Ok(v)
+ }
+
+ /// Creates a `Vec<T, A>` from a pointer, a length and a capacity using the allocator `A`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = kernel::kvec![1, 2, 3]?;
+ /// v.reserve(1, GFP_KERNEL)?;
+ ///
+ /// let (mut ptr, mut len, cap) = v.into_raw_parts();
+ ///
+ /// // SAFETY: We've just reserved memory for another element.
+ /// unsafe { ptr.add(len).write(4) };
+ /// len += 1;
+ ///
+ /// // SAFETY: We only wrote an additional element at the end of the `KVec`'s buffer and
+ /// // correspondingly increased the length of the `KVec` by one. Otherwise, we construct it
+ /// // from the exact same raw parts.
+ /// let v = unsafe { KVec::from_raw_parts(ptr, len, cap) };
+ ///
+ /// assert_eq!(v, [1, 2, 3, 4]);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// If `T` is a ZST:
+ ///
+ /// - `ptr` must be a dangling, well aligned pointer.
+ ///
+ /// Otherwise:
+ ///
+ /// - `ptr` must have been allocated with the allocator `A`.
+ /// - `ptr` must satisfy or exceed the alignment requirements of `T`.
+ /// - `ptr` must point to memory with a size of at least `size_of::<T>() * capacity` bytes.
+ /// - The allocated size in bytes must not be larger than `isize::MAX`.
+ /// - `length` must be less than or equal to `capacity`.
+ /// - The first `length` elements must be initialized values of type `T`.
+ ///
+ /// It is also valid to create an empty `Vec` passing a dangling pointer for `ptr` and zero for
+ /// `cap` and `len`.
+ pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
+ let layout = if Self::is_zst() {
+ ArrayLayout::empty()
+ } else {
+ // SAFETY: By the safety requirements of this function, `capacity * size_of::<T>()` is
+ // smaller than `isize::MAX`.
+ unsafe { ArrayLayout::new_unchecked(capacity) }
+ };
+
+ // INVARIANT: For ZSTs, we store an empty `ArrayLayout`, all other type invariants are
+ // covered by the safety requirements of this function.
+ Self {
+ // SAFETY: By the safety requirements, `ptr` is either dangling or pointing to a valid
+ // memory allocation, allocated with `A`.
+ ptr: unsafe { NonNull::new_unchecked(ptr) },
+ layout,
+ len: length,
+ _p: PhantomData::<A>,
+ }
+ }
+
+ /// Consumes the `Vec<T, A>` and returns its raw components `pointer`, `length` and `capacity`.
+ ///
+ /// This will not run the destructor of the contained elements and for non-ZSTs the allocation
+ /// will stay alive indefinitely. Use [`Vec::from_raw_parts`] to recover the [`Vec`], drop the
+ /// elements and free the allocation, if any.
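+ ///
+ /// # Examples
+ ///
+ /// A round-trip sketch through the raw parts:
+ ///
+ /// ```
+ /// let v = kernel::kvec![1, 2]?;
+ /// let (ptr, len, cap) = v.into_raw_parts();
+ ///
+ /// assert_eq!(len, 2);
+ ///
+ /// // SAFETY: The raw parts are passed back unmodified, exactly as returned above.
+ /// let v = unsafe { KVec::from_raw_parts(ptr, len, cap) };
+ ///
+ /// assert_eq!(v, [1, 2]);
+ /// # Ok::<(), Error>(())
+ /// ```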
+ pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
+ let mut me = ManuallyDrop::new(self);
+ let len = me.len();
+ let capacity = me.capacity();
+ let ptr = me.as_mut_ptr();
+ (ptr, len, capacity)
+ }
+
+ /// Ensures that the capacity exceeds the length by at least `additional` elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = KVec::new();
+ /// v.push(1, GFP_KERNEL)?;
+ ///
+ /// v.reserve(10, GFP_KERNEL)?;
+ /// let cap = v.capacity();
+ /// assert!(cap >= 10);
+ ///
+ /// v.reserve(10, GFP_KERNEL)?;
+ /// let new_cap = v.capacity();
+ /// assert_eq!(new_cap, cap);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> {
+ let len = self.len();
+ let cap = self.capacity();
+
+ if cap - len >= additional {
+ return Ok(());
+ }
+
+ if Self::is_zst() {
+ // The capacity is already `usize::MAX` for ZSTs, we can't go higher.
+ return Err(AllocError);
+ }
+
+ // We know that `cap <= isize::MAX` because of the type invariants of `Self`. So the
+ // multiplication by two won't overflow.
+ let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?);
+ let layout = ArrayLayout::new(new_cap).map_err(|_| AllocError)?;
+
+ // SAFETY:
+ // - `ptr` is valid because it's either `None` or comes from a previous call to
+ // `A::realloc`.
+ // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+ let ptr = unsafe {
+ A::realloc(
+ Some(self.ptr.cast()),
+ layout.into(),
+ self.layout.into(),
+ flags,
+ )?
+ };
+
+ // INVARIANT:
+ // - `layout` is some `ArrayLayout::<T>`,
+ // - `ptr` has been created by `A::realloc` from `layout`.
+ self.ptr = ptr.cast();
+ self.layout = layout;
+
+ Ok(())
+ }
+}
+
+impl<T: Clone, A: Allocator> Vec<T, A> {
+ /// Extend the vector by `n` clones of `value`.
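+ ///
+ /// # Examples
+ ///
+ /// Extending a vector with repeated clones of a value:
+ ///
+ /// ```
+ /// let mut v = kernel::kvec![1]?;
+ ///
+ /// v.extend_with(2, 9, GFP_KERNEL)?;
+ /// assert_eq!(&v, &[1, 9, 9]);
+ /// # Ok::<(), Error>(())
+ /// ```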
+ pub fn extend_with(&mut self, n: usize, value: T, flags: Flags) -> Result<(), AllocError> {
+ if n == 0 {
+ return Ok(());
+ }
+
+ self.reserve(n, flags)?;
+
+ let spare = self.spare_capacity_mut();
+
+ for item in spare.iter_mut().take(n - 1) {
+ item.write(value.clone());
+ }
+
+ // We can write the last element directly without cloning needlessly.
+ spare[n - 1].write(value);
+
+ // SAFETY:
+ // - `self.len() + n <= self.capacity()` due to the call to `reserve` above,
+ // - the loop and the line above initialized the next `n` elements.
+ unsafe { self.set_len(self.len() + n) };
+
+ Ok(())
+ }
+
+ /// Pushes clones of the elements of slice into the [`Vec`] instance.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = KVec::new();
+ /// v.push(1, GFP_KERNEL)?;
+ ///
+ /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?;
+ /// assert_eq!(&v, &[1, 20, 30, 40]);
+ ///
+ /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?;
+ /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]);
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> {
+ self.reserve(other.len(), flags)?;
+ for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) {
+ slot.write(item.clone());
+ }
+
+ // SAFETY:
+ // - `other.len()` spare entries have just been initialized, so it is safe to increase
+ // the length by the same number.
+ // - `self.len() + other.len() <= self.capacity()` is guaranteed by the preceding `reserve`
+ // call.
+ unsafe { self.set_len(self.len() + other.len()) };
+ Ok(())
+ }
+
+ /// Create a new `Vec<T, A>` and extend it by `n` clones of `value`.
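+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = KVec::from_elem(7_u8, 3, GFP_KERNEL)?;
+ ///
+ /// assert_eq!(&v, &[7, 7, 7]);
+ /// # Ok::<(), Error>(())
+ /// ```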
+ pub fn from_elem(value: T, n: usize, flags: Flags) -> Result<Self, AllocError> {
+ let mut v = Self::with_capacity(n, flags)?;
+
+ v.extend_with(n, value, flags)?;
+
+ Ok(v)
+ }
+}
+
+impl<T, A> Drop for Vec<T, A>
+where
+ A: Allocator,
+{
+ fn drop(&mut self) {
+ // SAFETY: `self.as_mut_ptr` is guaranteed to be valid by the type invariant.
+ unsafe {
+ ptr::drop_in_place(core::ptr::slice_from_raw_parts_mut(
+ self.as_mut_ptr(),
+ self.len,
+ ))
+ };
+
+ // SAFETY:
+ // - `self.ptr` was previously allocated with `A`.
+ // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+ unsafe { A::free(self.ptr.cast(), self.layout.into()) };
+ }
+}
+
+impl<T, A, const N: usize> From<Box<[T; N], A>> for Vec<T, A>
+where
+ A: Allocator,
+{
+ fn from(b: Box<[T; N], A>) -> Vec<T, A> {
+ let len = b.len();
+ let ptr = Box::into_raw(b);
+
+ // SAFETY:
+ // - `b` has been allocated with `A`,
+ // - `ptr` fulfills the alignment requirements for `T`,
+ // - `ptr` points to memory with at least a size of `size_of::<T>() * len`,
+ // - all elements within `b` are initialized values of `T`,
+ // - `len` does not exceed `isize::MAX`.
+ unsafe { Vec::from_raw_parts(ptr as _, len, len) }
+ }
+}
+
+impl<T> Default for KVec<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<T, A> Deref for Vec<T, A>
+where
+ A: Allocator,
+{
+ type Target = [T];
+
+ #[inline]
+ fn deref(&self) -> &[T] {
+ // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len`
+ // initialized elements of type `T`.
+ unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
+ }
+}
+
+impl<T, A> DerefMut for Vec<T, A>
+where
+ A: Allocator,
+{
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [T] {
+ // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len`
+ // initialized elements of type `T`.
+ unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+ }
+}
+
+impl<T: Eq, A> Eq for Vec<T, A> where A: Allocator {}
+
+impl<T, I: SliceIndex<[T]>, A> Index<I> for Vec<T, A>
+where
+ A: Allocator,
+{
+ type Output = I::Output;
+
+ #[inline]
+ fn index(&self, index: I) -> &Self::Output {
+ Index::index(&**self, index)
+ }
+}
+
+impl<T, I: SliceIndex<[T]>, A> IndexMut<I> for Vec<T, A>
+where
+ A: Allocator,
+{
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
+macro_rules! impl_slice_eq {
+ ($([$($vars:tt)*] $lhs:ty, $rhs:ty,)*) => {
+ $(
+ impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
+ where
+ T: PartialEq<U>,
+ {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
+ }
+ )*
+ }
+}
+
+impl_slice_eq! {
+ [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>,
+ [A: Allocator] Vec<T, A>, &[U],
+ [A: Allocator] Vec<T, A>, &mut [U],
+ [A: Allocator] &[T], Vec<U, A>,
+ [A: Allocator] &mut [T], Vec<U, A>,
+ [A: Allocator] Vec<T, A>, [U],
+ [A: Allocator] [T], Vec<U, A>,
+ [A: Allocator, const N: usize] Vec<T, A>, [U; N],
+ [A: Allocator, const N: usize] Vec<T, A>, &[U; N],
+}
+
+impl<'a, T, A> IntoIterator for &'a Vec<T, A>
+where
+ A: Allocator,
+{
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut Vec<T, A>
+where
+ A: Allocator,
+{
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector.
+///
+/// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the
+/// [`IntoIterator`] trait).
+///
+/// # Examples
+///
+/// ```
+/// let v = kernel::kvec![0, 1, 2]?;
+/// let iter = v.into_iter();
+///
+/// # Ok::<(), Error>(())
+/// ```
+pub struct IntoIter<T, A: Allocator> {
+ ptr: *mut T,
+ buf: NonNull<T>,
+ len: usize,
+ layout: ArrayLayout<T>,
+ _p: PhantomData<A>,
+}
+
+impl<T, A> IntoIter<T, A>
+where
+ A: Allocator,
+{
+ fn into_raw_parts(self) -> (*mut T, NonNull<T>, usize, usize) {
+ let me = ManuallyDrop::new(self);
+ let ptr = me.ptr;
+ let buf = me.buf;
+ let len = me.len;
+ let cap = me.layout.len();
+ (ptr, buf, len, cap)
+ }
+
+ /// Same as `Iterator::collect` but specialized for `Vec`'s `IntoIter`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = kernel::kvec![1, 2, 3]?;
+ /// let mut it = v.into_iter();
+ ///
+ /// assert_eq!(it.next(), Some(1));
+ ///
+ /// let v = it.collect(GFP_KERNEL);
+ /// assert_eq!(v, [2, 3]);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// # Implementation details
+ ///
+ /// Currently, we can't implement `FromIterator`. There are a couple of issues with this trait
+ /// in the kernel, namely:
+ ///
+ /// - Rust's specialization feature is unstable. This prevents us from optimizing for the
+ /// special case where `I::IntoIter` equals `Vec`'s `IntoIter` type.
+ /// - We can't use `I::IntoIter`'s type ID to work around this either, since `FromIterator`
+ /// doesn't require this type to be `'static`.
+ /// - `FromIterator::from_iter` returns `Self` instead of `Result<Self, AllocError>`, hence
+ /// we can't properly handle allocation failures.
+ /// - Neither `Iterator::collect` nor `FromIterator::from_iter` can handle additional allocation
+ /// flags.
+ ///
+ /// Instead, provide `IntoIter::collect`, such that we can at least convert an `IntoIter` into
+ /// a `Vec` again.
+ ///
+ /// Note that `IntoIter::collect` re-uses the existing backing buffer rather than allocating a
+ /// new one; the given `Flags` are only used when this buffer is shrunk to the actual number
+ /// of elements.
+ pub fn collect(self, flags: Flags) -> Vec<T, A> {
+ let old_layout = self.layout;
+ let (mut ptr, buf, len, mut cap) = self.into_raw_parts();
+ let has_advanced = ptr != buf.as_ptr();
+
+ if has_advanced {
+ // Copy the contents we have advanced to at the beginning of the buffer.
+ //
+ // SAFETY:
+ // - `ptr` is valid for reads of `len * size_of::<T>()` bytes,
+ // - `buf.as_ptr()` is valid for writes of `len * size_of::<T>()` bytes,
+ // - `ptr` and `buf.as_ptr()` are not subject to aliasing restrictions relative to
+ // each other,
+ // - both `ptr` and `buf.as_ptr()` are properly aligned.
+ unsafe { ptr::copy(ptr, buf.as_ptr(), len) };
+ ptr = buf.as_ptr();
+
+ // SAFETY: `len` is guaranteed to be smaller than `old_layout.len()`.
+ let layout = unsafe { ArrayLayout::<T>::new_unchecked(len) };
+
+ // SAFETY: `buf` points to the start of the backing buffer and `len` is guaranteed to be
+ // smaller than `cap`. Depending on the allocator `A`, this operation may shrink the
+ // buffer or leave it as it is.
+ ptr = match unsafe {
+ A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags)
+ } {
+ // If we fail to shrink, which likely can't even happen, continue with the existing
+ // buffer.
+ Err(_) => ptr,
+ Ok(ptr) => {
+ cap = len;
+ ptr.as_ptr().cast()
+ }
+ };
+ }
+
+ // SAFETY: If the iterator has been advanced, the advanced elements have been copied to
+ // the beginning of the buffer and `len` has been adjusted accordingly.
+ //
+ // - `ptr` is guaranteed to point to the start of the backing buffer.
+ // - `cap` is either the original capacity or, after shrinking the buffer, equal to `len`.
+ // - the allocator `A` is guaranteed to be unchanged since `into_iter` has been called on
+ // the original `Vec`.
+ unsafe { Vec::from_raw_parts(ptr, len, cap) }
+ }
+}
+
+impl<T, A> Iterator for IntoIter<T, A>
+where
+ A: Allocator,
+{
+ type Item = T;
+
+ /// # Examples
+ ///
+ /// ```
+ /// let v = kernel::kvec![1, 2, 3]?;
+ /// let mut it = v.into_iter();
+ ///
+ /// assert_eq!(it.next(), Some(1));
+ /// assert_eq!(it.next(), Some(2));
+ /// assert_eq!(it.next(), Some(3));
+ /// assert_eq!(it.next(), None);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ fn next(&mut self) -> Option<T> {
+ if self.len == 0 {
+ return None;
+ }
+
+ let current = self.ptr;
+
+ // SAFETY: We can't overflow; decreasing `self.len` by one every time we advance `self.ptr`
+ // by one guarantees that.
+ unsafe { self.ptr = self.ptr.add(1) };
+
+ self.len -= 1;
+
+ // SAFETY: `current` is guaranteed to point at a valid element within the buffer.
+ Some(unsafe { current.read() })
+ }
+
+ /// # Examples
+ ///
+ /// ```
+ /// let v: KVec<u32> = kernel::kvec![1, 2, 3]?;
+ /// let mut iter = v.into_iter();
+ /// let size = iter.size_hint().0;
+ ///
+ /// iter.next();
+ /// assert_eq!(iter.size_hint().0, size - 1);
+ ///
+ /// iter.next();
+ /// assert_eq!(iter.size_hint().0, size - 2);
+ ///
+ /// iter.next();
+ /// assert_eq!(iter.size_hint().0, size - 3);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+}
+
+impl<T, A> Drop for IntoIter<T, A>
+where
+ A: Allocator,
+{
+ fn drop(&mut self) {
+ // SAFETY: `self.ptr` is guaranteed to be valid by the type invariant.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.ptr, self.len)) };
+
+ // SAFETY:
+ // - `self.buf` was previously allocated with `A`.
+ // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+ unsafe { A::free(self.buf.cast(), self.layout.into()) };
+ }
+}
+
+impl<T, A> IntoIterator for Vec<T, A>
+where
+ A: Allocator,
+{
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Consumes the `Vec<T, A>` and creates an `Iterator`, which moves each value out of the
+ /// vector (from start to end).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = kernel::kvec![1, 2]?;
+ /// let mut v_iter = v.into_iter();
+ ///
+ /// let first_element: Option<u32> = v_iter.next();
+ ///
+ /// assert_eq!(first_element, Some(1));
+ /// assert_eq!(v_iter.next(), Some(2));
+ /// assert_eq!(v_iter.next(), None);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// ```
+ /// let v = kernel::kvec![];
+ /// let mut v_iter = v.into_iter();
+ ///
+ /// let first_element: Option<u32> = v_iter.next();
+ ///
+ /// assert_eq!(first_element, None);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ let buf = self.ptr;
+ let layout = self.layout;
+ let (ptr, len, _) = self.into_raw_parts();
+
+ IntoIter {
+ ptr,
+ buf,
+ len,
+ layout,
+ _p: PhantomData::<A>,
+ }
+ }
+}
diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs
new file mode 100644
index 000000000000..93ed514f7cc7
--- /dev/null
+++ b/rust/kernel/alloc/layout.rs
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory layout.
+//!
+//! Custom layout types extending or improving [`Layout`].
+
+use core::{alloc::Layout, marker::PhantomData};
+
+/// Error when constructing an [`ArrayLayout`].
+pub struct LayoutError;
+
+/// A layout for an array `[T; n]`.
+///
+/// # Invariants
+///
+/// - `len * size_of::<T>() <= isize::MAX`.
+pub struct ArrayLayout<T> {
+ len: usize,
+ _phantom: PhantomData<fn() -> T>,
+}
+
+impl<T> Clone for ArrayLayout<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+impl<T> Copy for ArrayLayout<T> {}
+
+const ISIZE_MAX: usize = isize::MAX as usize;
+
+impl<T> ArrayLayout<T> {
+ /// Creates a new layout for `[T; 0]`.
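+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::alloc::layout::ArrayLayout;
+ /// let layout = ArrayLayout::<u8>::empty();
+ ///
+ /// assert_eq!(layout.len(), 0);
+ /// assert!(layout.is_empty());
+ /// ```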
+ pub const fn empty() -> Self {
+ // INVARIANT: `0 * size_of::<T>() <= isize::MAX`.
+ Self {
+ len: 0,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Creates a new layout for `[T; len]`.
+ ///
+ /// # Errors
+ ///
+ /// When `len * size_of::<T>()` overflows or when `len * size_of::<T>() > isize::MAX`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::alloc::layout::{ArrayLayout, LayoutError};
+ /// let layout = ArrayLayout::<i32>::new(15)?;
+ /// assert_eq!(layout.len(), 15);
+ ///
+ /// // Errors because `len * size_of::<T>()` overflows.
+ /// let layout = ArrayLayout::<i32>::new(isize::MAX as usize);
+ /// assert!(layout.is_err());
+ ///
+ /// // Errors because `len * size_of::<i32>() > isize::MAX`,
+ /// // even though `len < isize::MAX`.
+ /// let layout = ArrayLayout::<i32>::new(isize::MAX as usize / 2);
+ /// assert!(layout.is_err());
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub const fn new(len: usize) -> Result<Self, LayoutError> {
+ match len.checked_mul(core::mem::size_of::<T>()) {
+ Some(size) if size <= ISIZE_MAX => {
+ // INVARIANT: We checked above that `len * size_of::<T>() <= isize::MAX`.
+ Ok(Self {
+ len,
+ _phantom: PhantomData,
+ })
+ }
+ _ => Err(LayoutError),
+ }
+ }
+
+ /// Creates a new layout for `[T; len]`.
+ ///
+ /// # Safety
+ ///
+ /// `len` must be a value for which `len * size_of::<T>() <= isize::MAX` holds.
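+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::alloc::layout::ArrayLayout;
+ /// // SAFETY: `4 * size_of::<u64>()` is well below `isize::MAX`.
+ /// let layout = unsafe { ArrayLayout::<u64>::new_unchecked(4) };
+ ///
+ /// assert_eq!(layout.len(), 4);
+ /// ```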
+ pub unsafe fn new_unchecked(len: usize) -> Self {
+ // INVARIANT: By the safety requirements of this function
+ // `len * size_of::<T>() <= isize::MAX`.
+ Self {
+ len,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Returns the number of array elements represented by this layout.
+ pub const fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns `true` when no array elements are represented by this layout.
+ pub const fn is_empty(&self) -> bool {
+ self.len == 0
+ }
+}
+
+impl<T> From<ArrayLayout<T>> for Layout {
+ fn from(value: ArrayLayout<T>) -> Self {
+ let res = Layout::array::<T>(value.len);
+ // SAFETY: By the type invariant of `ArrayLayout` we have
+ // `len * size_of::<T>() <= isize::MAX` and thus the result must be `Ok`.
+ unsafe { res.unwrap_unchecked() }
+ }
+}
diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
deleted file mode 100644
index 01ad139e19bc..000000000000
--- a/rust/kernel/allocator.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! Allocator support.
-
-use core::alloc::{GlobalAlloc, Layout};
-use core::ptr;
-
-use crate::bindings;
-
-struct KernelAllocator;
-
-/// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment.
-///
-/// # Safety
-///
-/// - `ptr` can be either null or a pointer which has been allocated by this allocator.
-/// - `new_layout` must have a non-zero size.
-unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gfp_t) -> *mut u8 {
- // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
- let layout = new_layout.pad_to_align();
-
- let mut size = layout.size();
-
- if layout.align() > bindings::ARCH_SLAB_MINALIGN {
- // The alignment requirement exceeds the slab guarantee, thus try to enlarge the size
- // to use the "power-of-two" size/alignment guarantee (see comments in `kmalloc()` for
- // more information).
- //
- // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
- // `layout.align()`, so `next_power_of_two` gives enough alignment guarantee.
- size = size.next_power_of_two();
- }
-
- // SAFETY:
- // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the
- // function safety requirement.
- // - `size` is greater than 0 since it's either a `layout.size()` (which cannot be zero
- // according to the function safety requirement) or a result from `next_power_of_two()`.
- unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags) as *mut u8 }
-}
-
-unsafe impl GlobalAlloc for KernelAllocator {
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
- // requirement.
- unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) }
- }
-
- unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
- unsafe {
- bindings::kfree(ptr as *const core::ffi::c_void);
- }
- }
-
- unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- // SAFETY:
- // - `new_size`, when rounded up to the nearest multiple of `layout.align()`, will not
- // overflow `isize` by the function safety requirement.
- // - `layout.align()` is a proper alignment (i.e. not zero and must be a power of two).
- let layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
-
- // SAFETY:
- // - `ptr` is either null or a pointer allocated by this allocator by the function safety
- // requirement.
- // - the size of `layout` is not zero because `new_size` is not zero by the function safety
- // requirement.
- unsafe { krealloc_aligned(ptr, layout, bindings::GFP_KERNEL) }
- }
-
- unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
- // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
- // requirement.
- unsafe {
- krealloc_aligned(
- ptr::null_mut(),
- layout,
- bindings::GFP_KERNEL | bindings::__GFP_ZERO,
- )
- }
- }
-}
-
-#[global_allocator]
-static ALLOCATOR: KernelAllocator = KernelAllocator;
-
-// See <https://github.com/rust-lang/rust/pull/86844>.
-#[no_mangle]
-static __rust_no_alloc_shim_is_unstable: u8 = 0;
diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs
new file mode 100644
index 000000000000..5c072960dee0
--- /dev/null
+++ b/rust/kernel/auxiliary.rs
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for the auxiliary bus.
+//!
+//! C header: [`include/linux/auxiliary_bus.h`](srctree/include/linux/auxiliary_bus.h)
+
+use crate::{
+ bindings, container_of, device,
+ device_id::RawDeviceId,
+ driver,
+ error::{to_result, Result},
+ prelude::*,
+ str::CStr,
+ types::{ForeignOwnable, Opaque},
+ ThisModule,
+};
+use core::{
+ marker::PhantomData,
+ ptr::{addr_of_mut, NonNull},
+};
+
+/// An adapter for the registration of auxiliary drivers.
+pub struct Adapter<T: Driver>(T);
+
+// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// a preceding call to `register` has been successful.
+unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
+ type RegType = bindings::auxiliary_driver;
+
+ unsafe fn register(
+ adrv: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: It's safe to set the fields of `struct auxiliary_driver` on initialization.
+ unsafe {
+ (*adrv.get()).name = name.as_char_ptr();
+ (*adrv.get()).probe = Some(Self::probe_callback);
+ (*adrv.get()).remove = Some(Self::remove_callback);
+ (*adrv.get()).id_table = T::ID_TABLE.as_ptr();
+ }
+
+ // SAFETY: `adrv` is guaranteed to be a valid `RegType`.
+ to_result(unsafe {
+ bindings::__auxiliary_driver_register(adrv.get(), module.0, name.as_char_ptr())
+ })
+ }
+
+ unsafe fn unregister(adrv: &Opaque<Self::RegType>) {
+ // SAFETY: `adrv` is guaranteed to be a valid `RegType`.
+ unsafe { bindings::auxiliary_driver_unregister(adrv.get()) }
+ }
+}
+
+impl<T: Driver + 'static> Adapter<T> {
+ extern "C" fn probe_callback(
+ adev: *mut bindings::auxiliary_device,
+ id: *const bindings::auxiliary_device_id,
+ ) -> kernel::ffi::c_int {
+ // SAFETY: The auxiliary bus only ever calls the probe callback with a valid pointer to a
+ // `struct auxiliary_device`.
+ //
+ // INVARIANT: `adev` is valid for the duration of `probe_callback()`.
+ let adev = unsafe { &*adev.cast::<Device<device::Core>>() };
+
+        // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct auxiliary_device_id`
+ // and does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*id.cast::<DeviceId>() };
+ let info = T::ID_TABLE.info(id.index());
+
+ match T::probe(adev, info) {
+ Ok(data) => {
+                // Let the `struct auxiliary_device` own a reference to the driver's private data.
+ // SAFETY: By the type invariant `adev.as_raw` returns a valid pointer to a
+ // `struct auxiliary_device`.
+ unsafe { bindings::auxiliary_set_drvdata(adev.as_raw(), data.into_foreign()) };
+ }
+ Err(err) => return Error::to_errno(err),
+ }
+
+ 0
+ }
+
+ extern "C" fn remove_callback(adev: *mut bindings::auxiliary_device) {
+ // SAFETY: The auxiliary bus only ever calls the remove callback with a valid pointer to a
+ // `struct auxiliary_device`.
+ let ptr = unsafe { bindings::auxiliary_get_drvdata(adev) };
+
+ // SAFETY: `remove_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
+ // `KBox<T>` pointer created through `KBox::into_foreign`.
+ drop(unsafe { KBox::<T>::from_foreign(ptr) });
+ }
+}
+
+/// Declares a kernel module that exposes a single auxiliary driver.
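+///
+/// # Example
+///
+/// The macro forwards its arguments to `module_driver!`. A minimal sketch, assuming a `MyDriver`
+/// type that implements [`Driver`] (all names below are placeholders):
+///
+/// ```ignore
+/// kernel::module_auxiliary_driver! {
+///     type: MyDriver,
+///     name: "my_aux_driver",
+///     license: "GPL",
+/// }
+/// ```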
+#[macro_export]
+macro_rules! module_auxiliary_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::auxiliary::Adapter<T>, { $($f)* });
+ };
+}
+
+/// Abstraction for `bindings::auxiliary_device_id`.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::auxiliary_device_id);
+
+impl DeviceId {
+ /// Create a new [`DeviceId`] from name.
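+    ///
+    /// The resulting id matches auxiliary devices named `<modname>.<name>`.
+    ///
+    /// # Examples
+    ///
+    /// A small sketch (the names are placeholders):
+    ///
+    /// ```
+    /// use kernel::{auxiliary::DeviceId, c_str};
+    ///
+    /// // Matches auxiliary devices named "mymod.myaux".
+    /// let _id = DeviceId::new(c_str!("mymod"), c_str!("myaux"));
+    /// ```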
+ pub const fn new(modname: &'static CStr, name: &'static CStr) -> Self {
+ let name = name.as_bytes_with_nul();
+ let modname = modname.as_bytes_with_nul();
+
+ // TODO: Replace with `bindings::auxiliary_device_id::default()` once stabilized for
+ // `const`.
+ //
+ // SAFETY: FFI type is valid to be zero-initialized.
+ let mut id: bindings::auxiliary_device_id = unsafe { core::mem::zeroed() };
+
+ let mut i = 0;
+ while i < modname.len() {
+ id.name[i] = modname[i];
+ i += 1;
+ }
+
+ // Reuse the space of the NULL terminator.
+ id.name[i - 1] = b'.';
+
+ let mut j = 0;
+ while j < name.len() {
+ id.name[i] = name[j];
+ i += 1;
+ j += 1;
+ }
+
+ Self(id)
+ }
+}
+
+// SAFETY:
+// * `DeviceId` is a `#[repr(transparent)]` wrapper of `auxiliary_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+// * `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::auxiliary_device_id;
+
+ const DRIVER_DATA_OFFSET: usize =
+ core::mem::offset_of!(bindings::auxiliary_device_id, driver_data);
+
+ fn index(&self) -> usize {
+ self.0.driver_data
+ }
+}
+
+/// IdTable type for auxiliary drivers.
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// Create an auxiliary `IdTable` with its alias for modpost.
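+///
+/// # Example
+///
+/// A minimal sketch declaring a table with a single id and no per-id info (the names are
+/// placeholders):
+///
+/// ```ignore
+/// use kernel::{auxiliary, c_str};
+///
+/// kernel::auxiliary_device_table!(
+///     AUX_TABLE,
+///     MODULE_AUX_TABLE,
+///     (),
+///     [(auxiliary::DeviceId::new(c_str!("mymod"), c_str!("myaux")), ())]
+/// );
+/// ```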
+#[macro_export]
+macro_rules! auxiliary_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::auxiliary::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("auxiliary", $module_table_name, $table_name);
+ };
+}
+
+/// The auxiliary driver trait.
+///
+/// Drivers must implement this trait in order to get an auxiliary driver registered.
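+///
+/// # Example
+///
+/// A minimal sketch of a driver implementation; `MyDriver`, the `AUX_TABLE` id table (declared
+/// via `auxiliary_device_table!`) and the unit `IdInfo` are placeholders:
+///
+/// ```ignore
+/// use kernel::{auxiliary, device, prelude::*};
+///
+/// struct MyDriver;
+///
+/// impl auxiliary::Driver for MyDriver {
+///     type IdInfo = ();
+///     const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
+///
+///     fn probe(
+///         _adev: &auxiliary::Device<device::Core>,
+///         _info: &Self::IdInfo,
+///     ) -> Result<Pin<KBox<Self>>> {
+///         Ok(KBox::new(MyDriver, GFP_KERNEL)?.into())
+///     }
+/// }
+/// ```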
+pub trait Driver {
+ /// The type holding information about each device id supported by the driver.
+ ///
+ /// TODO: Use associated_type_defaults once stabilized:
+ ///
+ /// type IdInfo: 'static = ();
+ type IdInfo: 'static;
+
+ /// The table of device ids supported by the driver.
+ const ID_TABLE: IdTable<Self::IdInfo>;
+
+ /// Auxiliary driver probe.
+ ///
+    /// Called when an auxiliary device matches a corresponding driver.
+ fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+}
+
+/// The auxiliary device representation.
+///
+/// This structure represents the Rust abstraction for a C `struct auxiliary_device`. It wraps an
+/// already existing `struct auxiliary_device` that the C side passes to the Rust code.
+///
+/// # Invariants
+///
+/// A [`Device`] instance represents a valid `struct auxiliary_device` created by the C portion of
+/// the kernel.
+#[repr(transparent)]
+pub struct Device<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::auxiliary_device>,
+ PhantomData<Ctx>,
+);
+
+impl<Ctx: device::DeviceContext> Device<Ctx> {
+ fn as_raw(&self) -> *mut bindings::auxiliary_device {
+ self.0.get()
+ }
+
+    /// Returns the auxiliary device's id.
+ pub fn id(&self) -> u32 {
+ // SAFETY: By the type invariant `self.as_raw()` is a valid pointer to a
+ // `struct auxiliary_device`.
+ unsafe { (*self.as_raw()).id }
+ }
+
+ /// Returns a reference to the parent [`device::Device`], if any.
+ pub fn parent(&self) -> Option<&device::Device> {
+ let ptr: *const Self = self;
+ // CAST: `Device<Ctx: DeviceContext>` types are transparent to each other.
+ let ptr: *const Device = ptr.cast();
+ // SAFETY: `ptr` was derived from `&self`.
+ let this = unsafe { &*ptr };
+
+ this.as_ref().parent()
+ }
+}
+
+impl Device {
+ extern "C" fn release(dev: *mut bindings::device) {
+        // SAFETY: By the type invariant of `Device`, `dev` points to the `struct device`
+        // embedded in `struct auxiliary_device`.
+ let adev = unsafe { container_of!(dev, bindings::auxiliary_device, dev) }.cast_mut();
+
+ // SAFETY: `adev` points to the memory that has been allocated in `Registration::new`, via
+ // `KBox::new(Opaque::<bindings::auxiliary_device>::zeroed(), GFP_KERNEL)`.
+ let _ = unsafe { KBox::<Opaque<bindings::auxiliary_device>>::from_raw(adev.cast()) };
+ }
+}
+
+// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
+// argument.
+kernel::impl_device_context_deref!(unsafe { Device });
+kernel::impl_device_context_into_aref!(Device);
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::get_device(self.as_ref().as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+        // CAST: `Self` is a transparent wrapper of `bindings::auxiliary_device`.
+ let adev: *mut bindings::auxiliary_device = obj.cast().as_ptr();
+
+ // SAFETY: By the type invariant of `Self`, `adev` is a pointer to a valid
+ // `struct auxiliary_device`.
+ let dev = unsafe { addr_of_mut!((*adev).dev) };
+
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::put_device(dev) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
+ fn as_ref(&self) -> &device::Device<Ctx> {
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct auxiliary_device`.
+ let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::as_ref(dev) }
+ }
+}
+
+// SAFETY: A `Device` is always reference-counted and can be released from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` can be shared among threads because all methods of `Device`
+// (i.e. `Device<Normal>`) are thread safe.
+unsafe impl Sync for Device {}
+
+/// The registration of an auxiliary device.
+///
+/// This type represents the registration of a [`struct auxiliary_device`]. When an instance of this
+/// type is dropped, its respective auxiliary device will be unregistered from the system.
+///
+/// # Invariants
+///
+/// `self.0` always holds a valid pointer to an initialized and registered
+/// [`struct auxiliary_device`].
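+///
+/// # Example
+///
+/// A sketch of creating a registration from a driver that owns a parent device; `myaux` and
+/// `mymod` are placeholders:
+///
+/// ```ignore
+/// use kernel::{auxiliary, c_str, device, prelude::*};
+///
+/// fn register(parent: &device::Device) -> Result<auxiliary::Registration> {
+///     // The new device shows up on the auxiliary bus as "mymod.myaux.0".
+///     auxiliary::Registration::new(parent, c_str!("myaux"), 0, c_str!("mymod"))
+/// }
+/// ```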
+pub struct Registration(NonNull<bindings::auxiliary_device>);
+
+impl Registration {
+ /// Create and register a new auxiliary device.
+ pub fn new(parent: &device::Device, name: &CStr, id: u32, modname: &CStr) -> Result<Self> {
+ let boxed = KBox::new(Opaque::<bindings::auxiliary_device>::zeroed(), GFP_KERNEL)?;
+ let adev = boxed.get();
+
+ // SAFETY: It's safe to set the fields of `struct auxiliary_device` on initialization.
+ unsafe {
+ (*adev).dev.parent = parent.as_raw();
+ (*adev).dev.release = Some(Device::release);
+ (*adev).name = name.as_char_ptr();
+ (*adev).id = id;
+ }
+
+ // SAFETY: `adev` is guaranteed to be a valid pointer to a `struct auxiliary_device`,
+ // which has not been initialized yet.
+ unsafe { bindings::auxiliary_device_init(adev) };
+
+ // Now that `adev` is initialized, leak the `Box`; the corresponding memory will be freed
+ // by `Device::release` when the last reference to the `struct auxiliary_device` is dropped.
+ let _ = KBox::into_raw(boxed);
+
+ // SAFETY:
+ // - `adev` is guaranteed to be a valid pointer to a `struct auxiliary_device`, which has
+        //   been initialized,
+ // - `modname.as_char_ptr()` is a NULL terminated string.
+ let ret = unsafe { bindings::__auxiliary_device_add(adev, modname.as_char_ptr()) };
+ if ret != 0 {
+ // SAFETY: `adev` is guaranteed to be a valid pointer to a `struct auxiliary_device`,
+            // which has been initialized.
+ unsafe { bindings::auxiliary_device_uninit(adev) };
+
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: `adev` is guaranteed to be non-null, since the `KBox` was allocated successfully.
+ //
+ // INVARIANT: The device will remain registered until `auxiliary_device_delete()` is called,
+ // which happens in `Self::drop()`.
+ Ok(Self(unsafe { NonNull::new_unchecked(adev) }))
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariant of `Self`, `self.0.as_ptr()` is a valid registered
+ // `struct auxiliary_device`.
+ unsafe { bindings::auxiliary_device_delete(self.0.as_ptr()) };
+
+ // This drops the reference we acquired through `auxiliary_device_init()`.
+ //
+ // SAFETY: By the type invariant of `Self`, `self.0.as_ptr()` is a valid registered
+ // `struct auxiliary_device`.
+ unsafe { bindings::auxiliary_device_uninit(self.0.as_ptr()) };
+ }
+}
+
+// SAFETY: A `Registration` of a `struct auxiliary_device` can be released from any thread.
+unsafe impl Send for Registration {}
+
+// SAFETY: `Registration` does not expose any methods or fields that need synchronization.
+unsafe impl Sync for Registration {}
diff --git a/rust/kernel/block.rs b/rust/kernel/block.rs
new file mode 100644
index 000000000000..150f710efe5b
--- /dev/null
+++ b/rust/kernel/block.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Types for working with the block layer.
+
+pub mod mq;
diff --git a/rust/kernel/block/mq.rs b/rust/kernel/block/mq.rs
new file mode 100644
index 000000000000..fb0f393c1cea
--- /dev/null
+++ b/rust/kernel/block/mq.rs
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides types for implementing block drivers that interface the
+//! blk-mq subsystem.
+//!
+//! To implement a block device driver, a Rust module must do the following:
+//!
+//! - Implement [`Operations`] for a type `T`.
+//! - Create a [`TagSet<T>`].
+//! - Create a [`GenDisk<T>`], via the [`GenDiskBuilder`].
+//! - Add the disk to the system by calling [`GenDiskBuilder::build`] passing in
+//! the `TagSet` reference.
+//!
+//! The types available in this module that have direct C counterparts are:
+//!
+//! - The [`TagSet`] type that abstracts the C type `struct tag_set`.
+//! - The [`GenDisk`] type that abstracts the C type `struct gendisk`.
+//! - The [`Request`] type that abstracts the C type `struct request`.
+//!
+//! The kernel will interface with the block device driver by calling the method
+//! implementations of the `Operations` trait.
+//!
+//! IO requests are passed to the driver as [`kernel::types::ARef<Request>`]
+//! instances. The `Request` type is a wrapper around the C `struct request`.
+//! The driver must mark end of processing by calling one of the
+//! `Request::end` methods. Failure to do so can lead to deadlock or timeout
+//! errors. Please note that the C function `blk_mq_start_request` is implicitly
+//! called when the request is queued with the driver.
+//!
+//! The `TagSet` is responsible for creating and maintaining a mapping between
+//! `Request`s and integer ids as well as carrying a pointer to the vtable
+//! generated by `Operations`. This mapping is useful for associating
+//! completions from hardware with the correct `Request` instance. The `TagSet`
+//! determines the maximum queue depth by setting the number of `Request`
+//! instances available to the driver, and it determines the number of queues to
+//! instantiate for the driver. If possible, a driver should allocate one queue
+//! per core, to keep queue data local to a core.
+//!
+//! One `TagSet` instance can be shared between multiple `GenDisk` instances.
+//! This can be useful when implementing drivers where one piece of hardware
+//! with one set of IO resources is represented to the user as multiple disks.
+//!
+//! One significant difference between block device drivers implemented with
+//! these Rust abstractions and drivers implemented in C is that the Rust
+//! drivers have to own a reference count on the `Request` type when the IO is
+//! in flight. This is to ensure that the C `struct request` instances backing
+//! the Rust `Request` instances are live while the Rust driver holds a
+//! reference to the `Request`. In addition, the conversion of an integer tag to
+//! a `Request` via the `TagSet` would not be sound without this bookkeeping.
+//!
+//! [`GenDisk`]: gen_disk::GenDisk
+//! [`GenDisk<T>`]: gen_disk::GenDisk
+//! [`GenDiskBuilder`]: gen_disk::GenDiskBuilder
+//! [`GenDiskBuilder::build`]: gen_disk::GenDiskBuilder::build
+//!
+//! # Example
+//!
+//! ```rust
+//! use kernel::{
+//! alloc::flags,
+//! block::mq::*,
+//! new_mutex,
+//! prelude::*,
+//! sync::{Arc, Mutex},
+//! types::{ARef, ForeignOwnable},
+//! };
+//!
+//! struct MyBlkDevice;
+//!
+//! #[vtable]
+//! impl Operations for MyBlkDevice {
+//!
+//! fn queue_rq(rq: ARef<Request<Self>>, _is_last: bool) -> Result {
+//! Request::end_ok(rq);
+//! Ok(())
+//! }
+//!
+//! fn commit_rqs() {}
+//! }
+//!
+//! let tagset: Arc<TagSet<MyBlkDevice>> =
+//! Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
+//! let mut disk = gen_disk::GenDiskBuilder::new()
+//! .capacity_sectors(4096)
+//! .build(format_args!("myblk"), tagset)?;
+//!
+//! # Ok::<(), kernel::error::Error>(())
+//! ```
+
+pub mod gen_disk;
+mod operations;
+mod raw_writer;
+mod request;
+mod tag_set;
+
+pub use operations::Operations;
+pub use request::Request;
+pub use tag_set::TagSet;
diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs
new file mode 100644
index 000000000000..14806e1997fd
--- /dev/null
+++ b/rust/kernel/block/mq/gen_disk.rs
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic disk abstraction.
+//!
+//! C header: [`include/linux/blkdev.h`](srctree/include/linux/blkdev.h)
+//! C header: [`include/linux/blk_mq.h`](srctree/include/linux/blk_mq.h)
+
+use crate::block::mq::{raw_writer::RawWriter, Operations, TagSet};
+use crate::{bindings, error::from_err_ptr, error::Result, sync::Arc};
+use crate::{error, static_lock_class};
+use core::fmt::{self, Write};
+
+/// A builder for [`GenDisk`].
+///
+/// Use this struct to configure and add a new [`GenDisk`] to the VFS.
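+///
+/// # Examples
+///
+/// A sketch of building a disk, assuming an existing `tagset` as in the
+/// `kernel::block::mq` module example:
+///
+/// ```ignore
+/// let disk = GenDiskBuilder::new()
+///     .rotational(false)
+///     .logical_block_size(512)?
+///     .physical_block_size(4096)?
+///     .capacity_sectors(2048) // 1 MiB at 512-byte sectors.
+///     .build(format_args!("mydisk"), tagset)?;
+/// ```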
+pub struct GenDiskBuilder {
+ rotational: bool,
+ logical_block_size: u32,
+ physical_block_size: u32,
+ capacity_sectors: u64,
+}
+
+impl Default for GenDiskBuilder {
+ fn default() -> Self {
+ Self {
+ rotational: false,
+ logical_block_size: bindings::PAGE_SIZE as u32,
+ physical_block_size: bindings::PAGE_SIZE as u32,
+ capacity_sectors: 0,
+ }
+ }
+}
+
+impl GenDiskBuilder {
+ /// Create a new instance.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Set the rotational media attribute for the device to be built.
+ pub fn rotational(mut self, rotational: bool) -> Self {
+ self.rotational = rotational;
+ self
+ }
+
+ /// Validate block size by verifying that it is between 512 and `PAGE_SIZE`,
+ /// and that it is a power of two.
+ fn validate_block_size(size: u32) -> Result {
+ if !(512..=bindings::PAGE_SIZE as u32).contains(&size) || !size.is_power_of_two() {
+ Err(error::code::EINVAL)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Set the logical block size of the device to be built.
+ ///
+    /// This method will check that the block size is a power of two and between 512
+    /// and `PAGE_SIZE`. If not, an error is returned and the block size is not set.
+ ///
+ /// This is the smallest unit the storage device can address. It is
+ /// typically 4096 bytes.
+ pub fn logical_block_size(mut self, block_size: u32) -> Result<Self> {
+ Self::validate_block_size(block_size)?;
+ self.logical_block_size = block_size;
+ Ok(self)
+ }
+
+ /// Set the physical block size of the device to be built.
+ ///
+    /// This method will check that the block size is a power of two and between 512
+    /// and `PAGE_SIZE`. If not, an error is returned and the block size is not set.
+ ///
+ /// This is the smallest unit a physical storage device can write
+ /// atomically. It is usually the same as the logical block size but may be
+ /// bigger. One example is SATA drives with 4096 byte physical block size
+ /// that expose a 512 byte logical block size to the operating system.
+ pub fn physical_block_size(mut self, block_size: u32) -> Result<Self> {
+ Self::validate_block_size(block_size)?;
+ self.physical_block_size = block_size;
+ Ok(self)
+ }
+
+ /// Set the capacity of the device to be built, in sectors (512 bytes).
+ pub fn capacity_sectors(mut self, capacity: u64) -> Self {
+ self.capacity_sectors = capacity;
+ self
+ }
+
+ /// Build a new `GenDisk` and add it to the VFS.
+ pub fn build<T: Operations>(
+ self,
+ name: fmt::Arguments<'_>,
+ tagset: Arc<TagSet<T>>,
+ ) -> Result<GenDisk<T>> {
+        // SAFETY: `bindings::queue_limits` contains only fields that are valid when zeroed.
+ let mut lim: bindings::queue_limits = unsafe { core::mem::zeroed() };
+
+ lim.logical_block_size = self.logical_block_size;
+ lim.physical_block_size = self.physical_block_size;
+ if self.rotational {
+ lim.features = bindings::BLK_FEAT_ROTATIONAL;
+ }
+
+        // SAFETY: `tagset.raw_tag_set()` points to a valid and initialized tag set.
+ let gendisk = from_err_ptr(unsafe {
+ bindings::__blk_mq_alloc_disk(
+ tagset.raw_tag_set(),
+ &mut lim,
+ core::ptr::null_mut(),
+ static_lock_class!().as_ptr(),
+ )
+ })?;
+
+ const TABLE: bindings::block_device_operations = bindings::block_device_operations {
+ submit_bio: None,
+ open: None,
+ release: None,
+ ioctl: None,
+ compat_ioctl: None,
+ check_events: None,
+ unlock_native_capacity: None,
+ getgeo: None,
+ set_read_only: None,
+ swap_slot_free_notify: None,
+ report_zones: None,
+ devnode: None,
+ alternative_gpt_sector: None,
+ get_unique_id: None,
+ // TODO: Set to THIS_MODULE. Waiting for const_refs_to_static feature to
+ // be merged (unstable in rustc 1.78 which is staged for linux 6.10)
+ // https://github.com/rust-lang/rust/issues/119618
+ owner: core::ptr::null_mut(),
+ pr_ops: core::ptr::null_mut(),
+ free_disk: None,
+ poll_bio: None,
+ };
+
+ // SAFETY: `gendisk` is a valid pointer as we initialized it above
+ unsafe { (*gendisk).fops = &TABLE };
+
+ let mut raw_writer = RawWriter::from_array(
+ // SAFETY: `gendisk` points to a valid and initialized instance. We
+ // have exclusive access, since the disk is not added to the VFS
+ // yet.
+ unsafe { &mut (*gendisk).disk_name },
+ )?;
+ raw_writer.write_fmt(name)?;
+ raw_writer.write_char('\0')?;
+
+ // SAFETY: `gendisk` points to a valid and initialized instance of
+ // `struct gendisk`. `set_capacity` takes a lock to synchronize this
+ // operation, so we will not race.
+ unsafe { bindings::set_capacity(gendisk, self.capacity_sectors) };
+
+ crate::error::to_result(
+ // SAFETY: `gendisk` points to a valid and initialized instance of
+ // `struct gendisk`.
+ unsafe {
+ bindings::device_add_disk(core::ptr::null_mut(), gendisk, core::ptr::null_mut())
+ },
+ )?;
+
+ // INVARIANT: `gendisk` was initialized above.
+ // INVARIANT: `gendisk` was added to the VFS via `device_add_disk` above.
+ Ok(GenDisk {
+ _tagset: tagset,
+ gendisk,
+ })
+ }
+}
+
+/// A generic block device.
+///
+/// # Invariants
+///
+/// - `gendisk` must always point to an initialized and valid `struct gendisk`.
+/// - `gendisk` was added to the VFS through a call to
+/// `bindings::device_add_disk`.
+pub struct GenDisk<T: Operations> {
+ _tagset: Arc<TagSet<T>>,
+ gendisk: *mut bindings::gendisk,
+}
+
+// SAFETY: `GenDisk` is an owned pointer to a `struct gendisk` and an `Arc` to a
+// `TagSet`. It is safe to send this to other threads as long as `T` is `Send`.
+unsafe impl<T: Operations + Send> Send for GenDisk<T> {}
+
+impl<T: Operations> Drop for GenDisk<T> {
+ fn drop(&mut self) {
+ // SAFETY: By type invariant, `self.gendisk` points to a valid and
+ // initialized instance of `struct gendisk`, and it was previously added
+ // to the VFS.
+ unsafe { bindings::del_gendisk(self.gendisk) };
+ }
+}
diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs
new file mode 100644
index 000000000000..864ff379dc91
--- /dev/null
+++ b/rust/kernel/block/mq/operations.rs
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides an interface for blk-mq drivers to implement.
+//!
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+
+use crate::{
+ bindings,
+ block::mq::request::RequestDataWrapper,
+ block::mq::Request,
+ error::{from_result, Result},
+ prelude::*,
+ types::ARef,
+};
+use core::{marker::PhantomData, sync::atomic::AtomicU64, sync::atomic::Ordering};
+
+/// Implement this trait to interface blk-mq as block devices.
+///
+/// To implement a block device driver, implement this trait as described in the
+/// [module level documentation]. The kernel will use the implementation of the
+/// functions defined in this trait to interface a block device driver. Note:
+/// There is no need for an exit_request() implementation, because the `drop`
+/// implementation of the [`Request`] type will be invoked automatically by
+/// the C/Rust glue logic.
+///
+/// [module level documentation]: kernel::block::mq
+#[macros::vtable]
+pub trait Operations: Sized {
+ /// Called by the kernel to queue a request with the driver. If `is_last` is
+ /// `false`, the driver is allowed to defer committing the request.
+ fn queue_rq(rq: ARef<Request<Self>>, is_last: bool) -> Result;
+
+ /// Called by the kernel to indicate that queued requests should be submitted.
+ fn commit_rqs();
+
+ /// Called by the kernel to poll the device for completed requests. Only
+ /// used for poll queues.
+ fn poll() -> bool {
+ build_error!(crate::error::VTABLE_DEFAULT_ERROR)
+ }
+}
+
+/// A vtable for blk-mq to interact with a block device driver.
+///
+/// A `bindings::blk_mq_ops` vtable is constructed from pointers to the `extern
+/// "C"` functions of this struct, exposed through the `OperationsVTable::VTABLE`.
+///
+/// For general documentation of these methods, see the kernel source
+/// documentation related to `struct blk_mq_operations` in
+/// [`include/linux/blk-mq.h`].
+///
+/// [`include/linux/blk-mq.h`]: srctree/include/linux/blk-mq.h
+pub(crate) struct OperationsVTable<T: Operations>(PhantomData<T>);
+
+impl<T: Operations> OperationsVTable<T> {
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// - The caller of this function must ensure that the pointee of `bd` is
+ /// valid for reads for the duration of this function.
+ /// - This function must be called for an initialized and live `hctx`. That
+ /// is, `Self::init_hctx_callback` was called and
+ /// `Self::exit_hctx_callback()` was not yet called.
+    /// - `(*bd).rq` must point to an initialized and live `bindings::request`.
+ /// That is, `Self::init_request_callback` was called but
+ /// `Self::exit_request_callback` was not yet called for the request.
+ /// - `(*bd).rq` must be owned by the driver. That is, the block layer must
+ /// promise to not access the request until the driver calls
+ /// `bindings::blk_mq_end_request` for the request.
+ unsafe extern "C" fn queue_rq_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ bd: *const bindings::blk_mq_queue_data,
+ ) -> bindings::blk_status_t {
+ // SAFETY: `bd.rq` is valid as required by the safety requirement for
+ // this function.
+ let request = unsafe { &*(*bd).rq.cast::<Request<T>>() };
+
+ // One refcount for the ARef, one for being in flight
+ request.wrapper_ref().refcount().store(2, Ordering::Relaxed);
+
+ // SAFETY:
+ // - We own a refcount that we took above. We pass that to `ARef`.
+ // - By the safety requirements of this function, `request` is a valid
+ // `struct request` and the private data is properly initialized.
+ // - `rq` will be alive until `blk_mq_end_request` is called and is
+ // reference counted by `ARef` until then.
+ let rq = unsafe { Request::aref_from_raw((*bd).rq) };
+
+ // SAFETY: We have exclusive access and we just set the refcount above.
+ unsafe { Request::start_unchecked(&rq) };
+
+ let ret = T::queue_rq(
+ rq,
+ // SAFETY: `bd` is valid as required by the safety requirement for
+ // this function.
+ unsafe { (*bd).last },
+ );
+
+ if let Err(e) = ret {
+ e.to_blk_status()
+ } else {
+ bindings::BLK_STS_OK as _
+ }
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn commit_rqs_callback(_hctx: *mut bindings::blk_mq_hw_ctx) {
+ T::commit_rqs()
+ }
+
+ /// This function is called by the C kernel. It is not currently
+ /// implemented, and there is no way to exercise this code path.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn complete_callback(_rq: *mut bindings::request) {}
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn poll_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ _iob: *mut bindings::io_comp_batch,
+ ) -> crate::ffi::c_int {
+ T::poll().into()
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure. This
+ /// function may only be called once before `exit_hctx_callback` is called
+ /// for the same context.
+ unsafe extern "C" fn init_hctx_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ _tagset_data: *mut crate::ffi::c_void,
+ _hctx_idx: crate::ffi::c_uint,
+ ) -> crate::ffi::c_int {
+ from_result(|| Ok(0))
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn exit_hctx_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ _hctx_idx: crate::ffi::c_uint,
+ ) {
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// - This function may only be called by blk-mq C infrastructure.
+ /// - `_set` must point to an initialized `TagSet<T>`.
+ /// - `rq` must point to an initialized `bindings::request`.
+ /// - The allocation pointed to by `rq` must be at the size of `Request`
+ /// plus the size of `RequestDataWrapper`.
+ unsafe extern "C" fn init_request_callback(
+ _set: *mut bindings::blk_mq_tag_set,
+ rq: *mut bindings::request,
+ _hctx_idx: crate::ffi::c_uint,
+ _numa_node: crate::ffi::c_uint,
+ ) -> crate::ffi::c_int {
+ from_result(|| {
+ // SAFETY: By the safety requirements of this function, `rq` points
+ // to a valid allocation.
+ let pdu = unsafe { Request::wrapper_ptr(rq.cast::<Request<T>>()) };
+
+ // SAFETY: The refcount field is allocated but not initialized, so
+ // it is valid for writes.
+ unsafe { RequestDataWrapper::refcount_ptr(pdu.as_ptr()).write(AtomicU64::new(0)) };
+
+ Ok(0)
+ })
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// - This function may only be called by blk-mq C infrastructure.
+ /// - `_set` must point to an initialized `TagSet<T>`.
+ /// - `rq` must point to an initialized and valid `Request`.
+ unsafe extern "C" fn exit_request_callback(
+ _set: *mut bindings::blk_mq_tag_set,
+ rq: *mut bindings::request,
+ _hctx_idx: crate::ffi::c_uint,
+ ) {
+ // SAFETY: The tagset invariants guarantee that all requests are allocated with extra memory
+ // for the request data.
+ let pdu = unsafe { bindings::blk_mq_rq_to_pdu(rq) }.cast::<RequestDataWrapper>();
+
+ // SAFETY: `pdu` is valid for read and write and is properly initialised.
+ unsafe { core::ptr::drop_in_place(pdu) };
+ }
+
+ const VTABLE: bindings::blk_mq_ops = bindings::blk_mq_ops {
+ queue_rq: Some(Self::queue_rq_callback),
+ queue_rqs: None,
+ commit_rqs: Some(Self::commit_rqs_callback),
+ get_budget: None,
+ put_budget: None,
+ set_rq_budget_token: None,
+ get_rq_budget_token: None,
+ timeout: None,
+ poll: if T::HAS_POLL {
+ Some(Self::poll_callback)
+ } else {
+ None
+ },
+ complete: Some(Self::complete_callback),
+ init_hctx: Some(Self::init_hctx_callback),
+ exit_hctx: Some(Self::exit_hctx_callback),
+ init_request: Some(Self::init_request_callback),
+ exit_request: Some(Self::exit_request_callback),
+ cleanup_rq: None,
+ busy: None,
+ map_queues: None,
+ #[cfg(CONFIG_BLK_DEBUG_FS)]
+ show_rq: None,
+ };
+
+ pub(crate) const fn build() -> &'static bindings::blk_mq_ops {
+ &Self::VTABLE
+ }
+}
diff --git a/rust/kernel/block/mq/raw_writer.rs b/rust/kernel/block/mq/raw_writer.rs
new file mode 100644
index 000000000000..7e2159e4f6a6
--- /dev/null
+++ b/rust/kernel/block/mq/raw_writer.rs
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::fmt::{self, Write};
+
+use crate::error::Result;
+use crate::prelude::EINVAL;
+
+/// A mutable reference to a byte buffer into which a string can be written.
+///
+/// # Invariants
+///
+/// `buffer` is always null terminated.
+pub(crate) struct RawWriter<'a> {
+ buffer: &'a mut [u8],
+ pos: usize,
+}
+
+impl<'a> RawWriter<'a> {
+ /// Create a new `RawWriter` instance.
+ fn new(buffer: &'a mut [u8]) -> Result<RawWriter<'a>> {
+ *(buffer.last_mut().ok_or(EINVAL)?) = 0;
+
+ // INVARIANT: We null terminated the buffer above.
+ Ok(Self { buffer, pos: 0 })
+ }
+
+ pub(crate) fn from_array<const N: usize>(
+ a: &'a mut [crate::ffi::c_char; N],
+ ) -> Result<RawWriter<'a>> {
+ Self::new(
+ // SAFETY: the buffer of `a` is valid for read and write as `u8` for
+ // at least `N` bytes.
+ unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr().cast::<u8>(), N) },
+ )
+ }
+}
+
+impl Write for RawWriter<'_> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ let bytes = s.as_bytes();
+ let len = bytes.len();
+
+ // We do not want to overwrite our null terminator
+ if self.pos + len > self.buffer.len() - 1 {
+ return Err(fmt::Error);
+ }
+
+ // INVARIANT: We are not overwriting the last byte
+ self.buffer[self.pos..self.pos + len].copy_from_slice(bytes);
+
+ self.pos += len;
+
+ Ok(())
+ }
+}
diff --git a/rust/kernel/block/mq/request.rs b/rust/kernel/block/mq/request.rs
new file mode 100644
index 000000000000..4a5b7ec914ef
--- /dev/null
+++ b/rust/kernel/block/mq/request.rs
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides a wrapper for the C `struct request` type.
+//!
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+
+use crate::{
+ bindings,
+ block::mq::Operations,
+ error::Result,
+ types::{ARef, AlwaysRefCounted, Opaque},
+};
+use core::{
+ marker::PhantomData,
+ ptr::NonNull,
+ sync::atomic::{AtomicU64, Ordering},
+};
+
+/// A wrapper around a blk-mq [`struct request`]. This represents an IO request.
+///
+/// # Implementation details
+///
+/// There are four states for a request that the Rust bindings care about:
+///
+/// 1. Request is owned by block layer (refcount 0).
+/// 2. Request is owned by driver but with zero [`ARef`]s in existence
+/// (refcount 1).
+/// 3. Request is owned by driver with exactly one [`ARef`] in existence
+/// (refcount 2).
+/// 4. Request is owned by driver with more than one [`ARef`] in existence
+/// (refcount > 2).
+///
+/// We need to track 1 and 2 to ensure we fail tag to request conversions for
+/// requests that are not owned by the driver.
+///
+/// We need to track 3 and 4 to ensure that it is safe to end the request and hand
+/// back ownership to the block layer.
+///
+/// The states are tracked through the private `refcount` field of
+/// `RequestDataWrapper`. This structure lives in the private data area of the C
+/// [`struct request`].
+///
+/// # Invariants
+///
+/// * `self.0` is a valid [`struct request`] created by the C portion of the
+/// kernel.
+/// * The private data area associated with this request must be an initialized
+/// and valid `RequestDataWrapper<T>`.
+/// * `self` is reference counted by atomic modification of
+/// `self.wrapper_ref().refcount()`.
+///
+/// [`struct request`]: srctree/include/linux/blk-mq.h
+///
+#[repr(transparent)]
+pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);
+
+impl<T: Operations> Request<T> {
+ /// Create an [`ARef<Request>`] from a [`struct request`] pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The caller must own a refcount on `ptr` that is transferred to the
+ /// returned [`ARef`].
+ /// * The type invariants for [`Request`] must hold for the pointee of `ptr`.
+ ///
+ /// [`struct request`]: srctree/include/linux/blk-mq.h
+ pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {
+ // INVARIANT: By the safety requirements of this function, invariants are upheld.
+ // SAFETY: By the safety requirement of this function, we own a
+ // reference count that we can pass to `ARef`.
+ unsafe { ARef::from_raw(NonNull::new_unchecked(ptr as *const Self as *mut Self)) }
+ }
+
+ /// Notify the block layer that a request is going to be processed now.
+ ///
+ /// The block layer uses this hook to do proper initializations such as
+ /// starting the timeout timer. It is a requirement that block device
+ /// drivers call this function when starting to process a request.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have exclusive ownership of `self`, that is
+ /// `self.wrapper_ref().refcount() == 2`.
+ pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
+ // SAFETY: By type invariant, `self.0` is a valid `struct request` and
+ // we have exclusive access.
+ unsafe { bindings::blk_mq_start_request(this.0.get()) };
+ }
+
+ /// Try to take exclusive ownership of `this` by dropping the refcount to 0.
+ /// This fails if `this` is not the only [`ARef`] pointing to the underlying
+ /// [`Request`].
+ ///
+ /// If the operation is successful, [`Ok`] is returned with a pointer to the
+ /// C [`struct request`]. If the operation fails, `this` is returned in the
+ /// [`Err`] variant.
+ ///
+ /// [`struct request`]: srctree/include/linux/blk-mq.h
+ fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
+ // We can race with `TagSet::tag_to_rq`
+ if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
+ 2,
+ 0,
+ Ordering::Relaxed,
+ Ordering::Relaxed,
+ ) {
+ return Err(this);
+ }
+
+ let request_ptr = this.0.get();
+ core::mem::forget(this);
+
+ Ok(request_ptr)
+ }
+
+ /// Notify the block layer that the request has been completed without errors.
+ ///
+ /// This function will return [`Err`] if `this` is not the only [`ARef`]
+ /// referencing the request.
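+    ///
+    /// # Examples
+    ///
+    /// A sketch of handling the failure case, assuming `rq` is an [`ARef<Request>`] held by the
+    /// driver:
+    ///
+    /// ```ignore
+    /// if let Err(rq) = Request::end_ok(rq) {
+    ///     // Another `ARef` to this request still exists; it cannot be ended yet.
+    /// }
+    /// ```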
+ pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
+ let request_ptr = Self::try_set_end(this)?;
+
+ // SAFETY: By type invariant, `this.0` was a valid `struct request`. The
+ // success of the call to `try_set_end` guarantees that there are no
+ // `ARef`s pointing to this request. Therefore it is safe to hand it
+ // back to the block layer.
+ unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };
+
+ Ok(())
+ }
+
+ /// Return a pointer to the [`RequestDataWrapper`] stored in the private area
+ /// of the request structure.
+ ///
+ /// # Safety
+ ///
+ /// - `this` must point to a valid allocation of size at least size of
+ /// [`Self`] plus size of [`RequestDataWrapper`].
+ pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {
+ let request_ptr = this.cast::<bindings::request>();
+ // SAFETY: By safety requirements for this function, `this` is a
+ // valid allocation.
+ let wrapper_ptr =
+ unsafe { bindings::blk_mq_rq_to_pdu(request_ptr).cast::<RequestDataWrapper>() };
+ // SAFETY: By C API contract, wrapper_ptr points to a valid allocation
+ // and is not null.
+ unsafe { NonNull::new_unchecked(wrapper_ptr) }
+ }
+
+ /// Return a reference to the [`RequestDataWrapper`] stored in the private
+ /// area of the request structure.
+ pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
+ // SAFETY: By type invariant, `self.0` is a valid allocation. Further,
+ // the private data associated with this request is initialized and
+ // valid. The existence of `&self` guarantees that the private data is
+ // valid as a shared reference.
+ unsafe { Self::wrapper_ptr(self as *const Self as *mut Self).as_ref() }
+ }
+}
+
+/// A wrapper around data stored in the private area of the C [`struct request`].
+///
+/// [`struct request`]: srctree/include/linux/blk-mq.h
+pub(crate) struct RequestDataWrapper {
+ /// The Rust request refcount has the following states:
+ ///
+ /// - 0: The request is owned by C block layer.
+ /// - 1: The request is owned by Rust abstractions but there are no [`ARef`] references to it.
+ /// - 2+: There are [`ARef`] references to the request.
+ refcount: AtomicU64,
+}
+
+impl RequestDataWrapper {
+ /// Return a reference to the refcount of the request that is embedding
+ /// `self`.
+ pub(crate) fn refcount(&self) -> &AtomicU64 {
+ &self.refcount
+ }
+
+ /// Return a pointer to the refcount of the request that is embedding the
+ /// pointee of `this`.
+ ///
+ /// # Safety
+ ///
+ /// - `this` must point to a live allocation of at least the size of `Self`.
+ pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
+ // SAFETY: Because of the safety requirements of this function, the
+ // field projection is safe.
+ unsafe { &raw mut (*this).refcount }
+ }
+}
+
+// SAFETY: Exclusive access is thread-safe for `Request`. `Request` has no `&mut
+// self` methods and `&self` methods that mutate `self` are internally
+// synchronized.
+unsafe impl<T: Operations> Send for Request<T> {}
+
+// SAFETY: Shared access is thread-safe for `Request`. `&self` methods that
+// mutate `self` are internally synchronized.
+unsafe impl<T: Operations> Sync for Request<T> {}
+
+/// Store the result of `op(target.load())` in `target`, returning the new value of
+/// `target`. For example, `atomic_relaxed_op_return(&r, |x| x + 1)` atomically
+/// increments `r` and returns the incremented value.
+fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
+ let old = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| Some(op(x)));
+
+ // SAFETY: Because the operation passed to `fetch_update` above always
+    // returns `Some`, `old` will always be `Ok`.
+ let old = unsafe { old.unwrap_unchecked() };
+
+ op(old)
+}
+
+/// Store the result of `op(target.load())` in `target` if `target.load() !=
+/// pred`, returning [`true`] if the target was updated.
+fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
+ target
+ .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
+ if x == pred {
+ None
+ } else {
+ Some(op(x))
+ }
+ })
+ .is_ok()
+}
+
+// SAFETY: All instances of `Request<T>` are reference counted. This
+// implementation of `AlwaysRefCounted` ensures that increments to the ref count
+// keep the object alive in memory at least until a matching reference count
+// decrement is executed.
+unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
+ fn inc_ref(&self) {
+ let refcount = &self.wrapper_ref().refcount();
+
+ #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
+ let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);
+
+ #[cfg(CONFIG_DEBUG_MISC)]
+ if !updated {
+ panic!("Request refcount zero on clone")
+ }
+ }
+
+ unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
+ // SAFETY: The type invariants of `ARef` guarantee that `obj` is valid
+ // for read.
+ let wrapper_ptr = unsafe { Self::wrapper_ptr(obj.as_ptr()).as_ptr() };
+ // SAFETY: The type invariant of `Request` guarantees that the private
+ // data area is initialized and valid.
+ let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };
+
+ #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
+ let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);
+
+ #[cfg(CONFIG_DEBUG_MISC)]
+ if new_refcount == 0 {
+ panic!("Request reached refcount zero in Rust abstractions");
+ }
+ }
+}
diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs
new file mode 100644
index 000000000000..bcf4214ad149
--- /dev/null
+++ b/rust/kernel/block/mq/tag_set.rs
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides the `TagSet` struct to wrap the C `struct blk_mq_tag_set`.
+//!
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+
+use core::pin::Pin;
+
+use crate::{
+ bindings,
+ block::mq::{operations::OperationsVTable, request::RequestDataWrapper, Operations},
+ error,
+ prelude::try_pin_init,
+ types::Opaque,
+};
+use core::{convert::TryInto, marker::PhantomData};
+use pin_init::{pin_data, pinned_drop, PinInit};
+
+/// A wrapper for the C `struct blk_mq_tag_set`.
+///
+/// `struct blk_mq_tag_set` contains a `struct list_head` and so must be pinned.
+///
+/// # Invariants
+///
+/// - `inner` is initialized and valid.
+#[pin_data(PinnedDrop)]
+#[repr(transparent)]
+pub struct TagSet<T: Operations> {
+ #[pin]
+ inner: Opaque<bindings::blk_mq_tag_set>,
+ _p: PhantomData<T>,
+}
+
+impl<T: Operations> TagSet<T> {
+    /// Try to create a new tag set.
+ pub fn new(
+ nr_hw_queues: u32,
+ num_tags: u32,
+ num_maps: u32,
+ ) -> impl PinInit<Self, error::Error> {
+ // SAFETY: `blk_mq_tag_set` only contains integers and pointers, which
+ // all are allowed to be 0.
+ let tag_set: bindings::blk_mq_tag_set = unsafe { core::mem::zeroed() };
+ let tag_set = core::mem::size_of::<RequestDataWrapper>()
+ .try_into()
+ .map(|cmd_size| {
+ bindings::blk_mq_tag_set {
+ ops: OperationsVTable::<T>::build(),
+ nr_hw_queues,
+                    timeout: 0, // 0 means the default, which is 30 seconds (30 * HZ) in C.
+ numa_node: bindings::NUMA_NO_NODE,
+ queue_depth: num_tags,
+ cmd_size,
+ flags: 0,
+ driver_data: core::ptr::null_mut::<crate::ffi::c_void>(),
+ nr_maps: num_maps,
+ ..tag_set
+ }
+ });
+
+ try_pin_init!(TagSet {
+ inner <- PinInit::<_, error::Error>::pin_chain(Opaque::new(tag_set?), |tag_set| {
+ // SAFETY: we do not move out of `tag_set`.
+ let tag_set = unsafe { Pin::get_unchecked_mut(tag_set) };
+ // SAFETY: `tag_set` is a reference to an initialized `blk_mq_tag_set`.
+ error::to_result( unsafe { bindings::blk_mq_alloc_tag_set(tag_set.get())})
+ }),
+ _p: PhantomData,
+ })
+ }
+
+    /// Return the pointer to the wrapped `struct blk_mq_tag_set`.
+ pub(crate) fn raw_tag_set(&self) -> *mut bindings::blk_mq_tag_set {
+ self.inner.get()
+ }
+}
+
+#[pinned_drop]
+impl<T: Operations> PinnedDrop for TagSet<T> {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: By type invariant `inner` is valid and has been properly
+ // initialized during construction.
+ unsafe { bindings::blk_mq_free_tag_set(self.inner.get()) };
+ }
+}
diff --git a/rust/kernel/build_assert.rs b/rust/kernel/build_assert.rs
index 9e37120bc69c..6331b15d7c4d 100644
--- a/rust/kernel/build_assert.rs
+++ b/rust/kernel/build_assert.rs
@@ -2,6 +2,9 @@
//! Build-time assert.
+#[doc(hidden)]
+pub use build_error::build_error;
+
/// Fails the build if the code path calling `build_error!` can possibly be executed.
///
/// If the macro is executed in const context, `build_error!` will panic.
@@ -11,7 +14,6 @@
/// # Examples
///
/// ```
-/// # use kernel::build_error;
/// #[inline]
/// fn foo(a: usize) -> usize {
/// a.checked_add(1).unwrap_or_else(|| build_error!("overflow"))
@@ -23,10 +25,10 @@
#[macro_export]
macro_rules! build_error {
() => {{
- $crate::build_error("")
+ $crate::build_assert::build_error("")
}};
($msg:expr) => {{
- $crate::build_error($msg)
+ $crate::build_assert::build_error($msg)
}};
}
@@ -73,12 +75,12 @@ macro_rules! build_error {
macro_rules! build_assert {
($cond:expr $(,)?) => {{
if !$cond {
- $crate::build_error(concat!("assertion failed: ", stringify!($cond)));
+ $crate::build_assert::build_error(concat!("assertion failed: ", stringify!($cond)));
}
}};
($cond:expr, $msg:expr) => {{
if !$cond {
- $crate::build_error($msg);
+ $crate::build_assert::build_error($msg);
}
}};
}
diff --git a/rust/kernel/clk.rs b/rust/kernel/clk.rs
new file mode 100644
index 000000000000..6041c6d07527
--- /dev/null
+++ b/rust/kernel/clk.rs
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Clock abstractions.
+//!
+//! C header: [`include/linux/clk.h`](srctree/include/linux/clk.h)
+//!
+//! Reference: <https://docs.kernel.org/driver-api/clk.html>
+
+use crate::ffi::c_ulong;
+
+/// The frequency unit.
+///
+/// Represents a frequency in hertz, wrapping a [`c_ulong`] value.
+///
+/// ## Examples
+///
+/// ```
+/// use kernel::clk::Hertz;
+///
+/// let hz = 1_000_000_000;
+/// let rate = Hertz(hz);
+///
+/// assert_eq!(rate.as_hz(), hz);
+/// assert_eq!(rate, Hertz(hz));
+/// assert_eq!(rate, Hertz::from_khz(hz / 1_000));
+/// assert_eq!(rate, Hertz::from_mhz(hz / 1_000_000));
+/// assert_eq!(rate, Hertz::from_ghz(hz / 1_000_000_000));
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Hertz(pub c_ulong);
+
+impl Hertz {
+ /// Create a new instance from kilohertz (kHz)
+ pub fn from_khz(khz: c_ulong) -> Self {
+ Self(khz * 1_000)
+ }
+
+ /// Create a new instance from megahertz (MHz)
+ pub fn from_mhz(mhz: c_ulong) -> Self {
+ Self(mhz * 1_000_000)
+ }
+
+ /// Create a new instance from gigahertz (GHz)
+ pub fn from_ghz(ghz: c_ulong) -> Self {
+ Self(ghz * 1_000_000_000)
+ }
+
+ /// Get the frequency in hertz
+ pub fn as_hz(&self) -> c_ulong {
+ self.0
+ }
+
+ /// Get the frequency in kilohertz
+ pub fn as_khz(&self) -> c_ulong {
+ self.0 / 1_000
+ }
+
+ /// Get the frequency in megahertz
+ pub fn as_mhz(&self) -> c_ulong {
+ self.0 / 1_000_000
+ }
+
+ /// Get the frequency in gigahertz
+ pub fn as_ghz(&self) -> c_ulong {
+ self.0 / 1_000_000_000
+ }
+}
+
+impl From<Hertz> for c_ulong {
+ fn from(freq: Hertz) -> Self {
+ freq.0
+ }
+}
+
+#[cfg(CONFIG_COMMON_CLK)]
+mod common_clk {
+ use super::Hertz;
+ use crate::{
+ device::Device,
+ error::{from_err_ptr, to_result, Result},
+ prelude::*,
+ };
+
+ use core::{ops::Deref, ptr};
+
+ /// A reference-counted clock.
+ ///
+ /// Rust abstraction for the C [`struct clk`].
+ ///
+ /// # Invariants
+ ///
+ /// A [`Clk`] instance holds either a pointer to a valid [`struct clk`] created by the C
+ /// portion of the kernel or a NULL pointer.
+ ///
+ /// Instances of this type are reference-counted. Calling [`Clk::get`] ensures that the
+ /// allocation remains valid for the lifetime of the [`Clk`].
+ ///
+ /// ## Examples
+ ///
+ /// The following example demonstrates how to obtain and configure a clock for a device.
+ ///
+ /// ```
+ /// use kernel::c_str;
+ /// use kernel::clk::{Clk, Hertz};
+ /// use kernel::device::Device;
+ /// use kernel::error::Result;
+ ///
+ /// fn configure_clk(dev: &Device) -> Result {
+ /// let clk = Clk::get(dev, Some(c_str!("apb_clk")))?;
+ ///
+ /// clk.prepare_enable()?;
+ ///
+ /// let expected_rate = Hertz::from_ghz(1);
+ ///
+ /// if clk.rate() != expected_rate {
+ /// clk.set_rate(expected_rate)?;
+ /// }
+ ///
+ /// clk.disable_unprepare();
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`struct clk`]: https://docs.kernel.org/driver-api/clk.html
+ #[repr(transparent)]
+ pub struct Clk(*mut bindings::clk);
+
+ impl Clk {
+ /// Gets [`Clk`] corresponding to a [`Device`] and a connection id.
+ ///
+ /// Equivalent to the kernel's [`clk_get`] API.
+ ///
+ /// [`clk_get`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_get
+ pub fn get(dev: &Device, name: Option<&CStr>) -> Result<Self> {
+ let con_id = if let Some(name) = name {
+ name.as_ptr()
+ } else {
+ ptr::null()
+ };
+
+ // SAFETY: It is safe to call [`clk_get`] for a valid device pointer.
+ //
+ // INVARIANT: The reference-count is decremented when [`Clk`] goes out of scope.
+ Ok(Self(from_err_ptr(unsafe {
+ bindings::clk_get(dev.as_raw(), con_id)
+ })?))
+ }
+
+ /// Obtain the raw [`struct clk`] pointer.
+ #[inline]
+ pub fn as_raw(&self) -> *mut bindings::clk {
+ self.0
+ }
+
+ /// Enable the clock.
+ ///
+ /// Equivalent to the kernel's [`clk_enable`] API.
+ ///
+ /// [`clk_enable`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_enable
+ #[inline]
+ pub fn enable(&self) -> Result {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_enable`].
+ to_result(unsafe { bindings::clk_enable(self.as_raw()) })
+ }
+
+ /// Disable the clock.
+ ///
+ /// Equivalent to the kernel's [`clk_disable`] API.
+ ///
+ /// [`clk_disable`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_disable
+ #[inline]
+ pub fn disable(&self) {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_disable`].
+ unsafe { bindings::clk_disable(self.as_raw()) };
+ }
+
+ /// Prepare the clock.
+ ///
+ /// Equivalent to the kernel's [`clk_prepare`] API.
+ ///
+ /// [`clk_prepare`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_prepare
+ #[inline]
+ pub fn prepare(&self) -> Result {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_prepare`].
+ to_result(unsafe { bindings::clk_prepare(self.as_raw()) })
+ }
+
+ /// Unprepare the clock.
+ ///
+ /// Equivalent to the kernel's [`clk_unprepare`] API.
+ ///
+ /// [`clk_unprepare`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_unprepare
+ #[inline]
+ pub fn unprepare(&self) {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_unprepare`].
+ unsafe { bindings::clk_unprepare(self.as_raw()) };
+ }
+
+ /// Prepare and enable the clock.
+ ///
+ /// Equivalent to calling [`Clk::prepare`] followed by [`Clk::enable`].
+ #[inline]
+ pub fn prepare_enable(&self) -> Result {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_prepare_enable`].
+ to_result(unsafe { bindings::clk_prepare_enable(self.as_raw()) })
+ }
+
+ /// Disable and unprepare the clock.
+ ///
+ /// Equivalent to calling [`Clk::disable`] followed by [`Clk::unprepare`].
+ #[inline]
+ pub fn disable_unprepare(&self) {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_disable_unprepare`].
+ unsafe { bindings::clk_disable_unprepare(self.as_raw()) };
+ }
+
+ /// Get clock's rate.
+ ///
+ /// Equivalent to the kernel's [`clk_get_rate`] API.
+ ///
+ /// [`clk_get_rate`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_get_rate
+ #[inline]
+ pub fn rate(&self) -> Hertz {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_get_rate`].
+ Hertz(unsafe { bindings::clk_get_rate(self.as_raw()) })
+ }
+
+ /// Set clock's rate.
+ ///
+ /// Equivalent to the kernel's [`clk_set_rate`] API.
+ ///
+ /// [`clk_set_rate`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_set_rate
+ #[inline]
+ pub fn set_rate(&self, rate: Hertz) -> Result {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for
+ // [`clk_set_rate`].
+ to_result(unsafe { bindings::clk_set_rate(self.as_raw(), rate.as_hz()) })
+ }
+ }
+
+ impl Drop for Clk {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, self.as_raw() is a valid argument for [`clk_put`].
+ unsafe { bindings::clk_put(self.as_raw()) };
+ }
+ }
+
+ /// A reference-counted optional clock.
+ ///
+ /// A lightweight wrapper around an optional [`Clk`]. An [`OptionalClk`] represents a [`Clk`]
+ /// that a driver can function without but may improve performance or enable additional
+ /// features when available.
+ ///
+ /// # Invariants
+ ///
+ /// An [`OptionalClk`] instance encapsulates a [`Clk`] with either a valid [`struct clk`] or
+ /// `NULL` pointer.
+ ///
+ /// Instances of this type are reference-counted. Calling [`OptionalClk::get`] ensures that the
+ /// allocation remains valid for the lifetime of the [`OptionalClk`].
+ ///
+ /// ## Examples
+ ///
+ /// The following example demonstrates how to obtain and configure an optional clock for a
+ /// device. The code functions correctly whether or not the clock is available.
+ ///
+ /// ```
+ /// use kernel::c_str;
+ /// use kernel::clk::{OptionalClk, Hertz};
+ /// use kernel::device::Device;
+ /// use kernel::error::Result;
+ ///
+ /// fn configure_clk(dev: &Device) -> Result {
+ /// let clk = OptionalClk::get(dev, Some(c_str!("apb_clk")))?;
+ ///
+ /// clk.prepare_enable()?;
+ ///
+ /// let expected_rate = Hertz::from_ghz(1);
+ ///
+ /// if clk.rate() != expected_rate {
+ /// clk.set_rate(expected_rate)?;
+ /// }
+ ///
+ /// clk.disable_unprepare();
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`struct clk`]: https://docs.kernel.org/driver-api/clk.html
+ pub struct OptionalClk(Clk);
+
+ impl OptionalClk {
+        /// Gets an [`OptionalClk`] corresponding to a [`Device`] and a connection id.
+ ///
+ /// Equivalent to the kernel's [`clk_get_optional`] API.
+ ///
+ /// [`clk_get_optional`]:
+ /// https://docs.kernel.org/core-api/kernel-api.html#c.clk_get_optional
+ pub fn get(dev: &Device, name: Option<&CStr>) -> Result<Self> {
+ let con_id = if let Some(name) = name {
+ name.as_ptr()
+ } else {
+ ptr::null()
+ };
+
+ // SAFETY: It is safe to call [`clk_get_optional`] for a valid device pointer.
+ //
+ // INVARIANT: The reference-count is decremented when [`OptionalClk`] goes out of
+ // scope.
+ Ok(Self(Clk(from_err_ptr(unsafe {
+ bindings::clk_get_optional(dev.as_raw(), con_id)
+ })?)))
+ }
+ }
+
+ // Make [`OptionalClk`] behave like [`Clk`].
+ impl Deref for OptionalClk {
+ type Target = Clk;
+
+ fn deref(&self) -> &Clk {
+ &self.0
+ }
+ }
+}
+
+#[cfg(CONFIG_COMMON_CLK)]
+pub use common_clk::*;
diff --git a/rust/kernel/configfs.rs b/rust/kernel/configfs.rs
new file mode 100644
index 000000000000..b93ac7b0bebc
--- /dev/null
+++ b/rust/kernel/configfs.rs
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! configfs interface: Userspace-driven Kernel Object Configuration
+//!
+//! configfs is an in-memory pseudo file system for configuration of kernel
+//! modules. Please see the [C documentation] for details and intended use of
+//! configfs.
+//!
+//! This module does not support the following configfs features:
+//!
+//! - Items. All group children are groups.
+//! - Symlink support.
+//! - `disconnect_notify` hook.
+//! - Default groups.
+//!
+//! See the [`rust_configfs.rs`] sample for a full example use of this module.
+//!
+//! C header: [`include/linux/configfs.h`](srctree/include/linux/configfs.h)
+//!
+//! # Example
+//!
+//! ```ignore
+//! use kernel::alloc::flags;
+//! use kernel::c_str;
+//! use kernel::configfs_attrs;
+//! use kernel::configfs;
+//! use kernel::new_mutex;
+//! use kernel::page::PAGE_SIZE;
+//! use kernel::sync::Mutex;
+//! use kernel::ThisModule;
+//!
+//! #[pin_data]
+//! struct RustConfigfs {
+//! #[pin]
+//! config: configfs::Subsystem<Configuration>,
+//! }
+//!
+//! impl kernel::InPlaceModule for RustConfigfs {
+//! fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+//! pr_info!("Rust configfs sample (init)\n");
+//!
+//! let item_type = configfs_attrs! {
+//! container: configfs::Subsystem<Configuration>,
+//! data: Configuration,
+//! attributes: [
+//! message: 0,
+//! bar: 1,
+//! ],
+//! };
+//!
+//! try_pin_init!(Self {
+//! config <- configfs::Subsystem::new(
+//! c_str!("rust_configfs"), item_type, Configuration::new()
+//! ),
+//! })
+//! }
+//! }
+//!
+//! #[pin_data]
+//! struct Configuration {
+//! message: &'static CStr,
+//! #[pin]
+//! bar: Mutex<(KBox<[u8; PAGE_SIZE]>, usize)>,
+//! }
+//!
+//! impl Configuration {
+//! fn new() -> impl PinInit<Self, Error> {
+//! try_pin_init!(Self {
+//! message: c_str!("Hello World\n"),
+//! bar <- new_mutex!((KBox::new([0; PAGE_SIZE], flags::GFP_KERNEL)?, 0)),
+//! })
+//! }
+//! }
+//!
+//! #[vtable]
+//! impl configfs::AttributeOperations<0> for Configuration {
+//! type Data = Configuration;
+//!
+//! fn show(container: &Configuration, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+//! pr_info!("Show message\n");
+//! let data = container.message;
+//! page[0..data.len()].copy_from_slice(data);
+//! Ok(data.len())
+//! }
+//! }
+//!
+//! #[vtable]
+//! impl configfs::AttributeOperations<1> for Configuration {
+//! type Data = Configuration;
+//!
+//! fn show(container: &Configuration, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+//! pr_info!("Show bar\n");
+//! let guard = container.bar.lock();
+//! let data = guard.0.as_slice();
+//! let len = guard.1;
+//! page[0..len].copy_from_slice(&data[0..len]);
+//! Ok(len)
+//! }
+//!
+//! fn store(container: &Configuration, page: &[u8]) -> Result {
+//! pr_info!("Store bar\n");
+//! let mut guard = container.bar.lock();
+//! guard.0[0..page.len()].copy_from_slice(page);
+//! guard.1 = page.len();
+//! Ok(())
+//! }
+//! }
+//! ```
+//!
+//! [C documentation]: srctree/Documentation/filesystems/configfs.rst
+//! [`rust_configfs.rs`]: srctree/samples/rust/rust_configfs.rs
+
+use crate::alloc::flags;
+use crate::container_of;
+use crate::page::PAGE_SIZE;
+use crate::prelude::*;
+use crate::str::CString;
+use crate::sync::Arc;
+use crate::sync::ArcBorrow;
+use crate::types::Opaque;
+use core::cell::UnsafeCell;
+use core::marker::PhantomData;
+
+/// A configfs subsystem.
+///
+/// This is the top level entrypoint for a configfs hierarchy. To register
+/// with configfs, embed a field of this type into your kernel module struct.
+#[pin_data(PinnedDrop)]
+pub struct Subsystem<Data> {
+ #[pin]
+ subsystem: Opaque<bindings::configfs_subsystem>,
+ #[pin]
+ data: Data,
+}
+
+// SAFETY: We do not provide any operations on `Subsystem`.
+unsafe impl<Data> Sync for Subsystem<Data> {}
+
+// SAFETY: Ownership of `Subsystem` can safely be transferred to other threads.
+unsafe impl<Data> Send for Subsystem<Data> {}
+
+impl<Data> Subsystem<Data> {
+ /// Create an initializer for a [`Subsystem`].
+ ///
+    /// The subsystem will appear in configfs as a directory with the name
+    /// given by `name`. The attributes available in the directory are
+    /// specified by `item_type`.
+ pub fn new(
+ name: &'static CStr,
+ item_type: &'static ItemType<Subsystem<Data>, Data>,
+ data: impl PinInit<Data, Error>,
+ ) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ subsystem <- pin_init::zeroed().chain(
+ |place: &mut Opaque<bindings::configfs_subsystem>| {
+ // SAFETY: We initialized the required fields of `place.group` above.
+ unsafe {
+ bindings::config_group_init_type_name(
+ &mut (*place.get()).su_group,
+ name.as_ptr(),
+ item_type.as_ptr(),
+ )
+ };
+
+ // SAFETY: `place.su_mutex` is valid for use as a mutex.
+ unsafe {
+ bindings::__mutex_init(
+ &mut (*place.get()).su_mutex,
+ kernel::optional_name!().as_char_ptr(),
+ kernel::static_lock_class!().as_ptr(),
+ )
+ }
+ Ok(())
+ }
+ ),
+ data <- data,
+ })
+ .pin_chain(|this| {
+ crate::error::to_result(
+ // SAFETY: We initialized `this.subsystem` according to C API contract above.
+ unsafe { bindings::configfs_register_subsystem(this.subsystem.get()) },
+ )
+ })
+ }
+}
+
+#[pinned_drop]
+impl<Data> PinnedDrop for Subsystem<Data> {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: We registered `self.subsystem` in the initializer returned by `Self::new`.
+ unsafe { bindings::configfs_unregister_subsystem(self.subsystem.get()) };
+ // SAFETY: We initialized the mutex in `Subsystem::new`.
+ unsafe { bindings::mutex_destroy(&raw mut (*self.subsystem.get()).su_mutex) };
+ }
+}
+
+/// Trait that allows offset calculations for structs that embed a
+/// `bindings::config_group`.
+///
+/// Users of the configfs API should not need to implement this trait.
+///
+/// # Safety
+///
+/// - Implementers of this trait must embed a `bindings::config_group`.
+/// - Methods must be implemented according to method documentation.
+pub unsafe trait HasGroup<Data> {
+ /// Return the address of the `bindings::config_group` embedded in [`Self`].
+ ///
+ /// # Safety
+ ///
+ /// - `this` must be a valid allocation of at least the size of [`Self`].
+ unsafe fn group(this: *const Self) -> *const bindings::config_group;
+
+ /// Return the address of the [`Self`] that `group` is embedded in.
+ ///
+ /// # Safety
+ ///
+ /// - `group` must point to the `bindings::config_group` that is embedded in
+ /// [`Self`].
+ unsafe fn container_of(group: *const bindings::config_group) -> *const Self;
+}
+
+// SAFETY: `Subsystem<Data>` embeds a field of type `bindings::config_group`
+// within the `subsystem` field.
+unsafe impl<Data> HasGroup<Data> for Subsystem<Data> {
+ unsafe fn group(this: *const Self) -> *const bindings::config_group {
+        // SAFETY: By impl and function safety requirements, this projection is in bounds.
+ unsafe { &raw const (*(*this).subsystem.get()).su_group }
+ }
+
+ unsafe fn container_of(group: *const bindings::config_group) -> *const Self {
+        // SAFETY: By impl and function safety requirements, this projection is in bounds.
+ let c_subsys_ptr = unsafe { container_of!(group, bindings::configfs_subsystem, su_group) };
+ let opaque_ptr = c_subsys_ptr.cast::<Opaque<bindings::configfs_subsystem>>();
+        // SAFETY: By impl and function safety requirements, `opaque_ptr` and the
+        // pointer it returns are within the same allocation.
+ unsafe { container_of!(opaque_ptr, Subsystem<Data>, subsystem) }
+ }
+}
+
+/// A configfs group.
+///
+/// To add a subgroup to configfs, pass this type as `ctype` to
+/// [`crate::configfs_attrs`] when creating a group in [`GroupOperations::make_group`].
+#[pin_data]
+pub struct Group<Data> {
+ #[pin]
+ group: Opaque<bindings::config_group>,
+ #[pin]
+ data: Data,
+}
+
+impl<Data> Group<Data> {
+ /// Create an initializer for a new group.
+ ///
+ /// When instantiated, the group will appear as a directory with the name
+ /// given by `name` and it will contain attributes specified by `item_type`.
+ pub fn new(
+ name: CString,
+ item_type: &'static ItemType<Group<Data>, Data>,
+ data: impl PinInit<Data, Error>,
+ ) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ group <- pin_init::zeroed().chain(|v: &mut Opaque<bindings::config_group>| {
+ let place = v.get();
+ let name = name.as_bytes_with_nul().as_ptr();
+ // SAFETY: It is safe to initialize a group once it has been zeroed.
+ unsafe {
+ bindings::config_group_init_type_name(place, name.cast(), item_type.as_ptr())
+ };
+ Ok(())
+ }),
+ data <- data,
+ })
+ }
+}
+
+// SAFETY: `Group<Data>` embeds a field of type `bindings::config_group`
+// within the `group` field.
+unsafe impl<Data> HasGroup<Data> for Group<Data> {
+ unsafe fn group(this: *const Self) -> *const bindings::config_group {
+ Opaque::raw_get(
+            // SAFETY: By impl and function safety requirements, this field
+            // projection is within bounds of the allocation.
+ unsafe { &raw const (*this).group },
+ )
+ }
+
+ unsafe fn container_of(group: *const bindings::config_group) -> *const Self {
+ let opaque_ptr = group.cast::<Opaque<bindings::config_group>>();
+        // SAFETY: By impl and function safety requirements, `opaque_ptr` and the
+        // pointer it returns will be in the same allocation.
+ unsafe { container_of!(opaque_ptr, Self, group) }
+ }
+}
+
+/// # Safety
+///
+/// `this` must be a valid pointer.
+///
+/// If `this` does not represent the root group of a configfs subsystem,
+/// `this` must be a pointer to a `bindings::config_group` embedded in a
+/// `Group<Parent>`.
+///
+/// Otherwise, `this` must be a pointer to a `bindings::config_group` that
+/// is embedded in a `bindings::configfs_subsystem` that is embedded in a
+/// `Subsystem<Parent>`.
+unsafe fn get_group_data<'a, Parent>(this: *mut bindings::config_group) -> &'a Parent {
+ // SAFETY: `this` is a valid pointer.
+ let is_root = unsafe { (*this).cg_subsys.is_null() };
+
+ if !is_root {
+        // SAFETY: By C API contract, `this` was returned from a call to
+ // `make_group`. The pointer is known to be embedded within a
+ // `Group<Parent>`.
+ unsafe { &(*Group::<Parent>::container_of(this)).data }
+ } else {
+ // SAFETY: By C API contract, `this` is a pointer to the
+ // `bindings::config_group` field within a `Subsystem<Parent>`.
+ unsafe { &(*Subsystem::container_of(this)).data }
+ }
+}
+
+struct GroupOperationsVTable<Parent, Child>(PhantomData<(Parent, Child)>);
+
+impl<Parent, Child> GroupOperationsVTable<Parent, Child>
+where
+ Parent: GroupOperations<Child = Child>,
+ Child: 'static,
+{
+ /// # Safety
+ ///
+ /// `this` must be a valid pointer.
+ ///
+ /// If `this` does not represent the root group of a configfs subsystem,
+ /// `this` must be a pointer to a `bindings::config_group` embedded in a
+ /// `Group<Parent>`.
+ ///
+ /// Otherwise, `this` must be a pointer to a `bindings::config_group` that
+ /// is embedded in a `bindings::configfs_subsystem` that is embedded in a
+ /// `Subsystem<Parent>`.
+ ///
+ /// `name` must point to a null terminated string.
+ unsafe extern "C" fn make_group(
+ this: *mut bindings::config_group,
+ name: *const kernel::ffi::c_char,
+ ) -> *mut bindings::config_group {
+ // SAFETY: By function safety requirements of this function, this call
+ // is safe.
+ let parent_data = unsafe { get_group_data(this) };
+
+ let group_init = match Parent::make_group(
+ parent_data,
+ // SAFETY: By function safety requirements, name points to a null
+ // terminated string.
+ unsafe { CStr::from_char_ptr(name) },
+ ) {
+ Ok(init) => init,
+ Err(e) => return e.to_ptr(),
+ };
+
+ let child_group = <Arc<Group<Child>> as InPlaceInit<Group<Child>>>::try_pin_init(
+ group_init,
+ flags::GFP_KERNEL,
+ );
+
+ match child_group {
+ Ok(child_group) => {
+ let child_group_ptr = child_group.into_raw();
+                // SAFETY: We allocated the pointee of `child_group_ptr` above as a
+ // `Group<Child>`.
+ unsafe { Group::<Child>::group(child_group_ptr) }.cast_mut()
+ }
+ Err(e) => e.to_ptr(),
+ }
+ }
+
+ /// # Safety
+ ///
+ /// If `this` does not represent the root group of a configfs subsystem,
+ /// `this` must be a pointer to a `bindings::config_group` embedded in a
+ /// `Group<Parent>`.
+ ///
+ /// Otherwise, `this` must be a pointer to a `bindings::config_group` that
+ /// is embedded in a `bindings::configfs_subsystem` that is embedded in a
+ /// `Subsystem<Parent>`.
+ ///
+ /// `item` must point to a `bindings::config_item` within a
+ /// `bindings::config_group` within a `Group<Child>`.
+ unsafe extern "C" fn drop_item(
+ this: *mut bindings::config_group,
+ item: *mut bindings::config_item,
+ ) {
+ // SAFETY: By function safety requirements of this function, this call
+ // is safe.
+ let parent_data = unsafe { get_group_data(this) };
+
+ // SAFETY: By function safety requirements, `item` is embedded in a
+ // `config_group`.
+ let c_child_group_ptr = unsafe { container_of!(item, bindings::config_group, cg_item) };
+ // SAFETY: By function safety requirements, `c_child_group_ptr` is
+ // embedded within a `Group<Child>`.
+ let r_child_group_ptr = unsafe { Group::<Child>::container_of(c_child_group_ptr) };
+
+ if Parent::HAS_DROP_ITEM {
+ // SAFETY: We called `into_raw` to produce `r_child_group_ptr` in
+ // `make_group`.
+ let arc: Arc<Group<Child>> = unsafe { Arc::from_raw(r_child_group_ptr.cast_mut()) };
+
+ Parent::drop_item(parent_data, arc.as_arc_borrow());
+ arc.into_raw();
+ }
+
+ // SAFETY: By C API contract, we are required to drop a refcount on
+ // `item`.
+ unsafe { bindings::config_item_put(item) };
+ }
+
+ const VTABLE: bindings::configfs_group_operations = bindings::configfs_group_operations {
+ make_item: None,
+ make_group: Some(Self::make_group),
+ disconnect_notify: None,
+ drop_item: Some(Self::drop_item),
+ is_visible: None,
+ is_bin_visible: None,
+ };
+
+ const fn vtable_ptr() -> *const bindings::configfs_group_operations {
+ &Self::VTABLE as *const bindings::configfs_group_operations
+ }
+}
+
+struct ItemOperationsVTable<Container, Data>(PhantomData<(Container, Data)>);
+
+impl<Data> ItemOperationsVTable<Group<Data>, Data>
+where
+ Data: 'static,
+{
+ /// # Safety
+ ///
+ /// `this` must be a pointer to a `bindings::config_group` embedded in a
+ /// `Group<Parent>`.
+ ///
+ /// This function will destroy the pointee of `this`. The pointee of `this`
+ /// must not be accessed after the function returns.
+ unsafe extern "C" fn release(this: *mut bindings::config_item) {
+ // SAFETY: By function safety requirements, `this` is embedded in a
+ // `config_group`.
+ let c_group_ptr = unsafe { kernel::container_of!(this, bindings::config_group, cg_item) };
+ // SAFETY: By function safety requirements, `c_group_ptr` is
+ // embedded within a `Group<Data>`.
+ let r_group_ptr = unsafe { Group::<Data>::container_of(c_group_ptr) };
+
+        // SAFETY: We called `into_raw` to produce `r_group_ptr` in
+        // `make_group`.
+ let pin_self: Arc<Group<Data>> = unsafe { Arc::from_raw(r_group_ptr.cast_mut()) };
+ drop(pin_self);
+ }
+
+ const VTABLE: bindings::configfs_item_operations = bindings::configfs_item_operations {
+ release: Some(Self::release),
+ allow_link: None,
+ drop_link: None,
+ };
+
+ const fn vtable_ptr() -> *const bindings::configfs_item_operations {
+ &Self::VTABLE as *const bindings::configfs_item_operations
+ }
+}
+
+impl<Data> ItemOperationsVTable<Subsystem<Data>, Data> {
+ const VTABLE: bindings::configfs_item_operations = bindings::configfs_item_operations {
+ release: None,
+ allow_link: None,
+ drop_link: None,
+ };
+
+ const fn vtable_ptr() -> *const bindings::configfs_item_operations {
+ &Self::VTABLE as *const bindings::configfs_item_operations
+ }
+}
+
+/// Operations implemented by configfs groups that can create subgroups.
+///
+/// Implement this trait on structs that embed a [`Subsystem`] or a [`Group`].
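+///
+/// # Examples
+///
+/// A minimal sketch of a parent that accepts child directories. The `Parent` and `Child`
+/// data types and the `Child::new` initializer are assumptions for illustration:
+///
+/// ```ignore
+/// use kernel::configfs;
+/// use kernel::configfs_attrs;
+/// use kernel::prelude::*;
+///
+/// #[vtable]
+/// impl configfs::GroupOperations for Parent {
+///     type Child = Child;
+///
+///     fn make_group(&self, name: &CStr) -> Result<impl PinInit<configfs::Group<Child>, Error>> {
+///         let tpe = configfs_attrs! {
+///             container: configfs::Group<Child>,
+///             data: Child,
+///             attributes: [
+///                 value: 0,
+///             ],
+///         };
+///
+///         // Accept the `mkdir(2)` request by returning an initializer for the new group.
+///         Ok(configfs::Group::new(name.try_into()?, tpe, Child::new()))
+///     }
+/// }
+/// ```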
+#[vtable]
+pub trait GroupOperations {
+ /// The child data object type.
+ ///
+ /// This group will create subgroups (subdirectories) backed by this kind of
+ /// object.
+ type Child: 'static;
+
+ /// Creates a new subgroup.
+ ///
+ /// The kernel will call this method in response to `mkdir(2)` in the
+ /// directory representing `this`.
+ ///
+ /// To accept the request to create a group, implementations should
+ /// return an initializer of a `Group<Self::Child>`. To prevent creation,
+ /// return a suitable error.
+ fn make_group(&self, name: &CStr) -> Result<impl PinInit<Group<Self::Child>, Error>>;
+
+ /// Prepares the group for removal from configfs.
+ ///
+ /// The kernel will call this method before the directory representing `_child` is removed from
+ /// configfs.
+ ///
+    /// Implementations can use this method to do housekeeping before configfs drops its
+ /// reference to `Child`.
+ ///
+ /// NOTE: "drop" in the name of this function is not related to the Rust drop term. Rather, the
+ /// name is inherited from the callback name in the underlying C code.
+ fn drop_item(&self, _child: ArcBorrow<'_, Group<Self::Child>>) {
+ kernel::build_error!(kernel::error::VTABLE_DEFAULT_ERROR)
+ }
+}
+
+/// A configfs attribute.
+///
+/// An attribute appears as a file in configfs, inside the directory that
+/// represents the group that the attribute belongs to.
+#[repr(transparent)]
+pub struct Attribute<const ID: u64, O, Data> {
+ attribute: Opaque<bindings::configfs_attribute>,
+ _p: PhantomData<(O, Data)>,
+}
+
+// SAFETY: We do not provide any operations on `Attribute`.
+unsafe impl<const ID: u64, O, Data> Sync for Attribute<ID, O, Data> {}
+
+// SAFETY: Ownership of `Attribute` can safely be transferred to other threads.
+unsafe impl<const ID: u64, O, Data> Send for Attribute<ID, O, Data> {}
+
+impl<const ID: u64, O, Data> Attribute<ID, O, Data>
+where
+ O: AttributeOperations<ID, Data = Data>,
+{
+ /// # Safety
+ ///
+ /// `item` must be embedded in a `bindings::config_group`.
+ ///
+ /// If `item` does not represent the root group of a configfs subsystem,
+ /// the group must be embedded in a `Group<Data>`.
+ ///
+    /// Otherwise, the group must be embedded in a
+ /// `bindings::configfs_subsystem` that is embedded in a `Subsystem<Data>`.
+ ///
+ /// `page` must point to a writable buffer of size at least [`PAGE_SIZE`].
+ unsafe extern "C" fn show(
+ item: *mut bindings::config_item,
+ page: *mut kernel::ffi::c_char,
+ ) -> isize {
+ let c_group: *mut bindings::config_group =
+ // SAFETY: By function safety requirements, `item` is embedded in a
+ // `config_group`.
+ unsafe { container_of!(item, bindings::config_group, cg_item) }.cast_mut();
+
+ // SAFETY: The function safety requirements for this function satisfy
+ // the conditions for this call.
+ let data: &Data = unsafe { get_group_data(c_group) };
+
+ // SAFETY: By function safety requirements, `page` is writable for `PAGE_SIZE`.
+ let ret = O::show(data, unsafe { &mut *(page as *mut [u8; PAGE_SIZE]) });
+
+ match ret {
+ Ok(size) => size as isize,
+ Err(err) => err.to_errno() as isize,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `item` must be embedded in a `bindings::config_group`.
+ ///
+ /// If `item` does not represent the root group of a configfs subsystem,
+ /// the group must be embedded in a `Group<Data>`.
+ ///
+    /// Otherwise, the group must be embedded in a
+ /// `bindings::configfs_subsystem` that is embedded in a `Subsystem<Data>`.
+ ///
+ /// `page` must point to a readable buffer of size at least `size`.
+ unsafe extern "C" fn store(
+ item: *mut bindings::config_item,
+ page: *const kernel::ffi::c_char,
+ size: usize,
+ ) -> isize {
+ let c_group: *mut bindings::config_group =
+ // SAFETY: By function safety requirements, `item` is embedded in a
+ // `config_group`.
+ unsafe { container_of!(item, bindings::config_group, cg_item) }.cast_mut();
+
+ // SAFETY: The function safety requirements for this function satisfy
+ // the conditions for this call.
+ let data: &Data = unsafe { get_group_data(c_group) };
+
+ let ret = O::store(
+ data,
+ // SAFETY: By function safety requirements, `page` is readable
+ // for at least `size`.
+ unsafe { core::slice::from_raw_parts(page.cast(), size) },
+ );
+
+ match ret {
+ Ok(()) => size as isize,
+ Err(err) => err.to_errno() as isize,
+ }
+ }
+
+ /// Create a new attribute.
+ ///
+ /// The attribute will appear as a file with name given by `name`.
+ pub const fn new(name: &'static CStr) -> Self {
+ Self {
+ attribute: Opaque::new(bindings::configfs_attribute {
+ ca_name: name.as_char_ptr(),
+ ca_owner: core::ptr::null_mut(),
+ ca_mode: 0o660,
+ show: Some(Self::show),
+ store: if O::HAS_STORE {
+ Some(Self::store)
+ } else {
+ None
+ },
+ }),
+ _p: PhantomData,
+ }
+ }
+}
+
+/// Operations supported by an attribute.
+///
+/// Implement this trait on a type and pass that type as a generic parameter when
+/// creating an [`Attribute`]. The type carrying the implementation serves no
+/// purpose other than specifying the attribute operations.
+///
+/// This trait must be implemented for the `Data` type of types that
+/// implement `HasGroup<Data>`. The trait must be implemented once for each
+/// attribute of the group. The constant type parameter `ID` maps the
+/// implementation to a specific `Attribute`. `ID` must be passed when declaring
+/// attributes via the [`kernel::configfs_attrs`] macro, to tie
+/// `AttributeOperations` implementations to concrete named attributes.
+#[vtable]
+pub trait AttributeOperations<const ID: u64 = 0> {
+ /// The type of the object that contains the field that is backing the
+ /// attribute for this operation.
+ type Data;
+
+ /// Renders the value of an attribute.
+ ///
+ /// This function is called by the kernel to read the value of an attribute.
+ ///
+ /// Implementations should write the rendering of the attribute to `page`
+ /// and return the number of bytes written.
+ fn show(data: &Self::Data, page: &mut [u8; PAGE_SIZE]) -> Result<usize>;
+
+ /// Stores the value of an attribute.
+ ///
+ /// This function is called by the kernel to update the value of an attribute.
+ ///
+ /// Implementations should parse the value from `page` and update internal
+ /// state to reflect the parsed value.
+ fn store(_data: &Self::Data, _page: &[u8]) -> Result {
+ kernel::build_error!(kernel::error::VTABLE_DEFAULT_ERROR)
+ }
+}
+
+/// A list of attributes.
+///
+/// This type is used to construct a new [`ItemType`]. It represents a list of
+/// [`Attribute`] that will appear in the directory representing a [`Group`].
+/// Users should not directly instantiate this type, rather they should use the
+/// [`kernel::configfs_attrs`] macro to declare a static set of attributes for a
+/// group.
+///
+/// # Note
+///
+/// Instances of this type are constructed statically at compile time by the
+/// [`kernel::configfs_attrs`] macro.
+#[repr(transparent)]
+pub struct AttributeList<const N: usize, Data>(
+    /// Null-terminated array of pointers to [`Attribute`]. The type is [`c_void`]
+ /// to conform to the C API.
+ UnsafeCell<[*mut kernel::ffi::c_void; N]>,
+ PhantomData<Data>,
+);
+
+// SAFETY: Ownership of `AttributeList` can safely be transferred to other threads.
+unsafe impl<const N: usize, Data> Send for AttributeList<N, Data> {}
+
+// SAFETY: We do not provide any operations on `AttributeList` that need synchronization.
+unsafe impl<const N: usize, Data> Sync for AttributeList<N, Data> {}
+
+impl<const N: usize, Data> AttributeList<N, Data> {
+ /// # Safety
+ ///
+ /// This function must only be called by the [`kernel::configfs_attrs`]
+ /// macro.
+ #[doc(hidden)]
+ pub const unsafe fn new() -> Self {
+ Self(UnsafeCell::new([core::ptr::null_mut(); N]), PhantomData)
+ }
+
+ /// # Safety
+ ///
+ /// The caller must ensure that there are no other concurrent accesses to
+    /// `self`. That is, the caller has exclusive access to `self`.
+ #[doc(hidden)]
+ pub const unsafe fn add<const I: usize, const ID: u64, O>(
+ &'static self,
+ attribute: &'static Attribute<ID, O, Data>,
+ ) where
+ O: AttributeOperations<ID, Data = Data>,
+ {
+ // We need a space at the end of our list for a null terminator.
+ const { assert!(I < N - 1, "Invalid attribute index") };
+
+ // SAFETY: By function safety requirements, we have exclusive access to
+ // `self` and the reference created below will be exclusive.
+ unsafe {
+ (&mut *self.0.get())[I] = (attribute as *const Attribute<ID, O, Data>)
+ .cast_mut()
+ .cast()
+ };
+ }
+}
+
+/// A representation of the attributes that will appear in a [`Group`] or
+/// [`Subsystem`].
+///
+/// Users should not directly instantiate objects of this type. Rather, they
+/// should use the [`kernel::configfs_attrs`] macro to statically declare the
+/// shape of a [`Group`] or [`Subsystem`].
+#[pin_data]
+pub struct ItemType<Container, Data> {
+ #[pin]
+ item_type: Opaque<bindings::config_item_type>,
+ _p: PhantomData<(Container, Data)>,
+}
+
+// SAFETY: We do not provide any operations on `ItemType` that need synchronization.
+unsafe impl<Container, Data> Sync for ItemType<Container, Data> {}
+
+// SAFETY: Ownership of `ItemType` can safely be transferred to other threads.
+unsafe impl<Container, Data> Send for ItemType<Container, Data> {}
+
+macro_rules! impl_item_type {
+ ($tpe:ty) => {
+ impl<Data> ItemType<$tpe, Data> {
+ #[doc(hidden)]
+ pub const fn new_with_child_ctor<const N: usize, Child>(
+ owner: &'static ThisModule,
+ attributes: &'static AttributeList<N, Data>,
+ ) -> Self
+ where
+ Data: GroupOperations<Child = Child>,
+ Child: 'static,
+ {
+ Self {
+ item_type: Opaque::new(bindings::config_item_type {
+ ct_owner: owner.as_ptr(),
+ ct_group_ops: GroupOperationsVTable::<Data, Child>::vtable_ptr().cast_mut(),
+ ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(),
+ ct_attrs: (attributes as *const AttributeList<N, Data>)
+ .cast_mut()
+ .cast(),
+ ct_bin_attrs: core::ptr::null_mut(),
+ }),
+ _p: PhantomData,
+ }
+ }
+
+ #[doc(hidden)]
+ pub const fn new<const N: usize>(
+ owner: &'static ThisModule,
+ attributes: &'static AttributeList<N, Data>,
+ ) -> Self {
+ Self {
+ item_type: Opaque::new(bindings::config_item_type {
+ ct_owner: owner.as_ptr(),
+ ct_group_ops: core::ptr::null_mut(),
+ ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(),
+ ct_attrs: (attributes as *const AttributeList<N, Data>)
+ .cast_mut()
+ .cast(),
+ ct_bin_attrs: core::ptr::null_mut(),
+ }),
+ _p: PhantomData,
+ }
+ }
+ }
+ };
+}
+
+impl_item_type!(Subsystem<Data>);
+impl_item_type!(Group<Data>);
+
+impl<Container, Data> ItemType<Container, Data> {
+ fn as_ptr(&self) -> *const bindings::config_item_type {
+ self.item_type.get()
+ }
+}
+
+/// Define a list of configfs attributes statically.
+///
+/// Invoking the macro in the following manner:
+///
+/// ```ignore
+/// let item_type = configfs_attrs! {
+/// container: configfs::Subsystem<Configuration>,
+/// data: Configuration,
+/// child: Child,
+/// attributes: [
+/// message: 0,
+/// bar: 1,
+/// ],
+/// };
+/// ```
+///
+/// Expands to the following output:
+///
+/// ```ignore
+/// let item_type = {
+/// static CONFIGURATION_MESSAGE_ATTR: kernel::configfs::Attribute<
+/// 0,
+/// Configuration,
+/// Configuration,
+/// > = unsafe {
+/// kernel::configfs::Attribute::new({
+/// const S: &str = "message\u{0}";
+/// const C: &kernel::str::CStr = match kernel::str::CStr::from_bytes_with_nul(
+/// S.as_bytes()
+/// ) {
+/// Ok(v) => v,
+/// Err(_) => {
+/// core::panicking::panic_fmt(core::const_format_args!(
+/// "string contains interior NUL"
+/// ));
+/// }
+/// };
+/// C
+/// })
+/// };
+///
+/// static CONFIGURATION_BAR_ATTR: kernel::configfs::Attribute<
+/// 1,
+/// Configuration,
+/// Configuration
+/// > = unsafe {
+/// kernel::configfs::Attribute::new({
+/// const S: &str = "bar\u{0}";
+/// const C: &kernel::str::CStr = match kernel::str::CStr::from_bytes_with_nul(
+/// S.as_bytes()
+/// ) {
+/// Ok(v) => v,
+/// Err(_) => {
+/// core::panicking::panic_fmt(core::const_format_args!(
+/// "string contains interior NUL"
+/// ));
+/// }
+/// };
+/// C
+/// })
+/// };
+///
+/// const N: usize = (1usize + (1usize + 0usize)) + 1usize;
+///
+/// static CONFIGURATION_ATTRS: kernel::configfs::AttributeList<N, Configuration> =
+/// unsafe { kernel::configfs::AttributeList::new() };
+///
+/// {
+/// const N: usize = 0usize;
+/// unsafe { CONFIGURATION_ATTRS.add::<N, 0, _>(&CONFIGURATION_MESSAGE_ATTR) };
+/// }
+///
+/// {
+/// const N: usize = (1usize + 0usize);
+/// unsafe { CONFIGURATION_ATTRS.add::<N, 1, _>(&CONFIGURATION_BAR_ATTR) };
+/// }
+///
+/// static CONFIGURATION_TPE:
+/// kernel::configfs::ItemType<configfs::Subsystem<Configuration> ,Configuration>
+/// = kernel::configfs::ItemType::<
+/// configfs::Subsystem<Configuration>,
+/// Configuration
+/// >::new_with_child_ctor::<N,Child>(
+/// &THIS_MODULE,
+/// &CONFIGURATION_ATTRS
+/// );
+///
+/// &CONFIGURATION_TPE
+/// }
+/// ```
+#[macro_export]
+macro_rules! configfs_attrs {
+ (
+ container: $container:ty,
+ data: $data:ty,
+ attributes: [
+ $($name:ident: $attr:literal),* $(,)?
+ ] $(,)?
+ ) => {
+ $crate::configfs_attrs!(
+ count:
+ @container($container),
+ @data($data),
+ @child(),
+ @no_child(x),
+ @attrs($($name $attr)*),
+ @eat($($name $attr,)*),
+ @assign(),
+ @cnt(0usize),
+ )
+ };
+ (
+ container: $container:ty,
+ data: $data:ty,
+ child: $child:ty,
+ attributes: [
+ $($name:ident: $attr:literal),* $(,)?
+ ] $(,)?
+ ) => {
+ $crate::configfs_attrs!(
+ count:
+ @container($container),
+ @data($data),
+ @child($child),
+ @no_child(),
+ @attrs($($name $attr)*),
+ @eat($($name $attr,)*),
+ @assign(),
+ @cnt(0usize),
+ )
+ };
+ (count:
+ @container($container:ty),
+ @data($data:ty),
+ @child($($child:ty)?),
+ @no_child($($no_child:ident)?),
+ @attrs($($aname:ident $aattr:literal)*),
+ @eat($name:ident $attr:literal, $($rname:ident $rattr:literal,)*),
+ @assign($($assign:block)*),
+ @cnt($cnt:expr),
+ ) => {
+ $crate::configfs_attrs!(
+ count:
+ @container($container),
+ @data($data),
+ @child($($child)?),
+ @no_child($($no_child)?),
+ @attrs($($aname $aattr)*),
+ @eat($($rname $rattr,)*),
+ @assign($($assign)* {
+ const N: usize = $cnt;
+                // The following macro text expands to a call to `AttributeList::add`.
+
+ // SAFETY: By design of this macro, the name of the variable we
+ // invoke the `add` method on below, is not visible outside of
+ // the macro expansion. The macro does not operate concurrently
+ // on this variable, and thus we have exclusive access to the
+ // variable.
+ unsafe {
+ $crate::macros::paste!(
+ [< $data:upper _ATTRS >]
+ .add::<N, $attr, _>(&[< $data:upper _ $name:upper _ATTR >])
+ )
+ };
+ }),
+ @cnt(1usize + $cnt),
+ )
+ };
+ (count:
+ @container($container:ty),
+ @data($data:ty),
+ @child($($child:ty)?),
+ @no_child($($no_child:ident)?),
+ @attrs($($aname:ident $aattr:literal)*),
+ @eat(),
+ @assign($($assign:block)*),
+ @cnt($cnt:expr),
+ ) =>
+ {
+ $crate::configfs_attrs!(
+ final:
+ @container($container),
+ @data($data),
+ @child($($child)?),
+ @no_child($($no_child)?),
+ @attrs($($aname $aattr)*),
+ @assign($($assign)*),
+ @cnt($cnt),
+ )
+ };
+ (final:
+ @container($container:ty),
+ @data($data:ty),
+ @child($($child:ty)?),
+ @no_child($($no_child:ident)?),
+ @attrs($($name:ident $attr:literal)*),
+ @assign($($assign:block)*),
+ @cnt($cnt:expr),
+ ) =>
+ {
+ $crate::macros::paste!{
+ {
+ $(
+ // SAFETY: We are expanding `configfs_attrs`.
+ static [< $data:upper _ $name:upper _ATTR >]:
+ $crate::configfs::Attribute<$attr, $data, $data> =
+ unsafe {
+ $crate::configfs::Attribute::new(c_str!(::core::stringify!($name)))
+ };
+ )*
+
+
+ // We need space for a null terminator.
+ const N: usize = $cnt + 1usize;
+
+ // SAFETY: We are expanding `configfs_attrs`.
+ static [< $data:upper _ATTRS >]:
+ $crate::configfs::AttributeList<N, $data> =
+ unsafe { $crate::configfs::AttributeList::new() };
+
+ $($assign)*
+
+ $(
+ const [<$no_child:upper>]: bool = true;
+
+ static [< $data:upper _TPE >] : $crate::configfs::ItemType<$container, $data> =
+ $crate::configfs::ItemType::<$container, $data>::new::<N>(
+                        &THIS_MODULE, &[< $data:upper _ATTRS >]
+ );
+ )?
+
+ $(
+ static [< $data:upper _TPE >]:
+ $crate::configfs::ItemType<$container, $data> =
+ $crate::configfs::ItemType::<$container, $data>::
+ new_with_child_ctor::<N, $child>(
+                            &THIS_MODULE, &[< $data:upper _ATTRS >]
+ );
+ )?
+
+ & [< $data:upper _TPE >]
+ }
+ }
+ };
+
+}
diff --git a/rust/kernel/cpu.rs b/rust/kernel/cpu.rs
new file mode 100644
index 000000000000..10c5c3b25873
--- /dev/null
+++ b/rust/kernel/cpu.rs
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic CPU definitions.
+//!
+//! C header: [`include/linux/cpu.h`](srctree/include/linux/cpu.h)
+
+use crate::{bindings, device::Device, error::Result, prelude::ENODEV};
+
+/// Returns a reference to the device of the CPU identified by `cpu`.
+///
+/// # Safety
+///
+/// Reference counting is not implemented for the CPU device in the C code. When a CPU is
+/// hot-unplugged, the corresponding CPU device is unregistered, but its associated memory
+/// is not freed.
+///
+/// Callers must ensure that the CPU device is not used after it has been unregistered.
+/// This can be achieved, for example, by registering a CPU hotplug notifier and removing
+/// any references to the CPU device within the notifier's callback.
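+///
+/// # Examples
+///
+/// A minimal sketch; the guarantee that CPU 0 is not unplugged while the reference is in
+/// use is assumed to be provided by the surrounding code:
+///
+/// ```ignore
+/// // SAFETY: The caller guarantees (e.g. via a CPU hotplug notifier) that the device of
+/// // CPU 0 is not used after CPU 0 has been unregistered.
+/// let dev = unsafe { kernel::cpu::from_cpu(0)? };
+/// pr_info!("Found the device of CPU 0\n");
+/// ```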
+pub unsafe fn from_cpu(cpu: u32) -> Result<&'static Device> {
+ // SAFETY: It is safe to call `get_cpu_device()` for any CPU.
+ let ptr = unsafe { bindings::get_cpu_device(cpu) };
+ if ptr.is_null() {
+ return Err(ENODEV);
+ }
+
+ // SAFETY: The pointer returned by `get_cpu_device()`, if not `NULL`, is a valid pointer to
+ // a `struct device` and is never freed by the C code.
+ Ok(unsafe { Device::as_ref(ptr) })
+}
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
new file mode 100644
index 000000000000..09b856bb297b
--- /dev/null
+++ b/rust/kernel/cpufreq.rs
@@ -0,0 +1,1321 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! CPU frequency scaling.
+//!
+//! This module provides Rust abstractions for interacting with the cpufreq subsystem.
+//!
+//! C header: [`include/linux/cpufreq.h`](srctree/include/linux/cpufreq.h)
+//!
+//! Reference: <https://docs.kernel.org/admin-guide/pm/cpufreq.html>
+
+use crate::{
+ clk::Hertz,
+ cpumask,
+ device::{Bound, Device},
+ devres::Devres,
+ error::{code::*, from_err_ptr, from_result, to_result, Result, VTABLE_DEFAULT_ERROR},
+ ffi::{c_char, c_ulong},
+ prelude::*,
+ types::ForeignOwnable,
+ types::Opaque,
+};
+
+#[cfg(CONFIG_COMMON_CLK)]
+use crate::clk::Clk;
+
+use core::{
+ cell::UnsafeCell,
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ops::{Deref, DerefMut},
+ pin::Pin,
+ ptr,
+};
+
+use macros::vtable;
+
+/// Maximum length of CPU frequency driver's name.
+const CPUFREQ_NAME_LEN: usize = bindings::CPUFREQ_NAME_LEN as usize;
+
+/// Default transition latency value in nanoseconds.
+pub const ETERNAL_LATENCY_NS: u32 = bindings::CPUFREQ_ETERNAL as u32;
+
+/// CPU frequency driver flags.
+pub mod flags {
+ /// Driver needs to update internal limits even if frequency remains unchanged.
+ pub const NEED_UPDATE_LIMITS: u16 = 1 << 0;
+
+ /// Platform where constants like `loops_per_jiffy` are unaffected by frequency changes.
+ pub const CONST_LOOPS: u16 = 1 << 1;
+
+ /// Register driver as a thermal cooling device automatically.
+ pub const IS_COOLING_DEV: u16 = 1 << 2;
+
+ /// Supports multiple clock domains with per-policy governors in `cpu/cpuN/cpufreq/`.
+ pub const HAVE_GOVERNOR_PER_POLICY: u16 = 1 << 3;
+
+ /// Allows post-change notifications outside of the `target()` routine.
+ pub const ASYNC_NOTIFICATION: u16 = 1 << 4;
+
+ /// Ensure CPU starts at a valid frequency from the driver's freq-table.
+ pub const NEED_INITIAL_FREQ_CHECK: u16 = 1 << 5;
+
+ /// Disallow governors with `dynamic_switching` capability.
+ pub const NO_AUTO_DYNAMIC_SWITCHING: u16 = 1 << 6;
+}
+
+/// Relations from the C code.
+const CPUFREQ_RELATION_L: u32 = 0;
+const CPUFREQ_RELATION_H: u32 = 1;
+const CPUFREQ_RELATION_C: u32 = 2;
+
+/// Can be used with any of the above values.
+const CPUFREQ_RELATION_E: u32 = 1 << 2;
+
+/// CPU frequency selection relations.
+///
+/// Each relation can optionally be marked as "efficient", restricting the selection to
+/// efficient frequencies only.
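+///
+/// ## Examples
+///
+/// Converting a [`Relation`] to its C representation. The raw values mirror the C
+/// `CPUFREQ_RELATION_*` constants, with bit 2 marking the "efficient" variant:
+///
+/// ```
+/// use kernel::cpufreq::Relation;
+///
+/// // "Lowest frequency at or above target", restricted to efficient frequencies.
+/// let raw = u32::from(Relation::Low(true));
+/// assert_eq!(raw, 1 << 2);
+/// ```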
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum Relation {
+ /// Select the lowest frequency at or above target.
+ Low(bool),
+ /// Select the highest frequency below or at target.
+ High(bool),
+ /// Select the closest frequency to the target.
+ Close(bool),
+}
+
+impl Relation {
+ // Construct from a C-compatible `u32` value.
+ fn new(val: u32) -> Result<Self> {
+ let efficient = val & CPUFREQ_RELATION_E != 0;
+
+ Ok(match val & !CPUFREQ_RELATION_E {
+ CPUFREQ_RELATION_L => Self::Low(efficient),
+ CPUFREQ_RELATION_H => Self::High(efficient),
+ CPUFREQ_RELATION_C => Self::Close(efficient),
+ _ => return Err(EINVAL),
+ })
+ }
+}
+
+impl From<Relation> for u32 {
+ // Convert to a C-compatible `u32` value.
+ fn from(rel: Relation) -> Self {
+ let (mut val, efficient) = match rel {
+ Relation::Low(e) => (CPUFREQ_RELATION_L, e),
+ Relation::High(e) => (CPUFREQ_RELATION_H, e),
+ Relation::Close(e) => (CPUFREQ_RELATION_C, e),
+ };
+
+ if efficient {
+ val |= CPUFREQ_RELATION_E;
+ }
+
+ val
+ }
+}
+
+/// Policy data.
+///
+/// Rust abstraction for the C `struct cpufreq_policy_data`.
+///
+/// # Invariants
+///
+/// A [`PolicyData`] instance always corresponds to a valid C `struct cpufreq_policy_data`.
+///
+/// The callers must ensure that the `struct cpufreq_policy_data` is valid for access and remains
+/// valid for the lifetime of the returned reference.
+#[repr(transparent)]
+pub struct PolicyData(Opaque<bindings::cpufreq_policy_data>);
+
+impl PolicyData {
+ /// Creates a mutable reference to an existing `struct cpufreq_policy_data` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
+ /// of the returned reference.
+ #[inline]
+ pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpufreq_policy_data) -> &'a mut Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &mut *ptr.cast() }
+ }
+
+ /// Returns a raw pointer to the underlying C `cpufreq_policy_data`.
+ #[inline]
+ pub fn as_raw(&self) -> *mut bindings::cpufreq_policy_data {
+ let this: *const Self = self;
+ this.cast_mut().cast()
+ }
+
+ /// Wrapper for `cpufreq_generic_frequency_table_verify`.
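+    ///
+    /// ## Examples
+    ///
+    /// A sketch of a [`Driver::verify`] callback that defers to the generic frequency-table
+    /// check (this assumes the driver installed a frequency table during `init`):
+    ///
+    /// ```ignore
+    /// fn verify(data: &mut cpufreq::PolicyData) -> Result {
+    ///     // Clamp the requested limits to frequencies present in the driver's table.
+    ///     data.generic_verify()
+    /// }
+    /// ```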
+ #[inline]
+ pub fn generic_verify(&self) -> Result {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid.
+ to_result(unsafe { bindings::cpufreq_generic_frequency_table_verify(self.as_raw()) })
+ }
+}
+
+/// The frequency table index.
+///
+/// Represents an index within a frequency table.
+///
+/// # Invariants
+///
+/// The index must correspond to a valid entry in the [`Table`] it is used for.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct TableIndex(usize);
+
+impl TableIndex {
+ /// Creates an instance of [`TableIndex`].
+ ///
+ /// # Safety
+ ///
+    /// The caller must ensure that `index` corresponds to a valid entry in the [`Table`] it is
+    /// used for.
+    pub unsafe fn new(index: usize) -> Self {
+        // INVARIANT: The caller ensures that `index` corresponds to a valid entry in the
+        // [`Table`].
+ Self(index)
+ }
+}
+
+impl From<TableIndex> for usize {
+ #[inline]
+ fn from(index: TableIndex) -> Self {
+ index.0
+ }
+}
+
+/// CPU frequency table.
+///
+/// Rust abstraction for the C `struct cpufreq_frequency_table`.
+///
+/// # Invariants
+///
+/// A [`Table`] instance always corresponds to a valid C `struct cpufreq_frequency_table`.
+///
+/// The callers must ensure that the `struct cpufreq_frequency_table` is valid for access and
+/// remains valid for the lifetime of the returned reference.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to read a frequency value from [`Table`].
+///
+/// ```
+/// use kernel::cpufreq::{Policy, TableIndex};
+///
+/// fn show_freq(policy: &Policy) -> Result {
+/// let table = policy.freq_table()?;
+///
+/// // SAFETY: Index is a valid entry in the table.
+/// let index = unsafe { TableIndex::new(0) };
+///
+/// pr_info!("The frequency at index 0 is: {:?}\n", table.freq(index)?);
+/// pr_info!("The flags at index 0 is: {}\n", table.flags(index));
+/// pr_info!("The data at index 0 is: {}\n", table.data(index));
+/// Ok(())
+/// }
+/// ```
+#[repr(transparent)]
+pub struct Table(Opaque<bindings::cpufreq_frequency_table>);
+
+impl Table {
+ /// Creates a reference to an existing C `struct cpufreq_frequency_table` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
+ /// of the returned reference.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::cpufreq_frequency_table) -> &'a Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Returns the raw mutable pointer to the C `struct cpufreq_frequency_table`.
+ #[inline]
+ pub fn as_raw(&self) -> *mut bindings::cpufreq_frequency_table {
+ let this: *const Self = self;
+ this.cast_mut().cast()
+ }
+
+ /// Returns frequency at `index` in the [`Table`].
+ #[inline]
+ pub fn freq(&self, index: TableIndex) -> Result<Hertz> {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is
+ // guaranteed to be valid by its safety requirements.
+ Ok(Hertz::from_khz(unsafe {
+ (*self.as_raw().add(index.into())).frequency.try_into()?
+ }))
+ }
+
+ /// Returns flags at `index` in the [`Table`].
+ #[inline]
+ pub fn flags(&self, index: TableIndex) -> u32 {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is
+ // guaranteed to be valid by its safety requirements.
+ unsafe { (*self.as_raw().add(index.into())).flags }
+ }
+
+ /// Returns data at `index` in the [`Table`].
+ #[inline]
+ pub fn data(&self, index: TableIndex) -> u32 {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is
+ // guaranteed to be valid by its safety requirements.
+ unsafe { (*self.as_raw().add(index.into())).driver_data }
+ }
+}
+
+/// CPU frequency table owned and pinned in memory, created from a [`TableBuilder`].
+pub struct TableBox {
+ entries: Pin<KVec<bindings::cpufreq_frequency_table>>,
+}
+
+impl TableBox {
+ /// Constructs a new [`TableBox`] from a [`KVec`] of entries.
+ ///
+ /// # Errors
+ ///
+ /// Returns `EINVAL` if the entries list is empty.
+ #[inline]
+ fn new(entries: KVec<bindings::cpufreq_frequency_table>) -> Result<Self> {
+ if entries.is_empty() {
+ return Err(EINVAL);
+ }
+
+ Ok(Self {
+ // Pin the entries to memory, since we are passing its pointer to the C code.
+ entries: Pin::new(entries),
+ })
+ }
+
+ /// Returns a raw pointer to the underlying C `cpufreq_frequency_table`.
+ #[inline]
+ fn as_raw(&self) -> *const bindings::cpufreq_frequency_table {
+ // The pointer is valid until the table gets dropped.
+ self.entries.as_ptr()
+ }
+}
+
+impl Deref for TableBox {
+ type Target = Table;
+
+ fn deref(&self) -> &Self::Target {
+        // SAFETY: The caller owns the `TableBox`, so it is safe to dereference.
+ unsafe { Self::Target::from_raw(self.as_raw()) }
+ }
+}
+
+/// CPU frequency table builder.
+///
+/// This is used by the CPU frequency drivers to build a frequency table dynamically.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to create a CPU frequency table.
+///
+/// ```
+/// use kernel::cpufreq::{TableBuilder, TableIndex};
+/// use kernel::clk::Hertz;
+///
+/// let mut builder = TableBuilder::new();
+///
+/// // Add a few entries to the table.
+/// builder.add(Hertz::from_mhz(700), 0, 1).unwrap();
+/// builder.add(Hertz::from_mhz(800), 2, 3).unwrap();
+/// builder.add(Hertz::from_mhz(900), 4, 5).unwrap();
+/// builder.add(Hertz::from_ghz(1), 6, 7).unwrap();
+///
+/// let table = builder.to_table().unwrap();
+///
+/// // SAFETY: Index values correspond to valid entries in the table.
+/// let (index0, index2) = unsafe { (TableIndex::new(0), TableIndex::new(2)) };
+///
+/// assert_eq!(table.freq(index0), Ok(Hertz::from_mhz(700)));
+/// assert_eq!(table.flags(index0), 0);
+/// assert_eq!(table.data(index0), 1);
+///
+/// assert_eq!(table.freq(index2), Ok(Hertz::from_mhz(900)));
+/// assert_eq!(table.flags(index2), 4);
+/// assert_eq!(table.data(index2), 5);
+/// ```
+#[derive(Default)]
+#[repr(transparent)]
+pub struct TableBuilder {
+ entries: KVec<bindings::cpufreq_frequency_table>,
+}
+
+impl TableBuilder {
+ /// Creates a new instance of [`TableBuilder`].
+ #[inline]
+ pub fn new() -> Self {
+ Self {
+ entries: KVec::new(),
+ }
+ }
+
+ /// Adds a new entry to the table.
+ pub fn add(&mut self, freq: Hertz, flags: u32, driver_data: u32) -> Result {
+ // Adds the new entry at the end of the vector.
+ Ok(self.entries.push(
+ bindings::cpufreq_frequency_table {
+ flags,
+ driver_data,
+ frequency: freq.as_khz() as u32,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ /// Consumes the [`TableBuilder`] and returns [`TableBox`].
+ pub fn to_table(mut self) -> Result<TableBox> {
+        // Add the last entry to mark the end of the table.
+ self.add(Hertz(c_ulong::MAX), 0, 0)?;
+
+ TableBox::new(self.entries)
+ }
+}
+
+/// CPU frequency policy.
+///
+/// Rust abstraction for the C `struct cpufreq_policy`.
+///
+/// # Invariants
+///
+/// A [`Policy`] instance always corresponds to a valid C `struct cpufreq_policy`.
+///
+/// The callers must ensure that the `struct cpufreq_policy` is valid for access and remains valid
+/// for the lifetime of the returned reference.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to update a CPU frequency [`Policy`].
+///
+/// ```
+/// use kernel::cpufreq::{ETERNAL_LATENCY_NS, Policy};
+///
+/// fn update_policy(policy: &mut Policy) {
+/// policy
+/// .set_dvfs_possible_from_any_cpu(true)
+/// .set_fast_switch_possible(true)
+/// .set_transition_latency_ns(ETERNAL_LATENCY_NS);
+///
+/// pr_info!("The policy details are: {:?}\n", (policy.cpu(), policy.cur()));
+/// }
+/// ```
+#[repr(transparent)]
+pub struct Policy(Opaque<bindings::cpufreq_policy>);
+
+impl Policy {
+ /// Creates a reference to an existing `struct cpufreq_policy` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
+ /// of the returned reference.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::cpufreq_policy) -> &'a Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Creates a mutable reference to an existing `struct cpufreq_policy` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
+ /// of the returned reference.
+ #[inline]
+ pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpufreq_policy) -> &'a mut Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &mut *ptr.cast() }
+ }
+
+ /// Returns a raw mutable pointer to the C `struct cpufreq_policy`.
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::cpufreq_policy {
+ let this: *const Self = self;
+ this.cast_mut().cast()
+ }
+
+ #[inline]
+ fn as_ref(&self) -> &bindings::cpufreq_policy {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid.
+ unsafe { &*self.as_raw() }
+ }
+
+ #[inline]
+ fn as_mut_ref(&mut self) -> &mut bindings::cpufreq_policy {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid.
+ unsafe { &mut *self.as_raw() }
+ }
+
+ /// Returns the primary CPU for the [`Policy`].
+ #[inline]
+ pub fn cpu(&self) -> u32 {
+ self.as_ref().cpu
+ }
+
+ /// Returns the minimum frequency for the [`Policy`].
+ #[inline]
+ pub fn min(&self) -> Hertz {
+ Hertz::from_khz(self.as_ref().min as usize)
+ }
+
+    /// Sets the minimum frequency for the [`Policy`].
+ #[inline]
+ pub fn set_min(&mut self, min: Hertz) -> &mut Self {
+ self.as_mut_ref().min = min.as_khz() as u32;
+ self
+ }
+
+ /// Returns the maximum frequency for the [`Policy`].
+ #[inline]
+ pub fn max(&self) -> Hertz {
+ Hertz::from_khz(self.as_ref().max as usize)
+ }
+
+    /// Sets the maximum frequency for the [`Policy`].
+ #[inline]
+ pub fn set_max(&mut self, max: Hertz) -> &mut Self {
+ self.as_mut_ref().max = max.as_khz() as u32;
+ self
+ }
+
+ /// Returns the current frequency for the [`Policy`].
+ #[inline]
+ pub fn cur(&self) -> Hertz {
+ Hertz::from_khz(self.as_ref().cur as usize)
+ }
+
+ /// Returns the suspend frequency for the [`Policy`].
+ #[inline]
+ pub fn suspend_freq(&self) -> Hertz {
+ Hertz::from_khz(self.as_ref().suspend_freq as usize)
+ }
+
+ /// Sets the suspend frequency for the [`Policy`].
+ #[inline]
+ pub fn set_suspend_freq(&mut self, freq: Hertz) -> &mut Self {
+ self.as_mut_ref().suspend_freq = freq.as_khz() as u32;
+ self
+ }
+
+ /// Provides a wrapper to the generic suspend routine.
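+    ///
+    /// ## Examples
+    ///
+    /// A sketch of a [`Driver::suspend`] callback that switches the CPU to the frequency
+    /// previously configured with [`Policy::set_suspend_freq`]:
+    ///
+    /// ```ignore
+    /// fn suspend(policy: &mut cpufreq::Policy) -> Result {
+    ///     policy.generic_suspend()
+    /// }
+    /// ```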
+ #[inline]
+ pub fn generic_suspend(&mut self) -> Result {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid.
+ to_result(unsafe { bindings::cpufreq_generic_suspend(self.as_mut_ref()) })
+ }
+
+ /// Provides a wrapper to the generic get routine.
+ #[inline]
+ pub fn generic_get(&self) -> Result<u32> {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid.
+ Ok(unsafe { bindings::cpufreq_generic_get(self.cpu()) })
+ }
+
+    /// Provides a wrapper to register with the energy model using the OPP core.
+ #[cfg(CONFIG_PM_OPP)]
+ #[inline]
+ pub fn register_em_opp(&mut self) {
+ // SAFETY: By the type invariant, the pointer stored in `self` is valid.
+ unsafe { bindings::cpufreq_register_em_with_opp(self.as_mut_ref()) };
+ }
+
+ /// Gets [`cpumask::Cpumask`] for a cpufreq [`Policy`].
+ #[inline]
+ pub fn cpus(&mut self) -> &mut cpumask::Cpumask {
+ // SAFETY: The pointer to `cpus` is valid for writing and remains valid for the lifetime of
+ // the returned reference.
+ unsafe { cpumask::CpumaskVar::as_mut_ref(&mut self.as_mut_ref().cpus) }
+ }
+
+ /// Sets clock for the [`Policy`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that the returned [`Clk`] is not dropped while it is getting used
+ /// by the C code.
+ #[cfg(CONFIG_COMMON_CLK)]
+ pub unsafe fn set_clk(&mut self, dev: &Device, name: Option<&CStr>) -> Result<Clk> {
+ let clk = Clk::get(dev, name)?;
+ self.as_mut_ref().clk = clk.as_raw();
+ Ok(clk)
+ }
+
+ /// Allows / disallows frequency switching code to run on any CPU.
+ #[inline]
+ pub fn set_dvfs_possible_from_any_cpu(&mut self, val: bool) -> &mut Self {
+ self.as_mut_ref().dvfs_possible_from_any_cpu = val;
+ self
+ }
+
+    /// Returns whether fast switching of frequencies is possible.
+ #[inline]
+ pub fn fast_switch_possible(&self) -> bool {
+ self.as_ref().fast_switch_possible
+ }
+
+ /// Enables / disables fast frequency switching.
+ #[inline]
+ pub fn set_fast_switch_possible(&mut self, val: bool) -> &mut Self {
+ self.as_mut_ref().fast_switch_possible = val;
+ self
+ }
+
+ /// Sets transition latency (in nanoseconds) for the [`Policy`].
+ #[inline]
+ pub fn set_transition_latency_ns(&mut self, latency_ns: u32) -> &mut Self {
+ self.as_mut_ref().cpuinfo.transition_latency = latency_ns;
+ self
+ }
+
+ /// Sets cpuinfo `min_freq`.
+ #[inline]
+ pub fn set_cpuinfo_min_freq(&mut self, min_freq: Hertz) -> &mut Self {
+ self.as_mut_ref().cpuinfo.min_freq = min_freq.as_khz() as u32;
+ self
+ }
+
+ /// Sets cpuinfo `max_freq`.
+ #[inline]
+ pub fn set_cpuinfo_max_freq(&mut self, max_freq: Hertz) -> &mut Self {
+ self.as_mut_ref().cpuinfo.max_freq = max_freq.as_khz() as u32;
+ self
+ }
+
+    /// Sets `transition_delay_us`, i.e. the minimum time between successive frequency change
+ /// requests.
+ #[inline]
+ pub fn set_transition_delay_us(&mut self, transition_delay_us: u32) -> &mut Self {
+ self.as_mut_ref().transition_delay_us = transition_delay_us;
+ self
+ }
+
+ /// Returns reference to the CPU frequency [`Table`] for the [`Policy`].
+ pub fn freq_table(&self) -> Result<&Table> {
+ if self.as_ref().freq_table.is_null() {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: The `freq_table` is guaranteed to be valid for reading and remains valid for the
+ // lifetime of the returned reference.
+ Ok(unsafe { Table::from_raw(self.as_ref().freq_table) })
+ }
+
+ /// Sets the CPU frequency [`Table`] for the [`Policy`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that the [`Table`] is not dropped while it is getting used by the
+ /// C code.
+ #[inline]
+ pub unsafe fn set_freq_table(&mut self, table: &Table) -> &mut Self {
+ self.as_mut_ref().freq_table = table.as_raw();
+ self
+ }
+
+ /// Returns the [`Policy`]'s private data.
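+    ///
+    /// ## Examples
+    ///
+    /// A sketch of reading the data back inside a [`Driver`] callback; the `SampleDevice`
+    /// type stands in for whatever the driver stored from its `init` callback:
+    ///
+    /// ```ignore
+    /// fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+    ///     let data = policy.data::<Arc<SampleDevice>>().ok_or(EINVAL)?;
+    ///     // Query the hardware through `data` and return the current frequency in kHz.
+    ///     Ok(0)
+    /// }
+    /// ```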
+ pub fn data<T: ForeignOwnable>(&mut self) -> Option<<T>::Borrowed<'_>> {
+ if self.as_ref().driver_data.is_null() {
+ None
+ } else {
+            // SAFETY: The data was set earlier via [`set_data`].
+ Some(unsafe { T::borrow(self.as_ref().driver_data) })
+ }
+ }
+
+ /// Sets the private data of the [`Policy`] using a foreign-ownable wrapper.
+ ///
+ /// # Errors
+ ///
+ /// Returns `EBUSY` if private data is already set.
+ fn set_data<T: ForeignOwnable>(&mut self, data: T) -> Result {
+ if self.as_ref().driver_data.is_null() {
+ // Transfer the ownership of the data to the foreign interface.
+ self.as_mut_ref().driver_data = <T as ForeignOwnable>::into_foreign(data) as _;
+ Ok(())
+ } else {
+ Err(EBUSY)
+ }
+ }
+
+ /// Clears and returns ownership of the private data.
+ fn clear_data<T: ForeignOwnable>(&mut self) -> Option<T> {
+ if self.as_ref().driver_data.is_null() {
+ None
+ } else {
+ let data = Some(
+                // SAFETY: The data was set earlier by us via [`set_data`]. It is safe to take
+                // back ownership of the data from the foreign interface.
+ unsafe { <T as ForeignOwnable>::from_foreign(self.as_ref().driver_data) },
+ );
+ self.as_mut_ref().driver_data = ptr::null_mut();
+ data
+ }
+ }
+}
+
+/// CPU frequency policy created from a CPU number.
+///
+/// This struct represents the CPU frequency policy obtained for a specific CPU, providing safe
+/// access to the underlying `cpufreq_policy` and ensuring proper cleanup when the `PolicyCpu` is
+/// dropped.
+struct PolicyCpu<'a>(&'a mut Policy);
+
+impl<'a> PolicyCpu<'a> {
+ fn from_cpu(cpu: u32) -> Result<Self> {
+ // SAFETY: It is safe to call `cpufreq_cpu_get` for any valid CPU.
+ let ptr = from_err_ptr(unsafe { bindings::cpufreq_cpu_get(cpu) })?;
+
+ Ok(Self(
+ // SAFETY: The `ptr` is guaranteed to be valid and remains valid for the lifetime of
+ // the returned reference.
+ unsafe { Policy::from_raw_mut(ptr) },
+ ))
+ }
+}
+
+impl<'a> Deref for PolicyCpu<'a> {
+ type Target = Policy;
+
+ fn deref(&self) -> &Self::Target {
+ self.0
+ }
+}
+
+impl<'a> DerefMut for PolicyCpu<'a> {
+ fn deref_mut(&mut self) -> &mut Policy {
+ self.0
+ }
+}
+
+impl<'a> Drop for PolicyCpu<'a> {
+ fn drop(&mut self) {
+ // SAFETY: The underlying pointer is guaranteed to be valid for the lifetime of `self`.
+ unsafe { bindings::cpufreq_cpu_put(self.0.as_raw()) };
+ }
+}
+
+/// CPU frequency driver.
+///
+/// Implement this trait to provide a CPU frequency driver and its callbacks.
+///
+/// Reference: <https://docs.kernel.org/cpu-freq/cpu-drivers.html>
+#[vtable]
+pub trait Driver {
+ /// Driver's name.
+ const NAME: &'static CStr;
+
+ /// Driver's flags.
+ const FLAGS: u16;
+
+ /// Boost support.
+ const BOOST_ENABLED: bool;
+
+ /// Policy specific data.
+ ///
+    /// Requires that `PData` implement `ForeignOwnable`. We guarantee never to move the underlying
+ /// wrapped data structure.
+ type PData: ForeignOwnable;
+
+ /// Driver's `init` callback.
+ fn init(policy: &mut Policy) -> Result<Self::PData>;
+
+ /// Driver's `exit` callback.
+ fn exit(_policy: &mut Policy, _data: Option<Self::PData>) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `online` callback.
+ fn online(_policy: &mut Policy) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `offline` callback.
+ fn offline(_policy: &mut Policy) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `suspend` callback.
+ fn suspend(_policy: &mut Policy) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `resume` callback.
+ fn resume(_policy: &mut Policy) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `ready` callback.
+ fn ready(_policy: &mut Policy) {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `verify` callback.
+ fn verify(data: &mut PolicyData) -> Result;
+
+ /// Driver's `setpolicy` callback.
+ fn setpolicy(_policy: &mut Policy) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `target` callback.
+ fn target(_policy: &mut Policy, _target_freq: u32, _relation: Relation) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `target_index` callback.
+ fn target_index(_policy: &mut Policy, _index: TableIndex) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `fast_switch` callback.
+ fn fast_switch(_policy: &mut Policy, _target_freq: u32) -> u32 {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `adjust_perf` callback.
+ fn adjust_perf(_policy: &mut Policy, _min_perf: usize, _target_perf: usize, _capacity: usize) {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `get_intermediate` callback.
+ fn get_intermediate(_policy: &mut Policy, _index: TableIndex) -> u32 {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `target_intermediate` callback.
+ fn target_intermediate(_policy: &mut Policy, _index: TableIndex) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `get` callback.
+ fn get(_policy: &mut Policy) -> Result<u32> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `update_limits` callback.
+ fn update_limits(_policy: &mut Policy) {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `bios_limit` callback.
+ fn bios_limit(_policy: &mut Policy, _limit: &mut u32) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `set_boost` callback.
+ fn set_boost(_policy: &mut Policy, _state: i32) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Driver's `register_em` callback.
+ fn register_em(_policy: &mut Policy) {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+}
+
+/// CPU frequency driver registration.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to register a cpufreq driver.
+///
+/// ```
+/// use kernel::{
+/// cpufreq,
+/// c_str,
+/// device::{Core, Device},
+/// macros::vtable,
+/// of, platform,
+/// sync::Arc,
+/// };
+/// struct SampleDevice;
+///
+/// #[derive(Default)]
+/// struct SampleDriver;
+///
+/// #[vtable]
+/// impl cpufreq::Driver for SampleDriver {
+/// const NAME: &'static CStr = c_str!("cpufreq-sample");
+/// const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+/// const BOOST_ENABLED: bool = true;
+///
+/// type PData = Arc<SampleDevice>;
+///
+/// fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+/// // Initialize here
+/// Ok(Arc::new(SampleDevice, GFP_KERNEL)?)
+/// }
+///
+/// fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+/// Ok(())
+/// }
+///
+/// fn suspend(policy: &mut cpufreq::Policy) -> Result {
+/// policy.generic_suspend()
+/// }
+///
+/// fn verify(data: &mut cpufreq::PolicyData) -> Result {
+/// data.generic_verify()
+/// }
+///
+/// fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+/// // Update CPU frequency
+/// Ok(())
+/// }
+///
+/// fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+/// policy.generic_get()
+/// }
+/// }
+///
+/// impl platform::Driver for SampleDriver {
+/// type IdInfo = ();
+/// const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None;
+///
+/// fn probe(
+/// pdev: &platform::Device<Core>,
+/// _id_info: Option<&Self::IdInfo>,
+/// ) -> Result<Pin<KBox<Self>>> {
+/// cpufreq::Registration::<SampleDriver>::new_foreign_owned(pdev.as_ref())?;
+/// Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+/// }
+/// }
+/// ```
+#[repr(transparent)]
+pub struct Registration<T: Driver>(KBox<UnsafeCell<bindings::cpufreq_driver>>, PhantomData<T>);
+
+// SAFETY: `Registration` doesn't offer any methods or access to fields when shared between threads
+// or CPUs, so it is safe to share it.
+unsafe impl<T: Driver> Sync for Registration<T> {}
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+// SAFETY: Registration with and unregistration from the cpufreq subsystem can happen from any
+// thread.
+unsafe impl<T: Driver> Send for Registration<T> {}
+
+impl<T: Driver> Registration<T> {
+ const VTABLE: bindings::cpufreq_driver = bindings::cpufreq_driver {
+ name: Self::copy_name(T::NAME),
+ boost_enabled: T::BOOST_ENABLED,
+ flags: T::FLAGS,
+
+ // Initialize mandatory callbacks.
+ init: Some(Self::init_callback),
+ verify: Some(Self::verify_callback),
+
+ // Initialize optional callbacks based on the traits of `T`.
+ setpolicy: if T::HAS_SETPOLICY {
+ Some(Self::setpolicy_callback)
+ } else {
+ None
+ },
+ target: if T::HAS_TARGET {
+ Some(Self::target_callback)
+ } else {
+ None
+ },
+ target_index: if T::HAS_TARGET_INDEX {
+ Some(Self::target_index_callback)
+ } else {
+ None
+ },
+ fast_switch: if T::HAS_FAST_SWITCH {
+ Some(Self::fast_switch_callback)
+ } else {
+ None
+ },
+ adjust_perf: if T::HAS_ADJUST_PERF {
+ Some(Self::adjust_perf_callback)
+ } else {
+ None
+ },
+ get_intermediate: if T::HAS_GET_INTERMEDIATE {
+ Some(Self::get_intermediate_callback)
+ } else {
+ None
+ },
+ target_intermediate: if T::HAS_TARGET_INTERMEDIATE {
+ Some(Self::target_intermediate_callback)
+ } else {
+ None
+ },
+ get: if T::HAS_GET {
+ Some(Self::get_callback)
+ } else {
+ None
+ },
+ update_limits: if T::HAS_UPDATE_LIMITS {
+ Some(Self::update_limits_callback)
+ } else {
+ None
+ },
+ bios_limit: if T::HAS_BIOS_LIMIT {
+ Some(Self::bios_limit_callback)
+ } else {
+ None
+ },
+ online: if T::HAS_ONLINE {
+ Some(Self::online_callback)
+ } else {
+ None
+ },
+ offline: if T::HAS_OFFLINE {
+ Some(Self::offline_callback)
+ } else {
+ None
+ },
+ exit: if T::HAS_EXIT {
+ Some(Self::exit_callback)
+ } else {
+ None
+ },
+ suspend: if T::HAS_SUSPEND {
+ Some(Self::suspend_callback)
+ } else {
+ None
+ },
+ resume: if T::HAS_RESUME {
+ Some(Self::resume_callback)
+ } else {
+ None
+ },
+ ready: if T::HAS_READY {
+ Some(Self::ready_callback)
+ } else {
+ None
+ },
+ set_boost: if T::HAS_SET_BOOST {
+ Some(Self::set_boost_callback)
+ } else {
+ None
+ },
+ register_em: if T::HAS_REGISTER_EM {
+ Some(Self::register_em_callback)
+ } else {
+ None
+ },
+ // SAFETY: All zeros is a valid value for `bindings::cpufreq_driver`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ };
+
+ const fn copy_name(name: &'static CStr) -> [c_char; CPUFREQ_NAME_LEN] {
+ let src = name.as_bytes_with_nul();
+ let mut dst = [0; CPUFREQ_NAME_LEN];
+
+ build_assert!(src.len() <= CPUFREQ_NAME_LEN);
+
+ let mut i = 0;
+ while i < src.len() {
+ dst[i] = src[i];
+ i += 1;
+ }
+
+ dst
+ }
+
+ /// Registers a CPU frequency driver with the cpufreq core.
+ pub fn new() -> Result<Self> {
+ // We can't use `&Self::VTABLE` directly because the cpufreq core modifies some fields in
+ // the C `struct cpufreq_driver`, which requires a mutable reference.
+ let mut drv = KBox::new(UnsafeCell::new(Self::VTABLE), GFP_KERNEL)?;
+
+ // SAFETY: `drv` is guaranteed to be valid for the lifetime of `Registration`.
+ to_result(unsafe { bindings::cpufreq_register_driver(drv.get_mut()) })?;
+
+ Ok(Self(drv, PhantomData))
+ }
+
+ /// Same as [`Registration::new`], but does not return a [`Registration`] instance.
+ ///
+ /// Instead, the [`Registration`] is owned by [`Devres`] and will be revoked / dropped once
+ /// the device is detached.
+ pub fn new_foreign_owned(dev: &Device<Bound>) -> Result {
+ Devres::new_foreign_owned(dev, Self::new()?, GFP_KERNEL)
+ }
+}
+
+/// CPU frequency driver callbacks.
+impl<T: Driver> Registration<T> {
+ /// Driver's `init` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn init_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+
+ let data = T::init(policy)?;
+ policy.set_data(data)?;
+ Ok(0)
+ })
+ }
+
+ /// Driver's `exit` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn exit_callback(ptr: *mut bindings::cpufreq_policy) {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+
+ let data = policy.clear_data();
+ let _ = T::exit(policy, data);
+ }
+
+ /// Driver's `online` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn online_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::online(policy).map(|()| 0)
+ })
+ }
+
+ /// Driver's `offline` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn offline_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::offline(policy).map(|()| 0)
+ })
+ }
+
+ /// Driver's `suspend` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn suspend_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::suspend(policy).map(|()| 0)
+ })
+ }
+
+ /// Driver's `resume` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn resume_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::resume(policy).map(|()| 0)
+ })
+ }
+
+ /// Driver's `ready` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn ready_callback(ptr: *mut bindings::cpufreq_policy) {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::ready(policy);
+ }
+
+ /// Driver's `verify` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn verify_callback(ptr: *mut bindings::cpufreq_policy_data) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let data = unsafe { PolicyData::from_raw_mut(ptr) };
+ T::verify(data).map(|()| 0)
+ })
+ }
+
+ /// Driver's `setpolicy` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn setpolicy_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::setpolicy(policy).map(|()| 0)
+ })
+ }
+
+ /// Driver's `target` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn target_callback(
+ ptr: *mut bindings::cpufreq_policy,
+ target_freq: u32,
+ relation: u32,
+ ) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::target(policy, target_freq, Relation::new(relation)?).map(|()| 0)
+ })
+ }
+
+ /// Driver's `target_index` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn target_index_callback(
+ ptr: *mut bindings::cpufreq_policy,
+ index: u32,
+ ) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+
+ // SAFETY: The C code guarantees that `index` corresponds to a valid entry in the
+ // frequency table.
+ let index = unsafe { TableIndex::new(index as usize) };
+
+ T::target_index(policy, index).map(|()| 0)
+ })
+ }
+
+ /// Driver's `fast_switch` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn fast_switch_callback(
+ ptr: *mut bindings::cpufreq_policy,
+ target_freq: u32,
+ ) -> kernel::ffi::c_uint {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::fast_switch(policy, target_freq)
+ }
+
+ /// Driver's `adjust_perf` callback.
+ extern "C" fn adjust_perf_callback(
+ cpu: u32,
+ min_perf: usize,
+ target_perf: usize,
+ capacity: usize,
+ ) {
+ if let Ok(mut policy) = PolicyCpu::from_cpu(cpu) {
+ T::adjust_perf(&mut policy, min_perf, target_perf, capacity);
+ }
+ }
+
+ /// Driver's `get_intermediate` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn get_intermediate_callback(
+ ptr: *mut bindings::cpufreq_policy,
+ index: u32,
+ ) -> kernel::ffi::c_uint {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+
+ // SAFETY: The C code guarantees that `index` corresponds to a valid entry in the
+ // frequency table.
+ let index = unsafe { TableIndex::new(index as usize) };
+
+ T::get_intermediate(policy, index)
+ }
+
+ /// Driver's `target_intermediate` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn target_intermediate_callback(
+ ptr: *mut bindings::cpufreq_policy,
+ index: u32,
+ ) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+
+ // SAFETY: The C code guarantees that `index` corresponds to a valid entry in the
+ // frequency table.
+ let index = unsafe { TableIndex::new(index as usize) };
+
+ T::target_intermediate(policy, index).map(|()| 0)
+ })
+ }
+
+ /// Driver's `get` callback.
+ extern "C" fn get_callback(cpu: u32) -> kernel::ffi::c_uint {
+ PolicyCpu::from_cpu(cpu).map_or(0, |mut policy| T::get(&mut policy).unwrap_or(0))
+ }
+
+ /// Driver's `update_limits` callback.
+ extern "C" fn update_limits_callback(ptr: *mut bindings::cpufreq_policy) {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::update_limits(policy);
+ }
+
+ /// Driver's `bios_limit` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn bios_limit_callback(cpu: i32, limit: *mut u32) -> kernel::ffi::c_int {
+ from_result(|| {
+ let mut policy = PolicyCpu::from_cpu(cpu as u32)?;
+
+ // SAFETY: `limit` is guaranteed by the C code to be valid for reads and writes; a
+ // reference to the pointee (not to a temporary copy) is passed on, so the callee's
+ // writes reach the caller.
+ T::bios_limit(&mut policy, unsafe { &mut *limit }).map(|()| 0)
+ })
+ }
+
+ /// Driver's `set_boost` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn set_boost_callback(
+ ptr: *mut bindings::cpufreq_policy,
+ state: i32,
+ ) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::set_boost(policy, state).map(|()| 0)
+ })
+ }
+
+ /// Driver's `register_em` callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn register_em_callback(ptr: *mut bindings::cpufreq_policy) {
+ // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
+ // lifetime of `policy`.
+ let policy = unsafe { Policy::from_raw_mut(ptr) };
+ T::register_em(policy);
+ }
+}
+
+impl<T: Driver> Drop for Registration<T> {
+ /// Unregisters with the cpufreq core.
+ fn drop(&mut self) {
+ // SAFETY: `self.0` is guaranteed to be valid for the lifetime of `Registration`.
+ unsafe { bindings::cpufreq_unregister_driver(self.0.get_mut()) };
+ }
+}
diff --git a/rust/kernel/cpumask.rs b/rust/kernel/cpumask.rs
new file mode 100644
index 000000000000..c90bfac9346a
--- /dev/null
+++ b/rust/kernel/cpumask.rs
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! CPU Mask abstractions.
+//!
+//! C header: [`include/linux/cpumask.h`](srctree/include/linux/cpumask.h)
+
+use crate::{
+ alloc::{AllocError, Flags},
+ prelude::*,
+ types::Opaque,
+};
+
+#[cfg(CONFIG_CPUMASK_OFFSTACK)]
+use core::ptr::{self, NonNull};
+
+#[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
+use core::mem::MaybeUninit;
+
+use core::ops::{Deref, DerefMut};
+
+/// A CPU Mask.
+///
+/// Rust abstraction for the C `struct cpumask`.
+///
+/// # Invariants
+///
+/// A [`Cpumask`] instance always corresponds to a valid C `struct cpumask`.
+///
+/// The callers must ensure that the `struct cpumask` is valid for access and
+/// remains valid for the lifetime of the returned reference.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to update a [`Cpumask`].
+///
+/// ```
+/// use kernel::bindings;
+/// use kernel::cpumask::Cpumask;
+///
+/// fn set_clear_cpu(ptr: *mut bindings::cpumask, set_cpu: u32, clear_cpu: i32) {
+/// // SAFETY: The `ptr` is valid for writing and remains valid for the lifetime of the
+/// // returned reference.
+/// let mask = unsafe { Cpumask::as_mut_ref(ptr) };
+///
+/// mask.set(set_cpu);
+/// mask.clear(clear_cpu);
+/// }
+/// ```
+#[repr(transparent)]
+pub struct Cpumask(Opaque<bindings::cpumask>);
+
+impl Cpumask {
+ /// Creates a mutable reference to an existing `struct cpumask` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
+ /// of the returned reference.
+ pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask) -> &'a mut Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &mut *ptr.cast() }
+ }
+
+ /// Creates a reference to an existing `struct cpumask` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
+ /// of the returned reference.
+ pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask) -> &'a Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Obtain the raw `struct cpumask` pointer.
+ pub fn as_raw(&self) -> *mut bindings::cpumask {
+ let this: *const Self = self;
+ this.cast_mut().cast()
+ }
+
+ /// Set `cpu` in the cpumask.
+ ///
+ /// ATTENTION: Contrary to C, this Rust `set()` method is non-atomic.
+ /// This mismatches the kernel naming convention: despite its name, it corresponds to the
+ /// C function `__cpumask_set_cpu()`, not `cpumask_set_cpu()`.
+ #[inline]
+ pub fn set(&mut self, cpu: u32) {
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `__cpumask_set_cpu`.
+ unsafe { bindings::__cpumask_set_cpu(cpu, self.as_raw()) };
+ }
+
+ /// Clear `cpu` in the cpumask.
+ ///
+ /// ATTENTION: Contrary to C, this Rust `clear()` method is non-atomic.
+ /// This mismatches the kernel naming convention: despite its name, it corresponds to the
+ /// C function `__cpumask_clear_cpu()`, not `cpumask_clear_cpu()`.
+ #[inline]
+ pub fn clear(&mut self, cpu: i32) {
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to
+ // `__cpumask_clear_cpu`.
+ unsafe { bindings::__cpumask_clear_cpu(cpu, self.as_raw()) };
+ }
+
+ /// Test `cpu` in the cpumask.
+ ///
+ /// Equivalent to the kernel's `cpumask_test_cpu` API.
+ #[inline]
+ pub fn test(&self, cpu: i32) -> bool {
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_test_cpu`.
+ unsafe { bindings::cpumask_test_cpu(cpu, self.as_raw()) }
+ }
+
+ /// Set all CPUs in the cpumask.
+ ///
+ /// Equivalent to the kernel's `cpumask_setall` API.
+ #[inline]
+ pub fn setall(&mut self) {
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_setall`.
+ unsafe { bindings::cpumask_setall(self.as_raw()) };
+ }
+
+ /// Checks if cpumask is empty.
+ ///
+ /// Equivalent to the kernel's `cpumask_empty` API.
+ #[inline]
+ pub fn empty(&self) -> bool {
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_empty`.
+ unsafe { bindings::cpumask_empty(self.as_raw()) }
+ }
+
+ /// Checks if cpumask is full.
+ ///
+ /// Equivalent to the kernel's `cpumask_full` API.
+ #[inline]
+ pub fn full(&self) -> bool {
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_full`.
+ unsafe { bindings::cpumask_full(self.as_raw()) }
+ }
+
+ /// Get weight of the cpumask.
+ ///
+ /// Equivalent to the kernel's `cpumask_weight` API.
+ #[inline]
+ pub fn weight(&self) -> u32 {
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_weight`.
+ unsafe { bindings::cpumask_weight(self.as_raw()) }
+ }
+
+ /// Copy cpumask.
+ ///
+ /// Equivalent to the kernel's `cpumask_copy` API.
+ #[inline]
+ pub fn copy(&self, dstp: &mut Self) {
+ // SAFETY: By the type invariant, `Self::as_raw` is a valid argument to `cpumask_copy`.
+ unsafe { bindings::cpumask_copy(dstp.as_raw(), self.as_raw()) };
+ }
+}
+
+/// A CPU Mask pointer.
+///
+/// Rust abstraction for the C `struct cpumask_var_t`.
+///
+/// # Invariants
+///
+/// A [`CpumaskVar`] instance always corresponds to a valid C `struct cpumask_var_t`.
+///
+/// The callers must ensure that the `struct cpumask_var_t` is valid for access and remains valid
+/// for the lifetime of [`CpumaskVar`].
+///
+/// ## Examples
+///
+/// The following example demonstrates how to create and update a [`CpumaskVar`].
+///
+/// ```
+/// use kernel::cpumask::CpumaskVar;
+///
+/// let mut mask = CpumaskVar::new_zero(GFP_KERNEL).unwrap();
+///
+/// assert!(mask.empty());
+/// mask.set(2);
+/// assert!(mask.test(2));
+/// mask.set(3);
+/// assert!(mask.test(3));
+/// assert_eq!(mask.weight(), 2);
+///
+/// let mask2 = CpumaskVar::try_clone(&mask).unwrap();
+/// assert!(mask2.test(2));
+/// assert!(mask2.test(3));
+/// assert_eq!(mask2.weight(), 2);
+/// ```
+pub struct CpumaskVar {
+ #[cfg(CONFIG_CPUMASK_OFFSTACK)]
+ ptr: NonNull<Cpumask>,
+ #[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
+ mask: Cpumask,
+}
+
+impl CpumaskVar {
+ /// Creates a zero-initialized instance of the [`CpumaskVar`].
+ pub fn new_zero(_flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ #[cfg(CONFIG_CPUMASK_OFFSTACK)]
+ ptr: {
+ let mut ptr: *mut bindings::cpumask = ptr::null_mut();
+
+ // SAFETY: It is safe to call this method as the reference to `ptr` is valid.
+ //
+ // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of
+ // scope.
+ unsafe { bindings::zalloc_cpumask_var(&mut ptr, _flags.as_raw()) };
+ NonNull::new(ptr.cast()).ok_or(AllocError)?
+ },
+
+ #[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
+ // SAFETY: FFI type is valid to be zero-initialized.
+ //
+ // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of scope.
+ mask: unsafe { core::mem::zeroed() },
+ })
+ }
+
+ /// Creates an instance of the [`CpumaskVar`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the returned [`CpumaskVar`] is properly initialized before
+ /// getting used.
+ pub unsafe fn new(_flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ #[cfg(CONFIG_CPUMASK_OFFSTACK)]
+ ptr: {
+ let mut ptr: *mut bindings::cpumask = ptr::null_mut();
+
+ // SAFETY: It is safe to call this method as the reference to `ptr` is valid.
+ //
+ // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of
+ // scope.
+ unsafe { bindings::alloc_cpumask_var(&mut ptr, _flags.as_raw()) };
+ NonNull::new(ptr.cast()).ok_or(AllocError)?
+ },
+ #[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of scope.
+ mask: unsafe { MaybeUninit::uninit().assume_init() },
+ })
+ }
+
+ /// Creates a mutable reference to an existing `struct cpumask_var_t` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
+ /// of the returned reference.
+ pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &mut *ptr.cast() }
+ }
+
+ /// Creates a reference to an existing `struct cpumask_var_t` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
+ /// of the returned reference.
+ pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ //
+ // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the
+ // lifetime of the returned reference.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Clones cpumask.
+ pub fn try_clone(cpumask: &Cpumask) -> Result<Self> {
+ // SAFETY: The returned cpumask_var is initialized right after this call.
+ let mut cpumask_var = unsafe { Self::new(GFP_KERNEL) }?;
+
+ cpumask.copy(&mut cpumask_var);
+ Ok(cpumask_var)
+ }
+}
+
+// Make [`CpumaskVar`] behave like a pointer to [`Cpumask`].
+impl Deref for CpumaskVar {
+ type Target = Cpumask;
+
+ #[cfg(CONFIG_CPUMASK_OFFSTACK)]
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The caller owns CpumaskVar, so it is safe to deref the cpumask.
+ unsafe { &*self.ptr.as_ptr() }
+ }
+
+ #[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
+ fn deref(&self) -> &Self::Target {
+ &self.mask
+ }
+}
+
+impl DerefMut for CpumaskVar {
+ #[cfg(CONFIG_CPUMASK_OFFSTACK)]
+ fn deref_mut(&mut self) -> &mut Cpumask {
+ // SAFETY: The caller owns CpumaskVar, so it is safe to deref the cpumask.
+ unsafe { self.ptr.as_mut() }
+ }
+
+ #[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
+ fn deref_mut(&mut self) -> &mut Cpumask {
+ &mut self.mask
+ }
+}
+
+impl Drop for CpumaskVar {
+ fn drop(&mut self) {
+ #[cfg(CONFIG_CPUMASK_OFFSTACK)]
+ // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `free_cpumask_var`.
+ unsafe {
+ bindings::free_cpumask_var(self.as_raw())
+ };
+ }
+}
diff --git a/rust/kernel/cred.rs b/rust/kernel/cred.rs
new file mode 100644
index 000000000000..2599f01e8b28
--- /dev/null
+++ b/rust/kernel/cred.rs
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Credentials management.
+//!
+//! C header: [`include/linux/cred.h`](srctree/include/linux/cred.h).
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/security/credentials.html>
+
+use crate::{
+ bindings,
+ task::Kuid,
+ types::{AlwaysRefCounted, Opaque},
+};
+
+/// Wraps the kernel's `struct cred`.
+///
+/// Credentials are used for various security checks in the kernel.
+///
+/// Most fields of credentials are immutable. When things have their credentials changed, that
+/// happens by replacing the credential instead of changing an existing credential. See the [kernel
+/// documentation][ref] for more info on this.
+///
+/// # Invariants
+///
+/// Instances of this type are always ref-counted, that is, a call to `get_cred` ensures that the
+/// allocation remains valid at least until the matching call to `put_cred`.
+///
+/// [ref]: https://www.kernel.org/doc/html/latest/security/credentials.html
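+///
+/// # Examples
+///
+/// A minimal sketch of borrowing a [`Credential`] from a raw `struct cred` pointer and reading
+/// its effective UID; the pointer and its validity are assumptions of the example:
+///
+/// ```
+/// use kernel::bindings;
+/// use kernel::cred::Credential;
+/// use kernel::task::Kuid;
+///
+/// fn euid_of(ptr: *const bindings::cred) -> Kuid {
+/// // SAFETY: Assumed for this example: `ptr` is valid and remains valid for the lifetime
+/// // of the returned reference.
+/// let cred = unsafe { Credential::from_ptr(ptr) };
+/// cred.euid()
+/// }
+/// ```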
+#[repr(transparent)]
+pub struct Credential(Opaque<bindings::cred>);
+
+// SAFETY:
+// - `Credential::dec_ref` can be called from any thread.
+// - It is okay to send ownership of `Credential` across thread boundaries.
+unsafe impl Send for Credential {}
+
+// SAFETY: It's OK to access `Credential` through shared references from other threads because
+// we're either accessing properties that don't change or that are properly synchronised by C code.
+unsafe impl Sync for Credential {}
+
+impl Credential {
+ /// Creates a reference to a [`Credential`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
+ /// returned [`Credential`] reference.
+ #[inline]
+ pub unsafe fn from_ptr<'a>(ptr: *const bindings::cred) -> &'a Credential {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `Credential` type being transparent makes the cast ok.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Get the id for this security context.
+ #[inline]
+ pub fn get_secid(&self) -> u32 {
+ let mut secid = 0;
+ // SAFETY: The invariants of this type ensures that the pointer is valid.
+ unsafe { bindings::security_cred_getsecid(self.0.get(), &mut secid) };
+ secid
+ }
+
+ /// Returns the effective UID of the given credential.
+ #[inline]
+ pub fn euid(&self) -> Kuid {
+ // SAFETY: By the type invariant, we know that `self.0` is valid. Furthermore, the `euid`
+ // field of a credential is never changed after initialization, so there is no potential
+ // for data races.
+ Kuid::from_raw(unsafe { (*self.0.get()).euid })
+ }
+}
+
+// SAFETY: The type invariants guarantee that `Credential` is always ref-counted.
+unsafe impl AlwaysRefCounted for Credential {
+ #[inline]
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_cred(self.0.get()) };
+ }
+
+ #[inline]
+ unsafe fn dec_ref(obj: core::ptr::NonNull<Credential>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero. The cast is okay
+ // because `Credential` has the same representation as `struct cred`.
+ unsafe { bindings::put_cred(obj.cast().as_ptr()) };
+ }
+}
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
new file mode 100644
index 000000000000..f08583fa39c9
--- /dev/null
+++ b/rust/kernel/device.rs
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic devices that are part of the kernel's driver model.
+//!
+//! C header: [`include/linux/device.h`](srctree/include/linux/device.h)
+
+use crate::{
+ bindings,
+ str::CStr,
+ types::{ARef, Opaque},
+};
+use core::{fmt, marker::PhantomData, ptr};
+
+#[cfg(CONFIG_PRINTK)]
+use crate::c_str;
+
+/// A reference-counted device.
+///
+/// This structure represents the Rust abstraction for a C `struct device`. This implementation
+/// abstracts the usage of an already existing C `struct device` within Rust code that we get
+/// passed from the C side.
+///
+/// An instance of this abstraction can be obtained temporarily or permanently.
+///
+/// A temporary one is bound to the lifetime of the C `struct device` pointer used for creation.
+/// A permanent instance is always reference-counted and hence not restricted by any lifetime
+/// boundaries.
+///
+/// For subsystems it is recommended to create a permanent instance to wrap into a subsystem
+/// specific device structure (e.g. `pci::Device`). This is useful for passing it to drivers in
+/// `T::probe()`, such that a driver can store the `ARef<Device>` (equivalent to storing a
+/// `struct device` pointer in a C driver) for arbitrary purposes, e.g. allocating DMA coherent
+/// memory.
+///
+/// # Invariants
+///
+/// A `Device` instance represents a valid `struct device` created by the C portion of the kernel.
+///
+/// Instances of this type are always reference-counted, that is, a call to `get_device` ensures
+/// that the allocation remains valid at least until the matching call to `put_device`.
+///
+/// `bindings::device::release` is valid to be called from any thread, hence `ARef<Device>` can be
+/// dropped from any thread.
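+///
+/// # Examples
+///
+/// A minimal sketch of the recommended pattern: take a reference-counted `ARef<Device>` and
+/// store it in driver private data (the `SampleData` type is a placeholder):
+///
+/// ```
+/// use kernel::{device::Device, types::ARef};
+///
+/// struct SampleData {
+/// dev: ARef<Device>,
+/// }
+///
+/// fn new_sample_data(dev: &Device) -> SampleData {
+/// // `From<&Device>` increments the reference count, so the device stays valid for as
+/// // long as `SampleData` lives.
+/// SampleData { dev: dev.into() }
+/// }
+/// ```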
+#[repr(transparent)]
+pub struct Device<Ctx: DeviceContext = Normal>(Opaque<bindings::device>, PhantomData<Ctx>);
+
+impl Device {
+ /// Creates a new reference-counted abstraction instance from an existing `struct device` pointer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count,
+ /// i.e. it must be ensured that the reference count of the C `struct device` that `ptr`
+ /// points to cannot drop to zero for the duration of this function call.
+ ///
+ /// It must also be ensured that `bindings::device::release` can be called from any thread.
+ /// While not officially documented, this should be the case for any `struct device`.
+ pub unsafe fn get_device(ptr: *mut bindings::device) -> ARef<Self> {
+ // SAFETY: By the safety requirements, `ptr` is valid.
+ unsafe { Self::as_ref(ptr) }.into()
+ }
+}
+
+impl<Ctx: DeviceContext> Device<Ctx> {
+ /// Obtain the raw `struct device *`.
+ pub(crate) fn as_raw(&self) -> *mut bindings::device {
+ self.0.get()
+ }
+
+ /// Returns a reference to the parent device, if any.
+ #[cfg_attr(not(CONFIG_AUXILIARY_BUS), expect(dead_code))]
+ pub(crate) fn parent(&self) -> Option<&Self> {
+ // SAFETY:
+ // - By the type invariant `self.as_raw()` is always valid.
+ // - The parent device is only ever set at device creation.
+ let parent = unsafe { (*self.as_raw()).parent };
+
+ if parent.is_null() {
+ None
+ } else {
+ // SAFETY:
+ // - Since `parent` is not NULL, it must be a valid pointer to a `struct device`.
+ // - `parent` is valid for the lifetime of `self`, since a `struct device` holds a
+ // reference count of its parent.
+ Some(unsafe { Self::as_ref(parent) })
+ }
+ }
+
+ /// Convert a raw C `struct device` pointer to a `&'a Device`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count,
+ /// i.e. it must be ensured that the reference count of the C `struct device` that `ptr`
+ /// points to cannot drop to zero for the duration of this function call and the entire
+ /// duration when the returned reference exists.
+ pub unsafe fn as_ref<'a>(ptr: *mut bindings::device) -> &'a Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Prints an emergency-level message (level 0) prefixed with device information.
+ ///
+ /// More details are available from [`dev_emerg`].
+ ///
+ /// [`dev_emerg`]: crate::dev_emerg
+ pub fn pr_emerg(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_EMERG, args) };
+ }
+
+ /// Prints an alert-level message (level 1) prefixed with device information.
+ ///
+ /// More details are available from [`dev_alert`].
+ ///
+ /// [`dev_alert`]: crate::dev_alert
+ pub fn pr_alert(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_ALERT, args) };
+ }
+
+ /// Prints a critical-level message (level 2) prefixed with device information.
+ ///
+ /// More details are available from [`dev_crit`].
+ ///
+ /// [`dev_crit`]: crate::dev_crit
+ pub fn pr_crit(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_CRIT, args) };
+ }
+
+ /// Prints an error-level message (level 3) prefixed with device information.
+ ///
+ /// More details are available from [`dev_err`].
+ ///
+ /// [`dev_err`]: crate::dev_err
+ pub fn pr_err(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_ERR, args) };
+ }
+
+ /// Prints a warning-level message (level 4) prefixed with device information.
+ ///
+ /// More details are available from [`dev_warn`].
+ ///
+ /// [`dev_warn`]: crate::dev_warn
+ pub fn pr_warn(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_WARNING, args) };
+ }
+
+ /// Prints a notice-level message (level 5) prefixed with device information.
+ ///
+ /// More details are available from [`dev_notice`].
+ ///
+ /// [`dev_notice`]: crate::dev_notice
+ pub fn pr_notice(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_NOTICE, args) };
+ }
+
+ /// Prints an info-level message (level 6) prefixed with device information.
+ ///
+ /// More details are available from [`dev_info`].
+ ///
+ /// [`dev_info`]: crate::dev_info
+ pub fn pr_info(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_INFO, args) };
+ }
+
+ /// Prints a debug-level message (level 7) prefixed with device information.
+ ///
+ /// More details are available from [`dev_dbg`].
+ ///
+ /// [`dev_dbg`]: crate::dev_dbg
+ pub fn pr_dbg(&self, args: fmt::Arguments<'_>) {
+ if cfg!(debug_assertions) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_DEBUG, args) };
+ }
+ }
+
+ /// Prints the provided message to the console.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `klevel` is null-terminated; in particular, it must be one of
+ /// the `KERN_*` constants, for example, `KERN_CRIT`, `KERN_ALERT`, etc.
+ #[cfg_attr(not(CONFIG_PRINTK), allow(unused_variables))]
+ unsafe fn printk(&self, klevel: &[u8], msg: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated and one of the kernel constants. `self.as_raw`
+ // is valid because `self` is valid. The "%pA" format string expects a pointer to
+ // `fmt::Arguments`, which is what we're passing as the last argument.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_dev_printk(
+ klevel as *const _ as *const crate::ffi::c_char,
+ self.as_raw(),
+ c_str!("%pA").as_char_ptr(),
+ &msg as *const _ as *const crate::ffi::c_void,
+ )
+ };
+ }
+
+ /// Checks whether the property `name` is present on the device.
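+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; the `"wakeup-source"` property name is an assumption of the example:
+ ///
+ /// ```
+ /// use kernel::{c_str, device::Device};
+ ///
+ /// fn has_wakeup_source(dev: &Device) -> bool {
+ /// dev.property_present(c_str!("wakeup-source"))
+ /// }
+ /// ```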
+ pub fn property_present(&self, name: &CStr) -> bool {
+ // SAFETY: By the invariant of `CStr`, `name` is null-terminated.
+ unsafe { bindings::device_property_present(self.as_raw().cast_const(), name.as_char_ptr()) }
+ }
+}
+
+// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
+// argument.
+kernel::impl_device_context_deref!(unsafe { Device });
+kernel::impl_device_context_into_aref!(Device);
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::get_device(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::put_device(obj.cast().as_ptr()) }
+ }
+}
+
+// SAFETY: As by the type invariant `Device` can be sent to any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` can be shared among threads because all immutable methods are protected by the
+// synchronization in `struct device`.
+unsafe impl Sync for Device {}
+
+/// Marker trait for the context of a bus specific device.
+///
+/// Some functions of a bus specific device should only be called from a certain context, i.e. bus
+/// callbacks, such as `probe()`.
+///
+/// This is the marker trait for structures representing the context of a bus specific device.
+pub trait DeviceContext: private::Sealed {}
+
+/// The [`Normal`] context is the context of a bus specific device when it is not an argument of
+/// any bus callback.
+pub struct Normal;
+
+/// The [`Core`] context is the context of a bus specific device when it is supplied as argument of
+/// any of the bus callbacks, such as `probe()`.
+pub struct Core;
+
+/// The [`Bound`] context is the context of a bus specific device reference when it is guaranteed to
+/// be bound for the duration of its lifetime.
+pub struct Bound;
+
+mod private {
+ pub trait Sealed {}
+
+ impl Sealed for super::Bound {}
+ impl Sealed for super::Core {}
+ impl Sealed for super::Normal {}
+}
+
+impl DeviceContext for Bound {}
+impl DeviceContext for Core {}
+impl DeviceContext for Normal {}
+
+/// # Safety
+///
+/// The type given as `$device` must be a transparent wrapper of a type that doesn't depend on the
+/// generic argument of `$device`.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __impl_device_context_deref {
+ (unsafe { $device:ident, $src:ty => $dst:ty }) => {
+ impl ::core::ops::Deref for $device<$src> {
+ type Target = $device<$dst>;
+
+ fn deref(&self) -> &Self::Target {
+ let ptr: *const Self = self;
+
+ // CAST: `$device<$src>` and `$device<$dst>` transparently wrap the same type by the
+ // safety requirement of the macro.
+ let ptr = ptr.cast::<Self::Target>();
+
+ // SAFETY: `ptr` was derived from `&self`.
+ unsafe { &*ptr }
+ }
+ }
+ };
+}
+
+/// Implement [`core::ops::Deref`] traits for allowed [`DeviceContext`] conversions of a (bus
+/// specific) device.
+///
+/// # Safety
+///
+/// The type given as `$device` must be a transparent wrapper of a type that doesn't depend on the
+/// generic argument of `$device`.
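+///
+/// # Examples
+///
+/// A sketch for a hypothetical bus device wrapper (`MyBusDevice` is a placeholder; this file
+/// invokes the macro for [`Device`] itself in the same way):
+///
+/// ```ignore
+/// // SAFETY: `MyBusDevice` is a transparent wrapper of a type that doesn't depend on its
+/// // generic argument.
+/// kernel::impl_device_context_deref!(unsafe { MyBusDevice });
+/// ```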
+#[macro_export]
+macro_rules! impl_device_context_deref {
+ (unsafe { $device:ident }) => {
+ // SAFETY: This macro has the exact same safety requirement as
+ // `__impl_device_context_deref!`.
+ ::kernel::__impl_device_context_deref!(unsafe {
+ $device,
+ $crate::device::Core => $crate::device::Bound
+ });
+
+ // SAFETY: This macro has the exact same safety requirement as
+ // `__impl_device_context_deref!`.
+ ::kernel::__impl_device_context_deref!(unsafe {
+ $device,
+ $crate::device::Bound => $crate::device::Normal
+ });
+ };
+}
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __impl_device_context_into_aref {
+ ($src:ty, $device:tt) => {
+ impl ::core::convert::From<&$device<$src>> for $crate::types::ARef<$device> {
+ fn from(dev: &$device<$src>) -> Self {
+ (&**dev).into()
+ }
+ }
+ };
+}
+
+/// Implement [`core::convert::From`], such that all `&Device<Ctx>` can be converted to an
+/// `ARef<Device>`.
+#[macro_export]
+macro_rules! impl_device_context_into_aref {
+ ($device:tt) => {
+ ::kernel::__impl_device_context_into_aref!($crate::device::Core, $device);
+ ::kernel::__impl_device_context_into_aref!($crate::device::Bound, $device);
+ };
+}
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! dev_printk {
+ ($method:ident, $dev:expr, $($f:tt)*) => {
+ {
+ ($dev).$method(core::format_args!($($f)*));
+ }
+ }
+}
+
+/// Prints an emergency-level message (level 0) prefixed with device information.
+///
+/// This level should be used if the system is unusable.
+///
+/// Equivalent to the kernel's `dev_emerg` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_emerg!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_emerg {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_emerg, $($f)*); }
+}
+
+/// Prints an alert-level message (level 1) prefixed with device information.
+///
+/// This level should be used if action must be taken immediately.
+///
+/// Equivalent to the kernel's `dev_alert` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_alert!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_alert {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_alert, $($f)*); }
+}
+
+/// Prints a critical-level message (level 2) prefixed with device information.
+///
+/// This level should be used in critical conditions.
+///
+/// Equivalent to the kernel's `dev_crit` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_crit!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_crit {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_crit, $($f)*); }
+}
+
+/// Prints an error-level message (level 3) prefixed with device information.
+///
+/// This level should be used in error conditions.
+///
+/// Equivalent to the kernel's `dev_err` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_err!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_err {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_err, $($f)*); }
+}
+
+/// Prints a warning-level message (level 4) prefixed with device information.
+///
+/// This level should be used in warning conditions.
+///
+/// Equivalent to the kernel's `dev_warn` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_warn!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_warn {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_warn, $($f)*); }
+}
+
+/// Prints a notice-level message (level 5) prefixed with device information.
+///
+/// This level should be used in normal but significant conditions.
+///
+/// Equivalent to the kernel's `dev_notice` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_notice!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_notice {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_notice, $($f)*); }
+}
+
+/// Prints an info-level message (level 6) prefixed with device information.
+///
+/// This level should be used for informational messages.
+///
+/// Equivalent to the kernel's `dev_info` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_info!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_info {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_info, $($f)*); }
+}
+
+/// Prints a debug-level message (level 7) prefixed with device information.
+///
+/// This level should be used for debug messages.
+///
+/// Equivalent to the kernel's `dev_dbg` macro, except that it doesn't support dynamic debug yet.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and `alloc::format!`.
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_dbg!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_dbg {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_dbg, $($f)*); }
+}
diff --git a/rust/kernel/device_id.rs b/rust/kernel/device_id.rs
new file mode 100644
index 000000000000..e5859217a579
--- /dev/null
+++ b/rust/kernel/device_id.rs
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic implementation of device IDs.
+//!
+//! Each bus / subsystem that matches device and driver through a bus / subsystem specific ID is
+//! expected to implement [`RawDeviceId`].
+
+use core::mem::MaybeUninit;
+
+/// Marker trait to indicate a Rust device ID type represents a corresponding C device ID type.
+///
+/// This is meant to be implemented by buses/subsystems so that they can use [`IdTable`] to
+/// guarantee (at compile-time) zero-termination of device id tables provided by drivers.
+///
+/// # Safety
+///
+/// Implementers must ensure that:
+/// - `Self` is layout-compatible with [`RawDeviceId::RawType`]; i.e. it's safe to transmute
+/// `Self` to `RawType`.
+///
+/// This requirement is needed so `IdArray::new` can convert `Self` to `RawType` when building
+/// the ID table.
+///
+/// Ideally, this should be achieved using a const function that does the conversion instead
+/// of transmute; however, const trait functions rely on the unstable `const_trait_impl`
+/// feature, which is broken/gone in Rust 1.73.
+///
+/// - `DRIVER_DATA_OFFSET` is the offset of the context/data field of the device ID (usually
+/// named `driver_data`), and the field is suitably sized to hold a `usize` value.
+///
+/// Similar to the previous requirement, the data should ideally be added during `Self` to
+/// `RawType` conversion, but there's currently no way to do it when using traits in const.
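+///
+/// # Examples
+///
+/// A hypothetical implementation for a made-up bus; `SampleId` and `bindings::sample_device_id`
+/// are placeholders, not a real kernel API:
+///
+/// ```ignore
+/// #[repr(transparent)]
+/// struct SampleId(bindings::sample_device_id);
+///
+/// // SAFETY: `SampleId` is a `#[repr(transparent)]` wrapper of `sample_device_id`, and its
+/// // `driver_data` field is a `usize`-sized field at `DRIVER_DATA_OFFSET`.
+/// unsafe impl RawDeviceId for SampleId {
+/// type RawType = bindings::sample_device_id;
+///
+/// const DRIVER_DATA_OFFSET: usize =
+/// core::mem::offset_of!(bindings::sample_device_id, driver_data);
+///
+/// fn index(&self) -> usize {
+/// self.0.driver_data
+/// }
+/// }
+/// ```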
+pub unsafe trait RawDeviceId {
+ /// The raw type that holds the device id.
+ ///
+ /// Id tables created from [`Self`] are going to hold this type in its zero-terminated array.
+ type RawType: Copy;
+
+ /// The offset to the context/data field.
+ const DRIVER_DATA_OFFSET: usize;
+
+ /// The index stored at `DRIVER_DATA_OFFSET` of the implementor of the [`RawDeviceId`] trait.
+ fn index(&self) -> usize;
+}
+
+/// A zero-terminated device id array.
+#[repr(C)]
+pub struct RawIdArray<T: RawDeviceId, const N: usize> {
+ ids: [T::RawType; N],
+ sentinel: MaybeUninit<T::RawType>,
+}
+
+impl<T: RawDeviceId, const N: usize> RawIdArray<T, N> {
+ #[doc(hidden)]
+ pub const fn size(&self) -> usize {
+ core::mem::size_of::<Self>()
+ }
+}
+
+/// A zero-terminated device id array, followed by context data.
+#[repr(C)]
+pub struct IdArray<T: RawDeviceId, U, const N: usize> {
+ raw_ids: RawIdArray<T, N>,
+ id_infos: [U; N],
+}
+
+impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
+ /// Creates a new instance of the array.
+ ///
+ /// The contents are derived from the given identifiers and context information.
+ pub const fn new(ids: [(T, U); N]) -> Self {
+ let mut raw_ids = [const { MaybeUninit::<T::RawType>::uninit() }; N];
+ let mut infos = [const { MaybeUninit::uninit() }; N];
+
+ let mut i = 0usize;
+ while i < N {
+ // SAFETY: by the safety requirement of `RawDeviceId`, we're guaranteed that `T` is
+ // layout-wise compatible with `RawType`.
+ raw_ids[i] = unsafe { core::mem::transmute_copy(&ids[i].0) };
+ // SAFETY: by the safety requirement of `RawDeviceId`, this would be effectively
+ // `raw_ids[i].driver_data = i;`.
+ unsafe {
+ raw_ids[i]
+ .as_mut_ptr()
+ .byte_offset(T::DRIVER_DATA_OFFSET as _)
+ .cast::<usize>()
+ .write(i);
+ }
+
+ // SAFETY: this is effectively a move: `infos[i] = ids[i].1`. We make a copy here but
+ // later forget `ids`.
+ infos[i] = MaybeUninit::new(unsafe { core::ptr::read(&ids[i].1) });
+ i += 1;
+ }
+
+ core::mem::forget(ids);
+
+ Self {
+ raw_ids: RawIdArray {
+ // SAFETY: this is effectively `array_assume_init`, which is unstable, so we use
+ // `transmute_copy` instead. We have initialized all elements of `raw_ids` so this
+ // `array_assume_init` is safe.
+ ids: unsafe { core::mem::transmute_copy(&raw_ids) },
+ sentinel: MaybeUninit::zeroed(),
+ },
+ // SAFETY: We have initialized all elements of `infos` so this `array_assume_init` is
+ // safe.
+ id_infos: unsafe { core::mem::transmute_copy(&infos) },
+ }
+ }
+
+ /// Reference to the contained [`RawIdArray`].
+ pub const fn raw_ids(&self) -> &RawIdArray<T, N> {
+ &self.raw_ids
+ }
+}
+
+/// A device id table.
+///
+/// This trait is only implemented by `IdArray`.
+///
+/// The purpose of this trait is to allow a `&'static dyn IdTable<T, U>` to be used in contexts
+/// where the `N` of the underlying [`IdArray`] doesn't matter.
+pub trait IdTable<T: RawDeviceId, U> {
+ /// Obtain the pointer to the ID table.
+ fn as_ptr(&self) -> *const T::RawType;
+
+ /// Obtain the pointer to the bus specific device ID from an index.
+ fn id(&self, index: usize) -> &T::RawType;
+
+ /// Obtain the pointer to the driver-specific information from an index.
+ fn info(&self, index: usize) -> &U;
+}
+
+impl<T: RawDeviceId, U, const N: usize> IdTable<T, U> for IdArray<T, U, N> {
+ fn as_ptr(&self) -> *const T::RawType {
+ // This cannot be `self.raw_ids.ids.as_ptr()`, as the returned pointer must have correct
+ // provenance to access the sentinel.
+ (self as *const Self).cast()
+ }
+
+ fn id(&self, index: usize) -> &T::RawType {
+ &self.raw_ids.ids[index]
+ }
+
+ fn info(&self, index: usize) -> &U {
+ &self.id_infos[index]
+ }
+}
+
+/// Create device table alias for modpost.
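+///
+/// # Examples
+///
+/// A hypothetical invocation; the names are placeholders, and `SAMPLE_ID_TABLE` would be a
+/// static [`IdArray`] defined by the driver:
+///
+/// ```ignore
+/// kernel::module_device_table!("of", MODULE_OF_TABLE, SAMPLE_ID_TABLE);
+/// ```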
+#[macro_export]
+macro_rules! module_device_table {
+ ($table_type: literal, $module_table_name:ident, $table_name:ident) => {
+ #[rustfmt::skip]
+ #[export_name =
+ concat!("__mod_device_table__", $table_type,
+ "__", module_path!(),
+ "_", line!(),
+ "_", stringify!($table_name))
+ ]
+ static $module_table_name: [core::mem::MaybeUninit<u8>; $table_name.raw_ids().size()] =
+ unsafe { core::mem::transmute_copy($table_name.raw_ids()) };
+ };
+}
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
new file mode 100644
index 000000000000..0f79a2ec9474
--- /dev/null
+++ b/rust/kernel/devres.rs
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Devres abstraction
+//!
+//! [`Devres`] represents an abstraction for the kernel devres (device resource management)
+//! implementation.
+
+use crate::{
+ alloc::Flags,
+ bindings,
+ device::{Bound, Device},
+ error::{Error, Result},
+ ffi::c_void,
+ prelude::*,
+ revocable::Revocable,
+ sync::Arc,
+ types::ARef,
+};
+
+use core::ops::Deref;
+
+#[pin_data]
+struct DevresInner<T> {
+ dev: ARef<Device>,
+ callback: unsafe extern "C" fn(*mut c_void),
+ #[pin]
+ data: Revocable<T>,
+}
+
+/// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to
+/// manage their lifetime.
+///
+/// [`Device`] bound resources should be freed either when the resource goes out of scope or
+/// when the [`Device`] is unbound, whichever happens first.
+///
+/// To achieve that [`Devres`] registers a devres callback on creation, which is called once the
+/// [`Device`] is unbound, revoking access to the encapsulated resource (see also [`Revocable`]).
+///
+/// Once the [`Device`] has been unbound, it is no longer possible to access the encapsulated
+/// resource.
+///
+/// [`Devres`] users should make sure to simply free the corresponding backing resource in `T`'s
+/// [`Drop`] implementation.
+///
+/// # Example
+///
+/// ```no_run
+/// # use kernel::{bindings, c_str, device::{Bound, Device}, devres::Devres, io::{Io, IoRaw}};
+/// # use core::ops::Deref;
+///
+/// // See also [`pci::Bar`] for a real example.
+/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+///
+/// impl<const SIZE: usize> IoMem<SIZE> {
+/// /// # Safety
+/// ///
+/// /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
+/// /// virtual address space.
+/// unsafe fn new(paddr: usize) -> Result<Self> {
+/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
+/// // valid for `ioremap`.
+/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// if addr.is_null() {
+/// return Err(ENOMEM);
+/// }
+///
+/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
+/// fn drop(&mut self) {
+/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
+/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
+/// type Target = Io<SIZE>;
+///
+/// fn deref(&self) -> &Self::Target {
+/// // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
+/// unsafe { Io::from_raw(&self.0) }
+/// }
+/// }
+/// # fn no_run(dev: &Device<Bound>) -> Result<(), Error> {
+/// // SAFETY: Invalid usage for example purposes.
+/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
+/// let devres = Devres::new(dev, iomem, GFP_KERNEL)?;
+///
+/// let res = devres.try_access().ok_or(ENXIO)?;
+/// res.write8(0x42, 0x0);
+/// # Ok(())
+/// # }
+/// ```
+pub struct Devres<T>(Arc<DevresInner<T>>);
+
+impl<T> DevresInner<T> {
+ fn new(dev: &Device<Bound>, data: T, flags: Flags) -> Result<Arc<DevresInner<T>>> {
+ let inner = Arc::pin_init(
+ pin_init!( DevresInner {
+ dev: dev.into(),
+ callback: Self::devres_callback,
+ data <- Revocable::new(data),
+ }),
+ flags,
+ )?;
+
+ // Convert `Arc<DevresInner>` into a raw pointer and make devres own this reference until
+ // `Self::devres_callback` is called.
+ let data = inner.clone().into_raw();
+
+ // SAFETY: `devm_add_action` guarantees to call `Self::devres_callback` once `dev` is
+ // detached.
+ let ret =
+ unsafe { bindings::devm_add_action(dev.as_raw(), Some(inner.callback), data as _) };
+
+ if ret != 0 {
+ // SAFETY: We just created another reference to `inner` in order to pass it to
+ // `bindings::devm_add_action`. If `bindings::devm_add_action` fails, we have to drop
+ // this reference accordingly.
+ let _ = unsafe { Arc::from_raw(data) };
+ return Err(Error::from_errno(ret));
+ }
+
+ Ok(inner)
+ }
+
+ fn as_ptr(&self) -> *const Self {
+ self as _
+ }
+
+ fn remove_action(this: &Arc<Self>) {
+ // SAFETY:
+        // - `this.dev` is a valid `Device`,
+        // - the `action` and `data` pointers are the exact same ones as given to
+        //   devm_add_action() previously,
+        // - `this` is always valid, even if the action has been released already.
+ let ret = unsafe {
+ bindings::devm_remove_action_nowarn(
+ this.dev.as_raw(),
+ Some(this.callback),
+ this.as_ptr() as _,
+ )
+ };
+
+ if ret == 0 {
+ // SAFETY: We leaked an `Arc` reference to devm_add_action() in `DevresInner::new`; if
+ // devm_remove_action_nowarn() was successful we can (and have to) claim back ownership
+ // of this reference.
+ let _ = unsafe { Arc::from_raw(this.as_ptr()) };
+ }
+ }
+
+ #[allow(clippy::missing_safety_doc)]
+ unsafe extern "C" fn devres_callback(ptr: *mut kernel::ffi::c_void) {
+ let ptr = ptr as *mut DevresInner<T>;
+ // Devres owned this memory; now that we received the callback, drop the `Arc` and hence the
+ // reference.
+ // SAFETY: Safe, since we leaked an `Arc` reference to devm_add_action() in
+ // `DevresInner::new`.
+ let inner = unsafe { Arc::from_raw(ptr) };
+
+ inner.data.revoke();
+ }
+}
+
+impl<T> Devres<T> {
+    /// Creates a new [`Devres`] instance of the given `data`. The `data` encapsulated within the
+    /// returned [`Devres`] instance will be revoked once the device is detached.
+ pub fn new(dev: &Device<Bound>, data: T, flags: Flags) -> Result<Self> {
+ let inner = DevresInner::new(dev, data, flags)?;
+
+ Ok(Devres(inner))
+ }
+
+    /// Same as [`Devres::new`], but does not return a [`Devres`] instance. Instead, the given
+    /// `data` is owned by devres and will be revoked / dropped once the device is detached.
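+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming `MyData` is a driver-defined wrapper whose [`Drop`]
+    /// implementation releases the underlying resource:
+    ///
+    /// ```ignore
+    /// # use kernel::{device::{Bound, Device}, devres::Devres, prelude::*};
+    /// struct MyData;
+    ///
+    /// fn attach(dev: &Device<Bound>) -> Result {
+    ///     // Devres takes ownership; `MyData` is dropped once the device is unbound.
+    ///     Devres::new_foreign_owned(dev, MyData, GFP_KERNEL)
+    /// }
+    /// ```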
+ pub fn new_foreign_owned(dev: &Device<Bound>, data: T, flags: Flags) -> Result {
+ let _ = DevresInner::new(dev, data, flags)?;
+
+ Ok(())
+ }
+
+ /// Obtain `&'a T`, bypassing the [`Revocable`].
+ ///
+    /// This method allows the caller to directly obtain a `&'a T`, bypassing the [`Revocable`],
+    /// by presenting a `&'a Device<Bound>` of the same [`Device`] this [`Devres`] instance has
+    /// been created with.
+ ///
+ /// # Errors
+ ///
+ /// An error is returned if `dev` does not match the same [`Device`] this [`Devres`] instance
+ /// has been created with.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// # #![cfg(CONFIG_PCI)]
+ /// # use kernel::{device::Core, devres::Devres, pci};
+ ///
+ /// fn from_core(dev: &pci::Device<Core>, devres: Devres<pci::Bar<0x4>>) -> Result {
+ /// let bar = devres.access(dev.as_ref())?;
+ ///
+ /// let _ = bar.read32(0x0);
+ ///
+ /// // might_sleep()
+ ///
+ /// bar.write32(0x42, 0x0);
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn access<'a>(&'a self, dev: &'a Device<Bound>) -> Result<&'a T> {
+ if self.0.dev.as_raw() != dev.as_raw() {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: `dev` being the same device as the device this `Devres` has been created for
+ // proves that `self.0.data` hasn't been revoked and is guaranteed to not be revoked as
+ // long as `dev` lives; `dev` lives at least as long as `self`.
+ Ok(unsafe { self.deref().access() })
+ }
+}
+
+impl<T> Deref for Devres<T> {
+ type Target = Revocable<T>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0.data
+ }
+}
+
+impl<T> Drop for Devres<T> {
+ fn drop(&mut self) {
+ DevresInner::remove_action(&self.0);
+ }
+}
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
new file mode 100644
index 000000000000..605e01e35715
--- /dev/null
+++ b/rust/kernel/dma.rs
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Direct memory access (DMA).
+//!
+//! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)
+
+use crate::{
+ bindings, build_assert,
+ device::{Bound, Device},
+ error::code::*,
+ error::Result,
+ transmute::{AsBytes, FromBytes},
+ types::ARef,
+};
+
+/// Possible attributes associated with a DMA mapping.
+///
+/// They can be combined with the operators `|`, `&`, and `!`.
+///
+/// Values can be used from the [`attrs`] module.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::{Bound, Device};
+/// use kernel::dma::{attrs::*, CoherentAllocation};
+///
+/// # fn test(dev: &Device<Bound>) -> Result {
+/// let attribs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
+/// let c: CoherentAllocation<u64> =
+/// CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, attribs)?;
+/// # Ok::<(), Error>(()) }
+/// ```
+#[derive(Clone, Copy, PartialEq)]
+#[repr(transparent)]
+pub struct Attrs(u32);
+
+impl Attrs {
+ /// Get the raw representation of this attribute.
+ pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
+ self.0 as _
+ }
+
+ /// Check whether `flags` is contained in `self`.
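+    ///
+    /// # Examples
+    ///
+    /// For instance, a combined set of attributes contains each of its constituents:
+    ///
+    /// ```
+    /// use kernel::dma::attrs::*;
+    ///
+    /// let attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
+    /// assert!(attrs.contains(DMA_ATTR_NO_WARN));
+    /// assert!(!attrs.contains(DMA_ATTR_WEAK_ORDERING));
+    /// ```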
+ pub fn contains(self, flags: Attrs) -> bool {
+ (self & flags) == flags
+ }
+}
+
+impl core::ops::BitOr for Attrs {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+}
+
+impl core::ops::BitAnd for Attrs {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output {
+ Self(self.0 & rhs.0)
+ }
+}
+
+impl core::ops::Not for Attrs {
+ type Output = Self;
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+}
+
+/// DMA mapping attributes.
+pub mod attrs {
+ use super::Attrs;
+
+    /// Specifies that reads and writes to the mapping may be weakly ordered, that is, reads
+    /// and writes may pass each other.
+ pub const DMA_ATTR_WEAK_ORDERING: Attrs = Attrs(bindings::DMA_ATTR_WEAK_ORDERING);
+
+ /// Specifies that writes to the mapping may be buffered to improve performance.
+ pub const DMA_ATTR_WRITE_COMBINE: Attrs = Attrs(bindings::DMA_ATTR_WRITE_COMBINE);
+
+    /// Lets the platform avoid creating a kernel virtual mapping for the allocated buffer.
+ pub const DMA_ATTR_NO_KERNEL_MAPPING: Attrs = Attrs(bindings::DMA_ATTR_NO_KERNEL_MAPPING);
+
+    /// Allows platform code to skip synchronization of the CPU cache for the given buffer,
+    /// assuming that it has already been transferred to the 'device' domain.
+ pub const DMA_ATTR_SKIP_CPU_SYNC: Attrs = Attrs(bindings::DMA_ATTR_SKIP_CPU_SYNC);
+
+ /// Forces contiguous allocation of the buffer in physical memory.
+ pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);
+
+    /// This is a hint to the DMA-mapping subsystem that it's probably not worth the time to try
+    /// to allocate memory in a way that gives better TLB efficiency.
+ pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);
+
+    /// This tells the DMA-mapping subsystem to suppress allocation failure reports (similarly to
+    /// `__GFP_NOWARN`).
+ pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);
+
+ /// Used to indicate that the buffer is fully accessible at an elevated privilege level (and
+ /// ideally inaccessible or at least read-only at lesser-privileged levels).
+ pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
+}
+
+/// An abstraction of the `dma_alloc_coherent` API.
+///
+/// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
+/// large consistent DMA regions.
+///
+/// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
+/// processor's virtual address space) and the device address which can be given to the device
+/// as the DMA address base of the region. The region is released once [`CoherentAllocation`]
+/// is dropped.
+///
+/// # Invariants
+///
+/// For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
+/// to an allocated region of consistent memory and `dma_handle` is the DMA address base of
+/// the region.
+// TODO
+//
+// DMA allocations potentially carry device resources (e.g. IOMMU mappings), hence for soundness
+// reasons DMA allocation would need to be embedded in a `Devres` container, in order to ensure
+// that device resources can never survive device unbind.
+//
+// However, it is neither desirable nor necessary to protect the allocated memory of the DMA
+// allocation from surviving device unbind; it would require RCU read side critical sections to
+// access the memory, which may require subsequent unnecessary copies.
+//
+// Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
+// entire `CoherentAllocation` including the allocated memory itself.
+pub struct CoherentAllocation<T: AsBytes + FromBytes> {
+ dev: ARef<Device>,
+ dma_handle: bindings::dma_addr_t,
+ count: usize,
+ cpu_addr: *mut T,
+ dma_attrs: Attrs,
+}
+
+impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
+    /// Allocates a region of `size_of::<T>() * count` bytes of consistent memory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::device::{Bound, Device};
+ /// use kernel::dma::{attrs::*, CoherentAllocation};
+ ///
+ /// # fn test(dev: &Device<Bound>) -> Result {
+ /// let c: CoherentAllocation<u64> =
+ /// CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, DMA_ATTR_NO_WARN)?;
+ /// # Ok::<(), Error>(()) }
+ /// ```
+ pub fn alloc_attrs(
+ dev: &Device<Bound>,
+ count: usize,
+ gfp_flags: kernel::alloc::Flags,
+ dma_attrs: Attrs,
+ ) -> Result<CoherentAllocation<T>> {
+ build_assert!(
+ core::mem::size_of::<T>() > 0,
+ "It doesn't make sense for the allocated type to be a ZST"
+ );
+
+ let size = count
+ .checked_mul(core::mem::size_of::<T>())
+ .ok_or(EOVERFLOW)?;
+ let mut dma_handle = 0;
+        // SAFETY: The device pointer is guaranteed to be valid by the type invariant on `Device`.
+ let ret = unsafe {
+ bindings::dma_alloc_attrs(
+ dev.as_raw(),
+ size,
+ &mut dma_handle,
+ gfp_flags.as_raw(),
+ dma_attrs.as_raw(),
+ )
+ };
+ if ret.is_null() {
+ return Err(ENOMEM);
+ }
+ // INVARIANT: We just successfully allocated a coherent region which is accessible for
+ // `count` elements, hence the cpu address is valid. We also hold a refcounted reference
+ // to the device.
+ Ok(Self {
+ dev: dev.into(),
+ dma_handle,
+ count,
+ cpu_addr: ret as *mut T,
+ dma_attrs,
+ })
+ }
+
+    /// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except that
+    /// `dma_attrs` defaults to 0.
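+    ///
+    /// # Examples
+    ///
+    /// This mirrors [`CoherentAllocation::alloc_attrs`] with default attributes:
+    ///
+    /// ```
+    /// # use kernel::device::{Bound, Device};
+    /// use kernel::dma::CoherentAllocation;
+    ///
+    /// # fn test(dev: &Device<Bound>) -> Result {
+    /// let c: CoherentAllocation<u64> = CoherentAllocation::alloc_coherent(dev, 4, GFP_KERNEL)?;
+    /// # Ok::<(), Error>(()) }
+    /// ```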
+ pub fn alloc_coherent(
+ dev: &Device<Bound>,
+ count: usize,
+ gfp_flags: kernel::alloc::Flags,
+ ) -> Result<CoherentAllocation<T>> {
+ CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
+ }
+
+    /// Returns the base address of the allocated region in the CPU's virtual address space.
+ pub fn start_ptr(&self) -> *const T {
+ self.cpu_addr
+ }
+
+    /// Returns the base address of the allocated region in the CPU's virtual address space as
+    /// a mutable pointer.
+ pub fn start_ptr_mut(&mut self) -> *mut T {
+ self.cpu_addr
+ }
+
+    /// Returns a DMA handle which may be given to the device as the DMA address base of
+    /// the region.
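+    ///
+    /// A hedged usage sketch (`DESC_RING_BASE` is an illustrative device register offset and
+    /// `bar` an assumed MMIO mapping, e.g. a `pci::Bar`):
+    ///
+    /// ```ignore
+    /// // Program the low 32 bits of the DMA address into the device's ring base register.
+    /// bar.write32(alloc.dma_handle() as u32, DESC_RING_BASE);
+    /// ```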
+ pub fn dma_handle(&self) -> bindings::dma_addr_t {
+ self.dma_handle
+ }
+
+    /// Returns a pointer to an element from the region with bounds checking. `offset` is in
+    /// units of `T`, not in bytes.
+ ///
+ /// Public but hidden since it should only be used from [`dma_read`] and [`dma_write`] macros.
+ #[doc(hidden)]
+ pub fn item_from_index(&self, offset: usize) -> Result<*mut T> {
+ if offset >= self.count {
+ return Err(EINVAL);
+ }
+ // SAFETY:
+ // - The pointer is valid due to type invariant on `CoherentAllocation`
+ // and we've just checked that the range and index is within bounds.
+ // - `offset` can't overflow since it is smaller than `self.count` and we've checked
+ // that `self.count` won't overflow early in the constructor.
+ Ok(unsafe { self.cpu_addr.add(offset) })
+ }
+
+ /// Reads the value of `field` and ensures that its type is [`FromBytes`].
+ ///
+ /// # Safety
+ ///
+ /// This must be called from the [`dma_read`] macro which ensures that the `field` pointer is
+ /// validated beforehand.
+ ///
+ /// Public but hidden since it should only be used from [`dma_read`] macro.
+ #[doc(hidden)]
+ pub unsafe fn field_read<F: FromBytes>(&self, field: *const F) -> F {
+ // SAFETY:
+ // - By the safety requirements field is valid.
+        // - Using read_volatile() here is not sound as per the usual rules; the usage here is
+        //   a special exception with the following notes in place. When dealing with a potential
+        //   race from hardware or code outside the kernel (e.g. a user-space program), we need
+        //   reads from valid memory to not be UB. Currently read_volatile() is used for this,
+        //   and the rationale behind it is that it should generate the same code as READ_ONCE(),
+        //   which the kernel already relies on to avoid UB on data races. Note that the usage of
+        //   read_volatile() is limited to this particular case; it cannot be used to prevent
+        //   the UB caused by racing between two kernel functions, nor does it provide atomicity.
+ unsafe { field.read_volatile() }
+ }
+
+ /// Writes a value to `field` and ensures that its type is [`AsBytes`].
+ ///
+ /// # Safety
+ ///
+ /// This must be called from the [`dma_write`] macro which ensures that the `field` pointer is
+ /// validated beforehand.
+ ///
+ /// Public but hidden since it should only be used from [`dma_write`] macro.
+ #[doc(hidden)]
+ pub unsafe fn field_write<F: AsBytes>(&self, field: *mut F, val: F) {
+ // SAFETY:
+ // - By the safety requirements field is valid.
+        // - Using write_volatile() here is not sound as per the usual rules; the usage here is
+        //   a special exception with the following notes in place. When dealing with a potential
+        //   race from hardware or code outside the kernel (e.g. a user-space program), we need
+        //   writes to valid memory to not be UB. Currently write_volatile() is used for this,
+        //   and the rationale behind it is that it should generate the same code as WRITE_ONCE(),
+        //   which the kernel already relies on to avoid UB on data races. Note that the usage of
+        //   write_volatile() is limited to this particular case; it cannot be used to prevent
+        //   the UB caused by racing between two kernel functions, nor does it provide atomicity.
+ unsafe { field.write_volatile(val) }
+ }
+}
+
+/// Note that the device configured to do DMA must be halted before this object is dropped.
+impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
+ fn drop(&mut self) {
+ let size = self.count * core::mem::size_of::<T>();
+        // SAFETY: The device pointer is guaranteed to be valid by the type invariant on `Device`.
+ // The cpu address, and the dma handle are valid due to the type invariants on
+ // `CoherentAllocation`.
+ unsafe {
+ bindings::dma_free_attrs(
+ self.dev.as_raw(),
+ size,
+ self.cpu_addr as _,
+ self.dma_handle,
+ self.dma_attrs.as_raw(),
+ )
+ }
+ }
+}
+
+// SAFETY: It is safe to send a `CoherentAllocation` to another thread if `T`
+// can be sent to another thread.
+unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
+
+/// Reads a field of an item from an allocated region of structs.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::device::Device;
+/// use kernel::dma::{attrs::*, CoherentAllocation};
+///
+/// struct MyStruct { field: u32, }
+///
+/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
+/// unsafe impl kernel::transmute::FromBytes for MyStruct{};
+/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
+/// unsafe impl kernel::transmute::AsBytes for MyStruct{};
+///
+/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
+/// let whole = kernel::dma_read!(alloc[2]);
+/// let field = kernel::dma_read!(alloc[1].field);
+/// # Ok::<(), Error>(()) }
+/// ```
+#[macro_export]
+macro_rules! dma_read {
+ ($dma:expr, $idx: expr, $($field:tt)*) => {{
+ let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+ // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
+ // dereferenced. The compiler also further validates the expression on whether `field`
+ // is a member of `item` when expanded by the macro.
+ unsafe {
+ let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
+ $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
+ }
+ }};
+ ($dma:ident [ $idx:expr ] $($field:tt)* ) => {
+ $crate::dma_read!($dma, $idx, $($field)*);
+ };
+ ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
+ $crate::dma_read!($($dma).*, $idx, $($field)*);
+ };
+}
+
+/// Writes to a field of an item from an allocated region of structs.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::device::Device;
+/// use kernel::dma::{attrs::*, CoherentAllocation};
+///
+/// struct MyStruct { member: u32, }
+///
+/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
+/// unsafe impl kernel::transmute::FromBytes for MyStruct{};
+/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
+/// unsafe impl kernel::transmute::AsBytes for MyStruct{};
+///
+/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
+/// kernel::dma_write!(alloc[2].member = 0xf);
+/// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf });
+/// # Ok::<(), Error>(()) }
+/// ```
+#[macro_export]
+macro_rules! dma_write {
+ ($dma:ident [ $idx:expr ] $($field:tt)*) => {{
+ $crate::dma_write!($dma, $idx, $($field)*);
+ }};
+ ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
+ $crate::dma_write!($($dma).*, $idx, $($field)*);
+ }};
+ ($dma:expr, $idx: expr, = $val:expr) => {
+ let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+ // SAFETY: `item_from_index` ensures that `item` is always a valid item.
+ unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
+ };
+ ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
+ let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+ // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
+ // dereferenced. The compiler also further validates the expression on whether `field`
+ // is a member of `item` when expanded by the macro.
+ unsafe {
+ let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
+ $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
+ }
+ };
+}
diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs
new file mode 100644
index 000000000000..ec9166cedfa7
--- /dev/null
+++ b/rust/kernel/driver.rs
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic support for drivers of different buses (e.g., PCI, Platform, Amba, etc.).
+//!
+//! Each bus / subsystem is expected to implement [`RegistrationOps`], which allows drivers to
+//! register using the [`Registration`] class.
+
+use crate::error::{Error, Result};
+use crate::{device, of, str::CStr, try_pin_init, types::Opaque, ThisModule};
+use core::pin::Pin;
+use pin_init::{pin_data, pinned_drop, PinInit};
+
+/// The [`RegistrationOps`] trait serves as a generic interface for subsystems (e.g., PCI,
+/// Platform, Amba, etc.) to provide the corresponding subsystem specific implementation to
+/// register / unregister a driver of the particular type (`RegType`).
+///
+/// For instance, the PCI subsystem would set `RegType` to `bindings::pci_driver` and call
+/// `bindings::__pci_register_driver` from `RegistrationOps::register` and
+/// `bindings::pci_unregister_driver` from `RegistrationOps::unregister`.
+///
+/// # Safety
+///
+/// A call to [`RegistrationOps::unregister`] for a given instance of `RegType` is only valid if a
+/// preceding call to [`RegistrationOps::register`] has been successful.
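+///
+/// A hedged implementation sketch for an imaginary bus (the `bindings::mybus_*` symbols and
+/// `MyBusRegistration` are illustrative placeholders, not real bindings):
+///
+/// ```ignore
+/// unsafe impl RegistrationOps for MyBusRegistration {
+///     type RegType = bindings::mybus_driver;
+///
+///     unsafe fn register(
+///         reg: &Opaque<Self::RegType>,
+///         name: &'static CStr,
+///         module: &'static ThisModule,
+///     ) -> Result {
+///         // SAFETY: By the safety requirements of this function, `reg` stays pinned and valid
+///         // until the matching call to `unregister`.
+///         to_result(unsafe { bindings::mybus_register_driver(reg.get(), name.as_char_ptr()) })
+///     }
+///
+///     unsafe fn unregister(reg: &Opaque<Self::RegType>) {
+///         // SAFETY: By the safety requirements, `register` was called successfully for `reg`.
+///         unsafe { bindings::mybus_unregister_driver(reg.get()) };
+///     }
+/// }
+/// ```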
+pub unsafe trait RegistrationOps {
+ /// The type that holds information about the registration. This is typically a struct defined
+ /// by the C portion of the kernel.
+ type RegType: Default;
+
+ /// Registers a driver.
+ ///
+ /// # Safety
+ ///
+ /// On success, `reg` must remain pinned and valid until the matching call to
+ /// [`RegistrationOps::unregister`].
+ unsafe fn register(
+ reg: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result;
+
+ /// Unregisters a driver previously registered with [`RegistrationOps::register`].
+ ///
+ /// # Safety
+ ///
+ /// Must only be called after a preceding successful call to [`RegistrationOps::register`] for
+ /// the same `reg`.
+ unsafe fn unregister(reg: &Opaque<Self::RegType>);
+}
+
+/// A [`Registration`] is a generic type that represents the registration of some driver type (e.g.
+/// `bindings::pci_driver`). Therefore a [`Registration`] must be initialized with a type that
+/// implements the [`RegistrationOps`] trait, such that the generic `T::register` and
+/// `T::unregister` calls result in the subsystem specific registration calls.
+///
+/// Once the `Registration` structure is dropped, the driver is unregistered.
+#[pin_data(PinnedDrop)]
+pub struct Registration<T: RegistrationOps> {
+ #[pin]
+ reg: Opaque<T::RegType>,
+}
+
+// SAFETY: `Registration` has no fields or methods accessible via `&Registration`, so it is safe
+// to share references to it with multiple threads, as nothing can be done with such a reference.
+unsafe impl<T: RegistrationOps> Sync for Registration<T> {}
+
+// SAFETY: Both registration and unregistration are implemented in C and safe to be performed from
+// any thread, so `Registration` is `Send`.
+unsafe impl<T: RegistrationOps> Send for Registration<T> {}
+
+impl<T: RegistrationOps> Registration<T> {
+ /// Creates a new instance of the registration object.
+ pub fn new(name: &'static CStr, module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ reg <- Opaque::try_ffi_init(|ptr: *mut T::RegType| {
+ // SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write.
+ unsafe { ptr.write(T::RegType::default()) };
+
+ // SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write, and it has
+ // just been initialised above, so it's also valid for read.
+ let drv = unsafe { &*(ptr as *const Opaque<T::RegType>) };
+
+ // SAFETY: `drv` is guaranteed to be pinned until `T::unregister`.
+ unsafe { T::register(drv, name, module) }
+ }),
+ })
+ }
+}
+
+#[pinned_drop]
+impl<T: RegistrationOps> PinnedDrop for Registration<T> {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: The existence of `self` guarantees that `self.reg` has previously been
+ // successfully registered with `T::register`
+ unsafe { T::unregister(&self.reg) };
+ }
+}
+
+/// Declares a kernel module that exposes a single driver.
+///
+/// It is meant to be used as a helper by other subsystems so they can more easily expose their own
+/// macros.
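+///
+/// # Example
+///
+/// A hedged sketch of how a bus abstraction might wrap this macro in its own
+/// `module_mybus_driver!` macro (`mybus` and its `Adapter` type are illustrative, not an
+/// existing API):
+///
+/// ```ignore
+/// #[macro_export]
+/// macro_rules! module_mybus_driver {
+///     ($($f:tt)*) => {
+///         $crate::module_driver!(<T>, $crate::mybus::Adapter<T>, { $($f)* });
+///     };
+/// }
+/// ```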
+#[macro_export]
+macro_rules! module_driver {
+ (<$gen_type:ident>, $driver_ops:ty, { type: $type:ty, $($f:tt)* }) => {
+ type Ops<$gen_type> = $driver_ops;
+
+ #[$crate::prelude::pin_data]
+ struct DriverModule {
+ #[pin]
+ _driver: $crate::driver::Registration<Ops<$type>>,
+ }
+
+ impl $crate::InPlaceModule for DriverModule {
+ fn init(
+ module: &'static $crate::ThisModule
+ ) -> impl ::pin_init::PinInit<Self, $crate::error::Error> {
+ $crate::try_pin_init!(Self {
+ _driver <- $crate::driver::Registration::new(
+ <Self as $crate::ModuleMetadata>::NAME,
+ module,
+ ),
+ })
+ }
+ }
+
+ $crate::prelude::module! {
+ type: DriverModule,
+ $($f)*
+ }
+ }
+}
+
+/// The bus independent adapter to match a driver to a device.
+///
+/// This trait should be implemented by the bus specific adapter, which represents the connection
+/// of a device and a driver.
+///
+/// It provides bus independent functions for device / driver interactions.
+pub trait Adapter {
+ /// The type holding driver private data about each device id supported by the driver.
+ type IdInfo: 'static;
+
+ /// The [`of::IdTable`] of the corresponding driver.
+ fn of_id_table() -> Option<of::IdTable<Self::IdInfo>>;
+
+ /// Returns the driver's private data from the matching entry in the [`of::IdTable`], if any.
+ ///
+ /// If this returns `None`, it means there is no match with an entry in the [`of::IdTable`].
+ #[cfg(CONFIG_OF)]
+ fn of_id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ let table = Self::of_id_table()?;
+
+ // SAFETY:
+ // - `table` has static lifetime, hence it's valid for read,
+        // - `dev` is guaranteed to be valid while it's alive, and so is `dev.as_raw()`.
+ let raw_id = unsafe { bindings::of_match_device(table.as_ptr(), dev.as_raw()) };
+
+ if raw_id.is_null() {
+ None
+ } else {
+            // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct of_device_id` and
+ // does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*raw_id.cast::<of::DeviceId>() };
+
+ Some(table.info(<of::DeviceId as crate::device_id::RawDeviceId>::index(id)))
+ }
+ }
+
+ #[cfg(not(CONFIG_OF))]
+ #[allow(missing_docs)]
+ fn of_id_info(_dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ None
+ }
+
+ /// Returns the driver's private data from the matching entry of any of the ID tables, if any.
+ ///
+ /// If this returns `None`, it means that there is no match in any of the ID tables directly
+ /// associated with a [`device::Device`].
+ fn id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ let id = Self::of_id_info(dev);
+ if id.is_some() {
+ return id;
+ }
+
+ None
+ }
+}
diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
new file mode 100644
index 000000000000..74c9a3dd719e
--- /dev/null
+++ b/rust/kernel/drm/device.rs
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM device.
+//!
+//! C header: [`include/linux/drm/drm_device.h`](srctree/include/linux/drm/drm_device.h)
+
+use crate::{
+ bindings, device, drm,
+ drm::driver::AllocImpl,
+ error::from_err_ptr,
+ error::Result,
+ prelude::*,
+ types::{ARef, AlwaysRefCounted, Opaque},
+};
+use core::{mem, ops::Deref, ptr, ptr::NonNull};
+
+#[cfg(CONFIG_DRM_LEGACY)]
+macro_rules! drm_legacy_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::drm_driver {
+ $( $field: $val ),*,
+ firstopen: None,
+ preclose: None,
+ dma_ioctl: None,
+ dma_quiescent: None,
+ context_dtor: None,
+ irq_handler: None,
+ irq_preinstall: None,
+ irq_postinstall: None,
+ irq_uninstall: None,
+ get_vblank_counter: None,
+ enable_vblank: None,
+ disable_vblank: None,
+ dev_priv_size: 0,
+ }
+ }
+}
+
+#[cfg(not(CONFIG_DRM_LEGACY))]
+macro_rules! drm_legacy_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::drm_driver {
+ $( $field: $val ),*
+ }
+ }
+}
+
+/// A typed DRM device with a specific `drm::Driver` implementation.
+///
+/// The device is always reference-counted.
+///
+/// # Invariants
+///
+/// `self.dev` is a valid instance of a `struct drm_device`.
+#[repr(C)]
+#[pin_data]
+pub struct Device<T: drm::Driver> {
+ dev: Opaque<bindings::drm_device>,
+ #[pin]
+ data: T::Data,
+}
+
+impl<T: drm::Driver> Device<T> {
+ const VTABLE: bindings::drm_driver = drm_legacy_fields! {
+ load: None,
+ open: Some(drm::File::<T::File>::open_callback),
+ postclose: Some(drm::File::<T::File>::postclose_callback),
+ unload: None,
+ release: None,
+ master_set: None,
+ master_drop: None,
+ debugfs_init: None,
+ gem_create_object: T::Object::ALLOC_OPS.gem_create_object,
+ prime_handle_to_fd: T::Object::ALLOC_OPS.prime_handle_to_fd,
+ prime_fd_to_handle: T::Object::ALLOC_OPS.prime_fd_to_handle,
+ gem_prime_import: T::Object::ALLOC_OPS.gem_prime_import,
+ gem_prime_import_sg_table: T::Object::ALLOC_OPS.gem_prime_import_sg_table,
+ dumb_create: T::Object::ALLOC_OPS.dumb_create,
+ dumb_map_offset: T::Object::ALLOC_OPS.dumb_map_offset,
+ show_fdinfo: None,
+ fbdev_probe: None,
+
+ major: T::INFO.major,
+ minor: T::INFO.minor,
+ patchlevel: T::INFO.patchlevel,
+ name: T::INFO.name.as_char_ptr() as *mut _,
+ desc: T::INFO.desc.as_char_ptr() as *mut _,
+
+ driver_features: drm::driver::FEAT_GEM,
+ ioctls: T::IOCTLS.as_ptr(),
+ num_ioctls: T::IOCTLS.len() as i32,
+ fops: &Self::GEM_FOPS as _,
+ };
+
+ const GEM_FOPS: bindings::file_operations = drm::gem::create_fops();
+
+ /// Create a new `drm::Device` for a `drm::Driver`.
+ pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<ARef<Self>> {
+ // SAFETY:
+        //   - `VTABLE`, as a `const`, is pinned to the read-only section of the compilation,
+        //   - `dev` is valid by its type invariants,
+ let raw_drm: *mut Self = unsafe {
+ bindings::__drm_dev_alloc(
+ dev.as_raw(),
+ &Self::VTABLE,
+ mem::size_of::<Self>(),
+ mem::offset_of!(Self, dev),
+ )
+ }
+ .cast();
+ let raw_drm = NonNull::new(from_err_ptr(raw_drm)?).ok_or(ENOMEM)?;
+
+ // SAFETY: `raw_drm` is a valid pointer to `Self`.
+ let raw_data = unsafe { ptr::addr_of_mut!((*raw_drm.as_ptr()).data) };
+
+ // SAFETY:
+ // - `raw_data` is a valid pointer to uninitialized memory.
+ // - `raw_data` will not move until it is dropped.
+ unsafe { data.__pinned_init(raw_data) }.inspect_err(|_| {
+ // SAFETY: `__drm_dev_alloc()` was successful, hence `raw_drm` must be valid and the
+ // refcount must be non-zero.
+ unsafe { bindings::drm_dev_put(ptr::addr_of_mut!((*raw_drm.as_ptr()).dev).cast()) };
+ })?;
+
+ // SAFETY: The reference count is one, and now we take ownership of that reference as a
+ // `drm::Device`.
+ Ok(unsafe { ARef::from_raw(raw_drm) })
+ }
+
+ pub(crate) fn as_raw(&self) -> *mut bindings::drm_device {
+ self.dev.get()
+ }
+
+ /// # Safety
+ ///
+    /// `ptr` must be a valid pointer to a `struct drm_device` embedded in `Self`.
+ unsafe fn from_drm_device(ptr: *const bindings::drm_device) -> *mut Self {
+ // SAFETY: By the safety requirements of this function `ptr` is a valid pointer to a
+ // `struct drm_device` embedded in `Self`.
+ unsafe { crate::container_of!(ptr, Self, dev) }.cast_mut()
+ }
+
+ /// Not intended to be called externally, except via declare_drm_ioctls!()
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count,
+ /// i.e. it must be ensured that the reference count of the C `struct drm_device` `ptr` points
+ /// to can't drop to zero, for the duration of this function call and the entire duration when
+ /// the returned reference exists.
+ ///
+    /// Additionally, callers must ensure that the `struct drm_device` that `ptr` points to is
+    /// embedded in `Self`.
+ #[doc(hidden)]
+ pub unsafe fn as_ref<'a>(ptr: *const bindings::drm_device) -> &'a Self {
+ // SAFETY: By the safety requirements of this function `ptr` is a valid pointer to a
+ // `struct drm_device` embedded in `Self`.
+ let ptr = unsafe { Self::from_drm_device(ptr) };
+
+ // SAFETY: `ptr` is valid by the safety requirements of this function.
+ unsafe { &*ptr.cast() }
+ }
+}
+
+impl<T: drm::Driver> Deref for Device<T> {
+ type Target = T::Data;
+
+ fn deref(&self) -> &Self::Target {
+ &self.data
+ }
+}
+
+// SAFETY: DRM device objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: drm::Driver> AlwaysRefCounted for Device<T> {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::drm_dev_get(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::drm_dev_put(obj.cast().as_ptr()) };
+ }
+}
+
+impl<T: drm::Driver> AsRef<device::Device> for Device<T> {
+ fn as_ref(&self) -> &device::Device {
+ // SAFETY: `bindings::drm_device::dev` is valid as long as the DRM device itself is valid,
+ // which is guaranteed by the type invariant.
+ unsafe { device::Device::as_ref((*self.as_raw()).dev) }
+ }
+}
+
+// SAFETY: A `drm::Device` can be released from any thread.
+unsafe impl<T: drm::Driver> Send for Device<T> {}
+
+// SAFETY: A `drm::Device` can be shared among threads because all immutable methods are protected
+// by the synchronization in `struct drm_device`.
+unsafe impl<T: drm::Driver> Sync for Device<T> {}
diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs
new file mode 100644
index 000000000000..acb638086131
--- /dev/null
+++ b/rust/kernel/drm/driver.rs
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM driver core.
+//!
+//! C header: [`include/linux/drm/drm_drv.h`](srctree/include/linux/drm/drm_drv.h)
+
+use crate::{
+ bindings, device,
+ devres::Devres,
+ drm,
+ error::{to_result, Result},
+ prelude::*,
+ str::CStr,
+ types::ARef,
+};
+use macros::vtable;
+
+/// The driver uses the GEM memory manager. This should be set for all modern drivers.
+pub(crate) const FEAT_GEM: u32 = bindings::drm_driver_feature_DRIVER_GEM;
+
+/// Information data for a DRM Driver.
+pub struct DriverInfo {
+ /// Driver major version.
+ pub major: i32,
+ /// Driver minor version.
+ pub minor: i32,
+ /// Driver patchlevel version.
+ pub patchlevel: i32,
+ /// Driver name.
+ pub name: &'static CStr,
+ /// Driver description.
+ pub desc: &'static CStr,
+}
+
+/// Internal memory management operation set, normally created by memory managers (e.g. GEM).
+pub struct AllocOps {
+ pub(crate) gem_create_object: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ size: usize,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) prime_handle_to_fd: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ file_priv: *mut bindings::drm_file,
+ handle: u32,
+ flags: u32,
+ prime_fd: *mut core::ffi::c_int,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) prime_fd_to_handle: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ file_priv: *mut bindings::drm_file,
+ prime_fd: core::ffi::c_int,
+ handle: *mut u32,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) gem_prime_import: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ dma_buf: *mut bindings::dma_buf,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) gem_prime_import_sg_table: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ attach: *mut bindings::dma_buf_attachment,
+ sgt: *mut bindings::sg_table,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) dumb_create: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ args: *mut bindings::drm_mode_create_dumb,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) dumb_map_offset: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ handle: u32,
+ offset: *mut u64,
+ ) -> core::ffi::c_int,
+ >,
+}
+
+/// Trait for memory manager implementations. Implemented internally.
+pub trait AllocImpl: super::private::Sealed + drm::gem::IntoGEMObject {
+ /// The C callback operations for this memory manager.
+ const ALLOC_OPS: AllocOps;
+}
+
+/// The DRM `Driver` trait.
+///
+/// This trait must be implemented by drivers in order to create a `struct drm_device` and `struct
+/// drm_driver` to be registered in the DRM subsystem.
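+///
+/// A hedged implementation sketch (`MyDriver`, `MyData`, `MyGemData` and `MyFile` are
+/// illustrative driver-defined types; paths assume the re-exports used elsewhere in this
+/// module):
+///
+/// ```ignore
+/// #[vtable]
+/// impl drm::Driver for MyDriver {
+///     type Data = MyData;
+///     type Object = drm::gem::Object<MyGemData>;
+///     type File = MyFile;
+///
+///     const INFO: drm::DriverInfo = drm::DriverInfo {
+///         major: 1,
+///         minor: 0,
+///         patchlevel: 0,
+///         name: c_str!("mydrv"),
+///         desc: c_str!("An illustrative DRM driver"),
+///     };
+///
+///     // Declares `const IOCTLS`; an empty list is valid for a minimal driver.
+///     kernel::declare_drm_ioctls! {}
+/// }
+/// ```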
+#[vtable]
+pub trait Driver {
+ /// Context data associated with the DRM driver
+ type Data: Sync + Send;
+
+ /// The type used to manage memory for this driver.
+ type Object: AllocImpl;
+
+ /// The type used to represent a DRM File (client)
+ type File: drm::file::DriverFile;
+
+ /// Driver metadata
+ const INFO: DriverInfo;
+
+ /// IOCTL list. See `kernel::drm::ioctl::declare_drm_ioctls!{}`.
+ const IOCTLS: &'static [drm::ioctl::DrmIoctlDescriptor];
+}
+
+/// The registration type of a `drm::Device`.
+///
+/// Once the `Registration` structure is dropped, the device is unregistered.
+pub struct Registration<T: Driver>(ARef<drm::Device<T>>);
+
+impl<T: Driver> Registration<T> {
+ /// Creates a new [`Registration`] and registers it.
+ fn new(drm: &drm::Device<T>, flags: usize) -> Result<Self> {
+ // SAFETY: `drm.as_raw()` is valid by the invariants of `drm::Device`.
+ to_result(unsafe { bindings::drm_dev_register(drm.as_raw(), flags) })?;
+
+ Ok(Self(drm.into()))
+ }
+
+    /// Same as [`Registration::new`], but transfers ownership of the [`Registration`] to
+ /// [`Devres`].
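+    ///
+    /// A hedged usage sketch from a driver's probe routine (`MyDriver` and `MyData` are
+    /// illustrative driver types):
+    ///
+    /// ```ignore
+    /// fn probe(dev: &device::Device<device::Bound>, data: impl PinInit<MyData, Error>) -> Result {
+    ///     let drm = drm::Device::<MyDriver>::new(dev, data)?;
+    ///     drm::driver::Registration::new_foreign_owned(&drm, dev, 0)?;
+    ///     Ok(())
+    /// }
+    /// ```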
+ pub fn new_foreign_owned(
+ drm: &drm::Device<T>,
+ dev: &device::Device<device::Bound>,
+ flags: usize,
+ ) -> Result {
+ if drm.as_ref().as_raw() != dev.as_raw() {
+ return Err(EINVAL);
+ }
+
+ let reg = Registration::<T>::new(drm, flags)?;
+ Devres::new_foreign_owned(dev, reg, GFP_KERNEL)
+ }
+
+ /// Returns a reference to the `Device` instance for this registration.
+ pub fn device(&self) -> &drm::Device<T> {
+ &self.0
+ }
+}
+
+// SAFETY: `Registration` doesn't offer any methods or access to fields when shared between
+// threads, hence it's safe to share it.
+unsafe impl<T: Driver> Sync for Registration<T> {}
+
+// SAFETY: Registration with and unregistration from the DRM subsystem can happen from any thread.
+unsafe impl<T: Driver> Send for Registration<T> {}
+
+impl<T: Driver> Drop for Registration<T> {
+ fn drop(&mut self) {
+ // SAFETY: Safe by the invariant of `ARef<drm::Device<T>>`. The existence of this
+        // `Registration` also guarantees that this `drm::Device` is actually registered.
+ unsafe { bindings::drm_dev_unregister(self.0.as_raw()) };
+ }
+}
diff --git a/rust/kernel/drm/file.rs b/rust/kernel/drm/file.rs
new file mode 100644
index 000000000000..b9527705e551
--- /dev/null
+++ b/rust/kernel/drm/file.rs
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM File objects.
+//!
+//! C header: [`include/linux/drm/drm_file.h`](srctree/include/linux/drm/drm_file.h)
+
+use crate::{bindings, drm, error::Result, prelude::*, types::Opaque};
+use core::marker::PhantomData;
+use core::pin::Pin;
+
+/// Trait that must be implemented by DRM drivers to represent a DRM File (a client instance).
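+///
+/// A hedged implementation sketch (`MyDriver` is an illustrative `drm::Driver` implementation
+/// and the per-client state is left empty):
+///
+/// ```ignore
+/// struct MyFile;
+///
+/// impl drm::file::DriverFile for MyFile {
+///     type Driver = MyDriver;
+///
+///     fn open(_device: &drm::Device<Self::Driver>) -> Result<Pin<KBox<Self>>> {
+///         // Allocate per-client state when userspace opens the device node.
+///         Ok(KBox::pin(MyFile, GFP_KERNEL)?)
+///     }
+/// }
+/// ```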
+pub trait DriverFile {
+ /// The parent `Driver` implementation for this `DriverFile`.
+ type Driver: drm::Driver;
+
+ /// Open a new file (called when a client opens the DRM device).
+ fn open(device: &drm::Device<Self::Driver>) -> Result<Pin<KBox<Self>>>;
+}
+
+/// An open DRM File.
+///
+/// # Invariants
+///
+/// `self.0` is a valid instance of a `struct drm_file`.
+#[repr(transparent)]
+pub struct File<T: DriverFile>(Opaque<bindings::drm_file>, PhantomData<T>);
+
+impl<T: DriverFile> File<T> {
+ #[doc(hidden)]
+ /// Not intended to be called externally, except via declare_drm_ioctls!()
+ ///
+ /// # Safety
+ ///
+    /// `ptr` must be a valid pointer to an open `struct drm_file`, opened through `T::open`.
+ pub unsafe fn as_ref<'a>(ptr: *mut bindings::drm_file) -> &'a File<T> {
+        // SAFETY: `ptr` is valid by the safety requirements of this function.
+ unsafe { &*ptr.cast() }
+ }
+
+ pub(super) fn as_raw(&self) -> *mut bindings::drm_file {
+ self.0.get()
+ }
+
+ fn driver_priv(&self) -> *mut T {
+ // SAFETY: By the type invariants of `Self`, `self.as_raw()` is always valid.
+ unsafe { (*self.as_raw()).driver_priv }.cast()
+ }
+
+ /// Return a pinned reference to the driver file structure.
+ pub fn inner(&self) -> Pin<&T> {
+ // SAFETY: By the type invariant the pointer `self.as_raw()` points to a valid and opened
+ // `struct drm_file`, hence `driver_priv` has been properly initialized by `open_callback`.
+ unsafe { Pin::new_unchecked(&*(self.driver_priv())) }
+ }
+
+ /// The open callback of a `struct drm_file`.
+ pub(crate) extern "C" fn open_callback(
+ raw_dev: *mut bindings::drm_device,
+ raw_file: *mut bindings::drm_file,
+ ) -> core::ffi::c_int {
+ // SAFETY: A callback from `struct drm_driver::open` guarantees that
+ // - `raw_dev` is valid pointer to a `struct drm_device`,
+ // - the corresponding `struct drm_device` has been registered.
+ let drm = unsafe { drm::Device::as_ref(raw_dev) };
+
+ // SAFETY: `raw_file` is a valid pointer to a `struct drm_file`.
+ let file = unsafe { File::<T>::as_ref(raw_file) };
+
+ let inner = match T::open(drm) {
+ Err(e) => {
+ return e.to_errno();
+ }
+ Ok(i) => i,
+ };
+
+ // SAFETY: This pointer is treated as pinned, and the Drop guarantee is upheld in
+ // `postclose_callback()`.
+ let driver_priv = KBox::into_raw(unsafe { Pin::into_inner_unchecked(inner) });
+
+ // SAFETY: By the type invariants of `Self`, `self.as_raw()` is always valid.
+ unsafe { (*file.as_raw()).driver_priv = driver_priv.cast() };
+
+ 0
+ }
+
+ /// The postclose callback of a `struct drm_file`.
+ pub(crate) extern "C" fn postclose_callback(
+ _raw_dev: *mut bindings::drm_device,
+ raw_file: *mut bindings::drm_file,
+ ) {
+ // SAFETY: This reference won't escape this function
+ let file = unsafe { File::<T>::as_ref(raw_file) };
+
+ // SAFETY: `file.driver_priv` has been created in `open_callback` through `KBox::into_raw`.
+ let _ = unsafe { KBox::from_raw(file.driver_priv()) };
+ }
+}
+
+impl<T: DriverFile> super::private::Sealed for File<T> {}
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
new file mode 100644
index 000000000000..d8765e61c6c2
--- /dev/null
+++ b/rust/kernel/drm/gem/mod.rs
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM GEM API
+//!
+//! C header: [`include/linux/drm/drm_gem.h`](srctree/include/linux/drm/drm_gem.h)
+
+use crate::{
+ alloc::flags::*,
+ bindings, drm,
+ drm::driver::{AllocImpl, AllocOps},
+ error::{to_result, Result},
+ prelude::*,
+ types::{ARef, AlwaysRefCounted, Opaque},
+};
+use core::{mem, ops::Deref, ptr::NonNull};
+
+/// GEM object functions, which must be implemented by drivers.
+pub trait BaseDriverObject<T: BaseObject>: Sync + Send + Sized {
+ /// Create a new driver data object for a GEM object of a given size.
+ fn new(dev: &drm::Device<T::Driver>, size: usize) -> impl PinInit<Self, Error>;
+
+ /// Open a new handle to an existing object, associated with a File.
+ fn open(
+ _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
+ _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
+ ) -> Result {
+ Ok(())
+ }
+
+ /// Close a handle to an existing object, associated with a File.
+ fn close(
+ _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
+ _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
+ ) {
+ }
+}
+
+/// Trait that represents a GEM object subtype
+pub trait IntoGEMObject: Sized + super::private::Sealed + AlwaysRefCounted {
+ /// Owning driver for this type
+ type Driver: drm::Driver;
+
+    /// Returns a pointer to the raw `drm_gem_object` structure, which must be valid as long as
+    /// this owning object is valid.
+ fn as_raw(&self) -> *mut bindings::drm_gem_object;
+
+ /// Converts a pointer to a `struct drm_gem_object` into a reference to `Self`.
+ ///
+ /// # Safety
+ ///
+ /// - `self_ptr` must be a valid pointer to `Self`.
+    /// - The caller promises that holding the immutable reference returned by this function does
+    ///   not violate Rust's data aliasing rules and remains valid throughout the lifetime of `'a`.
+ unsafe fn as_ref<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self;
+}
+
+// SAFETY: All gem objects are refcounted.
+unsafe impl<T: IntoGEMObject> AlwaysRefCounted for T {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::drm_gem_object_get(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: We either hold the only refcount on `obj`, or one of many - meaning that no one
+ // else could possibly hold a mutable reference to `obj` and thus this immutable reference
+ // is safe.
+ let obj = unsafe { obj.as_ref() }.as_raw();
+
+ // SAFETY:
+ // - The safety requirements guarantee that the refcount is non-zero.
+ // - We hold no references to `obj` now, making it safe for us to potentially deallocate it.
+ unsafe { bindings::drm_gem_object_put(obj) };
+ }
+}
+
+/// Trait which must be implemented by drivers using base GEM objects.
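+///
+/// A hedged implementation sketch (`MyDriver` is an illustrative `drm::Driver` implementation;
+/// the per-object driver data is left empty):
+///
+/// ```ignore
+/// #[pin_data]
+/// struct MyGemData {}
+///
+/// impl gem::BaseDriverObject<gem::Object<MyGemData>> for MyGemData {
+///     fn new(_dev: &drm::Device<MyDriver>, _size: usize) -> impl PinInit<Self, Error> {
+///         // No driver-private state to initialize in this sketch.
+///         try_pin_init!(MyGemData {})
+///     }
+/// }
+///
+/// impl gem::DriverObject for MyGemData {
+///     type Driver = MyDriver;
+/// }
+/// ```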
+pub trait DriverObject: BaseDriverObject<Object<Self>> {
+ /// Parent `Driver` for this object.
+ type Driver: drm::Driver;
+}
+
+extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
+ raw_obj: *mut bindings::drm_gem_object,
+ raw_file: *mut bindings::drm_file,
+) -> core::ffi::c_int {
+ // SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`.
+ let file = unsafe {
+ drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::as_ref(raw_file)
+ };
+    // SAFETY: `open_callback` is specified in the AllocOps structure for `Object<T>`, ensuring
+    // that `raw_obj` is indeed contained within an `Object<T>`.
+ let obj = unsafe {
+ <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::as_ref(raw_obj)
+ };
+
+ match T::open(obj, file) {
+ Err(e) => e.to_errno(),
+ Ok(()) => 0,
+ }
+}
+
+extern "C" fn close_callback<T: BaseDriverObject<U>, U: BaseObject>(
+ raw_obj: *mut bindings::drm_gem_object,
+ raw_file: *mut bindings::drm_file,
+) {
+    // SAFETY: `close_callback` is only ever called with a valid pointer to a `struct drm_file`.
+ let file = unsafe {
+ drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::as_ref(raw_file)
+ };
+    // SAFETY: `close_callback` is specified in the AllocOps structure for `Object<T>`, ensuring
+    // that `raw_obj` is indeed contained within an `Object<T>`.
+ let obj = unsafe {
+ <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::as_ref(raw_obj)
+ };
+
+ T::close(obj, file);
+}
+
+impl<T: DriverObject> IntoGEMObject for Object<T> {
+ type Driver = T::Driver;
+
+ fn as_raw(&self) -> *mut bindings::drm_gem_object {
+ self.obj.get()
+ }
+
+ unsafe fn as_ref<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self {
+ // SAFETY: `obj` is guaranteed to be in an `Object<T>` via the safety contract of this
+ // function
+ unsafe { &*crate::container_of!(self_ptr, Object<T>, obj) }
+ }
+}
+
+/// Base operations shared by all GEM object classes
+pub trait BaseObject: IntoGEMObject {
+ /// Returns the size of the object in bytes.
+ fn size(&self) -> usize {
+ // SAFETY: `self.as_raw()` is guaranteed to be a pointer to a valid `struct drm_gem_object`.
+ unsafe { (*self.as_raw()).size }
+ }
+
+ /// Creates a new handle for the object associated with a given `File`
+ /// (or returns an existing one).
+ fn create_handle(
+ &self,
+ file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
+ ) -> Result<u32> {
+ let mut handle: u32 = 0;
+ // SAFETY: The arguments are all valid per the type invariants.
+ to_result(unsafe {
+ bindings::drm_gem_handle_create(file.as_raw().cast(), self.as_raw(), &mut handle)
+ })?;
+ Ok(handle)
+ }
+
+ /// Looks up an object by its handle for a given `File`.
+ fn lookup_handle(
+ file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
+ handle: u32,
+ ) -> Result<ARef<Self>> {
+ // SAFETY: The arguments are all valid per the type invariants.
+ let ptr = unsafe { bindings::drm_gem_object_lookup(file.as_raw().cast(), handle) };
+ if ptr.is_null() {
+ return Err(ENOENT);
+ }
+
+ // SAFETY:
+ // - A `drm::Driver` can only have a single `File` implementation.
+ // - `file` uses the same `drm::Driver` as `Self`.
+ // - Therefore, we're guaranteed that `ptr` must be a gem object embedded within `Self`.
+        // - And we check if the pointer is null before calling as_ref(), ensuring that `ptr` is a
+ // valid pointer to an initialized `Self`.
+ let obj = unsafe { Self::as_ref(ptr) };
+
+ // SAFETY:
+ // - We take ownership of the reference of `drm_gem_object_lookup()`.
+ // - Our `NonNull` comes from an immutable reference, thus ensuring it is a valid pointer to
+ // `Self`.
+ Ok(unsafe { ARef::from_raw(obj.into()) })
+ }
+
+ /// Creates an mmap offset to map the object from userspace.
+ fn create_mmap_offset(&self) -> Result<u64> {
+ // SAFETY: The arguments are valid per the type invariant.
+ to_result(unsafe { bindings::drm_gem_create_mmap_offset(self.as_raw()) })?;
+
+ // SAFETY: The arguments are valid per the type invariant.
+ Ok(unsafe { bindings::drm_vma_node_offset_addr(&raw mut (*self.as_raw()).vma_node) })
+ }
+}
+
+impl<T: IntoGEMObject> BaseObject for T {}
+
+/// A base GEM object.
+///
+/// # Invariants
+///
+/// - `self.obj` is a valid instance of a `struct drm_gem_object`.
+/// - `self.dev` is always a valid pointer to a `struct drm_device`.
+#[repr(C)]
+#[pin_data]
+pub struct Object<T: DriverObject + Send + Sync> {
+ obj: Opaque<bindings::drm_gem_object>,
+ dev: NonNull<drm::Device<T::Driver>>,
+ #[pin]
+ data: T,
+}
+
+impl<T: DriverObject> Object<T> {
+ /// The size of this object's structure.
+ pub const SIZE: usize = mem::size_of::<Self>();
+
+ const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
+ free: Some(Self::free_callback),
+ open: Some(open_callback::<T, Object<T>>),
+ close: Some(close_callback::<T, Object<T>>),
+ print_info: None,
+ export: None,
+ pin: None,
+ unpin: None,
+ get_sg_table: None,
+ vmap: None,
+ vunmap: None,
+ mmap: None,
+ status: None,
+ vm_ops: core::ptr::null_mut(),
+ evict: None,
+ rss: None,
+ };
+
+ /// Create a new GEM object.
+ pub fn new(dev: &drm::Device<T::Driver>, size: usize) -> Result<ARef<Self>> {
+ let obj: Pin<KBox<Self>> = KBox::pin_init(
+ try_pin_init!(Self {
+ obj: Opaque::new(bindings::drm_gem_object::default()),
+ data <- T::new(dev, size),
+ // INVARIANT: The drm subsystem guarantees that the `struct drm_device` will live
+ // as long as the GEM object lives.
+ dev: dev.into(),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ // SAFETY: `obj.as_raw()` is guaranteed to be valid by the initialization above.
+ unsafe { (*obj.as_raw()).funcs = &Self::OBJECT_FUNCS };
+
+ // SAFETY: The arguments are all valid per the type invariants.
+ to_result(unsafe { bindings::drm_gem_object_init(dev.as_raw(), obj.obj.get(), size) })?;
+
+ // SAFETY: We never move out of `Self`.
+ let ptr = KBox::into_raw(unsafe { Pin::into_inner_unchecked(obj) });
+
+ // SAFETY: `ptr` comes from `KBox::into_raw` and hence can't be NULL.
+ let ptr = unsafe { NonNull::new_unchecked(ptr) };
+
+ // SAFETY: We take over the initial reference count from `drm_gem_object_init()`.
+ Ok(unsafe { ARef::from_raw(ptr) })
+ }
+
+ /// Returns the `Device` that owns this GEM object.
+ pub fn dev(&self) -> &drm::Device<T::Driver> {
+ // SAFETY: The DRM subsystem guarantees that the `struct drm_device` will live as long as
+ // the GEM object lives, hence the pointer must be valid.
+ unsafe { self.dev.as_ref() }
+ }
+
+ fn as_raw(&self) -> *mut bindings::drm_gem_object {
+ self.obj.get()
+ }
+
+ extern "C" fn free_callback(obj: *mut bindings::drm_gem_object) {
+ // SAFETY: All of our objects are of type `Object<T>`.
+ let this = unsafe { crate::container_of!(obj, Self, obj) }.cast_mut();
+
+ // SAFETY: The C code only ever calls this callback with a valid pointer to a `struct
+ // drm_gem_object`.
+ unsafe { bindings::drm_gem_object_release(obj) };
+
+ // SAFETY: All of our objects are allocated via `KBox`, and we're in the
+ // free callback which guarantees this object has zero remaining references,
+ // so we can drop it.
+ let _ = unsafe { KBox::from_raw(this) };
+ }
+}
+
+impl<T: DriverObject> super::private::Sealed for Object<T> {}
+
+impl<T: DriverObject> Deref for Object<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.data
+ }
+}
+
+impl<T: DriverObject> AllocImpl for Object<T> {
+ const ALLOC_OPS: AllocOps = AllocOps {
+ gem_create_object: None,
+ prime_handle_to_fd: None,
+ prime_fd_to_handle: None,
+ gem_prime_import: None,
+ gem_prime_import_sg_table: None,
+ dumb_create: None,
+ dumb_map_offset: None,
+ };
+}
+
+pub(super) const fn create_fops() -> bindings::file_operations {
+    // SAFETY: It is safe to initialize `bindings::file_operations` with zeroes; all of its fields
+    // admit a zeroed value (`None` function pointers, null pointers and zero flags).
+ let mut fops: bindings::file_operations = unsafe { core::mem::zeroed() };
+
+ fops.owner = core::ptr::null_mut();
+ fops.open = Some(bindings::drm_open);
+ fops.release = Some(bindings::drm_release);
+ fops.unlocked_ioctl = Some(bindings::drm_ioctl);
+ #[cfg(CONFIG_COMPAT)]
+ {
+ fops.compat_ioctl = Some(bindings::drm_compat_ioctl);
+ }
+ fops.poll = Some(bindings::drm_poll);
+ fops.read = Some(bindings::drm_read);
+ fops.llseek = Some(bindings::noop_llseek);
+ fops.mmap = Some(bindings::drm_gem_mmap);
+ fops.fop_flags = bindings::FOP_UNSIGNED_OFFSET;
+
+ fops
+}
diff --git a/rust/kernel/drm/ioctl.rs b/rust/kernel/drm/ioctl.rs
new file mode 100644
index 000000000000..445639404fb7
--- /dev/null
+++ b/rust/kernel/drm/ioctl.rs
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM IOCTL definitions.
+//!
+//! C header: [`include/linux/drm/drm_ioctl.h`](srctree/include/linux/drm/drm_ioctl.h)
+
+use crate::ioctl;
+
+const BASE: u32 = uapi::DRM_IOCTL_BASE as u32;
+
+/// Construct a DRM ioctl number with no argument.
+#[allow(non_snake_case)]
+#[inline(always)]
+pub const fn IO(nr: u32) -> u32 {
+ ioctl::_IO(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-only argument.
+#[allow(non_snake_case)]
+#[inline(always)]
+pub const fn IOR<T>(nr: u32) -> u32 {
+ ioctl::_IOR::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a write-only argument.
+#[allow(non_snake_case)]
+#[inline(always)]
+pub const fn IOW<T>(nr: u32) -> u32 {
+ ioctl::_IOW::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-write argument.
+#[allow(non_snake_case)]
+#[inline(always)]
+pub const fn IOWR<T>(nr: u32) -> u32 {
+ ioctl::_IOWR::<T>(BASE, nr)
+}
+
+/// Descriptor type for DRM ioctls. Use the `declare_drm_ioctls!{}` macro to construct them.
+pub type DrmIoctlDescriptor = bindings::drm_ioctl_desc;
+
+/// This is for ioctls which are used for rendering, and which require that the file descriptor is
+/// either for a render node, or, if it's a legacy/primary node, that it is authenticated.
+pub const AUTH: u32 = bindings::drm_ioctl_flags_DRM_AUTH;
+
+/// This must be set for any ioctl which can change the modeset or display state. Userspace must
+/// call the ioctl through a primary node, while it is the active master.
+///
+/// Note that read-only modeset ioctls can also be called by unauthenticated clients, or when a
+/// master is not the currently active one.
+pub const MASTER: u32 = bindings::drm_ioctl_flags_DRM_MASTER;
+
+/// Anything that could potentially wreck a master file descriptor needs to have this flag set.
+///
+/// Currently that's only the SETMASTER and DROPMASTER ioctls, which e.g. logind can call to
+/// force a non-behaving master (display compositor) into compliance.
+///
+/// This is equivalent to callers with the SYSADMIN capability.
+pub const ROOT_ONLY: u32 = bindings::drm_ioctl_flags_DRM_ROOT_ONLY;
+
+/// This is used for all ioctls needed for rendering only, for drivers which support render nodes.
+/// This should be set for all new render drivers, and hence it should always be set for any ioctl
+/// with `AUTH` set. Note though that read-only query ioctls might have this set, but not `AUTH`,
+/// because they do not require authentication.
+pub const RENDER_ALLOW: u32 = bindings::drm_ioctl_flags_DRM_RENDER_ALLOW;
+
+/// Internal structures used by the `declare_drm_ioctls!{}` macro. Do not use directly.
+#[doc(hidden)]
+pub mod internal {
+ pub use bindings::drm_device;
+ pub use bindings::drm_file;
+ pub use bindings::drm_ioctl_desc;
+}
+
+/// Declare the DRM ioctls for a driver.
+///
+/// Each entry in the list should have the form:
+///
+/// `(ioctl_number, argument_type, flags, user_callback),`
+///
+/// `argument_type` is the type name within the `uapi` crate.
+/// `user_callback` should have the following prototype:
+///
+/// ```ignore
+/// fn foo(device: &kernel::drm::Device<Self>,
+/// data: &Opaque<uapi::argument_type>,
+/// file: &kernel::drm::File<Self::File>,
+/// ) -> Result<u32>
+/// ```
+/// where `Self` is the `drm::Driver` implementation these ioctls are being declared within.
+///
+/// # Examples
+///
+/// ```ignore
+/// kernel::declare_drm_ioctls! {
+/// (FOO_GET_PARAM, drm_foo_get_param, ioctl::RENDER_ALLOW, my_get_param_handler),
+/// }
+/// ```
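+///
+/// Flags may be combined with a bitwise OR. A hedged variant of the above, where `FOO_SUBMIT`
+/// and `my_submit_handler` are hypothetical:
+///
+/// ```ignore
+/// kernel::declare_drm_ioctls! {
+///     (FOO_SUBMIT, drm_foo_submit, ioctl::AUTH | ioctl::RENDER_ALLOW, my_submit_handler),
+/// }
+/// ```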
+///
+#[macro_export]
+macro_rules! declare_drm_ioctls {
+ ( $(($cmd:ident, $struct:ident, $flags:expr, $func:expr)),* $(,)? ) => {
+ const IOCTLS: &'static [$crate::drm::ioctl::DrmIoctlDescriptor] = {
+ use $crate::uapi::*;
+ const _: () = {
+ let i: u32 = $crate::uapi::DRM_COMMAND_BASE;
+ // Assert that all the IOCTLs are in the right order and there are no gaps,
+ // and that the size of the specified type is correct.
+ $(
+ let cmd: u32 = $crate::macros::concat_idents!(DRM_IOCTL_, $cmd);
+ ::core::assert!(i == $crate::ioctl::_IOC_NR(cmd));
+ ::core::assert!(core::mem::size_of::<$crate::uapi::$struct>() ==
+ $crate::ioctl::_IOC_SIZE(cmd));
+ let i: u32 = i + 1;
+ )*
+ };
+
+ let ioctls = &[$(
+ $crate::drm::ioctl::internal::drm_ioctl_desc {
+ cmd: $crate::macros::concat_idents!(DRM_IOCTL_, $cmd) as u32,
+ func: {
+ #[allow(non_snake_case)]
+ unsafe extern "C" fn $cmd(
+ raw_dev: *mut $crate::drm::ioctl::internal::drm_device,
+ raw_data: *mut ::core::ffi::c_void,
+ raw_file: *mut $crate::drm::ioctl::internal::drm_file,
+ ) -> core::ffi::c_int {
+ // SAFETY:
+ // - The DRM core ensures the device lives while callbacks are being
+ // called.
+ // - The DRM device must have been registered when we're called through
+ // an IOCTL.
+ //
+ // FIXME: Currently there is nothing enforcing that the types of the
+ // dev/file match the current driver these ioctls are being declared
+ // for, and it's not clear how to enforce this within the type system.
+ let dev = unsafe { $crate::drm::device::Device::as_ref(raw_dev) };
+ // SAFETY: The ioctl argument has size `_IOC_SIZE(cmd)`, which we
+ // asserted above matches the size of this type, and all bit patterns of
+ // UAPI structs must be valid.
+ let data = unsafe {
+ &*(raw_data as *const $crate::types::Opaque<$crate::uapi::$struct>)
+ };
+ // SAFETY: The DRM core guarantees `raw_file` is a valid pointer to an open
+ // DRM file for the duration of this callback.
+ let file = unsafe { $crate::drm::File::as_ref(raw_file) };
+
+ match $func(dev, data, file) {
+ Err(e) => e.to_errno(),
+ Ok(i) => i.try_into()
+ .unwrap_or($crate::error::code::ERANGE.to_errno()),
+ }
+ }
+ Some($cmd)
+ },
+ flags: $flags,
+ name: $crate::c_str!(::core::stringify!($cmd)).as_char_ptr(),
+ }
+ ),*];
+ ioctls
+ };
+ };
+}
diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs
new file mode 100644
index 000000000000..1b82b6945edf
--- /dev/null
+++ b/rust/kernel/drm/mod.rs
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM subsystem abstractions.
+
+pub mod device;
+pub mod driver;
+pub mod file;
+pub mod gem;
+pub mod ioctl;
+
+pub use self::device::Device;
+pub use self::driver::Driver;
+pub use self::driver::DriverInfo;
+pub use self::driver::Registration;
+pub use self::file::File;
+
+pub(crate) mod private {
+ pub trait Sealed {}
+}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 4786d3ee1e92..3dee3139fcd4 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -4,15 +4,13 @@
//!
//! C header: [`include/uapi/asm-generic/errno-base.h`](srctree/include/uapi/asm-generic/errno-base.h)
-use crate::str::CStr;
-
-use alloc::{
- alloc::{AllocError, LayoutError},
- collections::TryReserveError,
+use crate::{
+ alloc::{layout::LayoutError, AllocError},
+ str::CStr,
};
-use core::convert::From;
use core::fmt;
+use core::num::NonZeroI32;
use core::num::TryFromIntError;
use core::str::Utf8Error;
@@ -24,7 +22,11 @@ pub mod code {
$(
#[doc = $doc]
)*
- pub const $err: super::Error = super::Error(-(crate::bindings::$err as i32));
+ pub const $err: super::Error =
+ match super::Error::try_from_errno(-(crate::bindings::$err as i32)) {
+ Some(err) => err,
+ None => panic!("Invalid errno in `declare_err!`"),
+ };
};
}
@@ -62,6 +64,7 @@ pub mod code {
declare_err!(EPIPE, "Broken pipe.");
declare_err!(EDOM, "Math argument out of domain of func.");
declare_err!(ERANGE, "Math result not representable.");
+ declare_err!(EOVERFLOW, "Value too large for defined data type.");
declare_err!(ERESTARTSYS, "Restart the system call.");
declare_err!(ERESTARTNOINTR, "System call was interrupted by a signal and will be restarted.");
declare_err!(ERESTARTNOHAND, "Restart if no handler.");
@@ -92,26 +95,36 @@ pub mod code {
///
/// The value is a valid `errno` (i.e. `>= -MAX_ERRNO && < 0`).
#[derive(Clone, Copy, PartialEq, Eq)]
-pub struct Error(core::ffi::c_int);
+pub struct Error(NonZeroI32);
impl Error {
/// Creates an [`Error`] from a kernel error code.
///
/// It is a bug to pass an out-of-range `errno`. `EINVAL` would
/// be returned in such a case.
- pub(crate) fn from_errno(errno: core::ffi::c_int) -> Error {
- if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+ pub fn from_errno(errno: crate::ffi::c_int) -> Error {
+ if let Some(error) = Self::try_from_errno(errno) {
+ error
+ } else {
// TODO: Make it a `WARN_ONCE` once available.
crate::pr_warn!(
- "attempted to create `Error` with out of range `errno`: {}",
+ "attempted to create `Error` with out of range `errno`: {}\n",
errno
);
- return code::EINVAL;
+ code::EINVAL
}
+ }
- // INVARIANT: The check above ensures the type invariant
- // will hold.
- Error(errno)
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// Returns [`None`] if `errno` is out-of-range.
+ const fn try_from_errno(errno: crate::ffi::c_int) -> Option<Error> {
+ if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+ return None;
+ }
+
+ // SAFETY: `errno` is checked above to be in a valid range.
+ Some(unsafe { Error::from_errno_unchecked(errno) })
}
/// Creates an [`Error`] from a kernel error code.
@@ -119,29 +132,35 @@ impl Error {
/// # Safety
///
/// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`).
- unsafe fn from_errno_unchecked(errno: core::ffi::c_int) -> Error {
+ const unsafe fn from_errno_unchecked(errno: crate::ffi::c_int) -> Error {
// INVARIANT: The contract ensures the type invariant
// will hold.
- Error(errno)
+ // SAFETY: The caller guarantees `errno` is non-zero.
+ Error(unsafe { NonZeroI32::new_unchecked(errno) })
}
/// Returns the kernel error code.
- pub fn to_errno(self) -> core::ffi::c_int {
- self.0
+ pub fn to_errno(self) -> crate::ffi::c_int {
+ self.0.get()
+ }
+
+ #[cfg(CONFIG_BLOCK)]
+ pub(crate) fn to_blk_status(self) -> bindings::blk_status_t {
+ // SAFETY: `self.0` is a valid error due to its invariant.
+ unsafe { bindings::errno_to_blk_status(self.0.get()) }
}
/// Returns the error encoded as a pointer.
- #[allow(dead_code)]
- pub(crate) fn to_ptr<T>(self) -> *mut T {
+ pub fn to_ptr<T>(self) -> *mut T {
// SAFETY: `self.0` is a valid error due to its invariant.
- unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
+ unsafe { bindings::ERR_PTR(self.0.get() as _) as *mut _ }
}
/// Returns a string representing the error, if one exists.
- #[cfg(not(testlib))]
+ #[cfg(not(any(test, testlib)))]
pub fn name(&self) -> Option<&'static CStr> {
// SAFETY: Just an FFI call, there are no extra safety requirements.
- let ptr = unsafe { bindings::errname(-self.0) };
+ let ptr = unsafe { bindings::errname(-self.0.get()) };
if ptr.is_null() {
None
} else {
@@ -155,7 +174,7 @@ impl Error {
/// When `testlib` is configured, this always returns `None` to avoid the dependency on a
/// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still
/// run in userspace.
- #[cfg(testlib)]
+ #[cfg(any(test, testlib))]
pub fn name(&self) -> Option<&'static CStr> {
None
}
@@ -166,9 +185,11 @@ impl fmt::Debug for Error {
match self.name() {
// Print out number if no name can be found.
None => f.debug_tuple("Error").field(&-self.0).finish(),
- // SAFETY: These strings are ASCII-only.
Some(name) => f
- .debug_tuple(unsafe { core::str::from_utf8_unchecked(name) })
+ .debug_tuple(
+ // SAFETY: These strings are ASCII-only.
+ unsafe { core::str::from_utf8_unchecked(name) },
+ )
.finish(),
}
}
@@ -192,12 +213,6 @@ impl From<Utf8Error> for Error {
}
}
-impl From<TryReserveError> for Error {
- fn from(_: TryReserveError) -> Error {
- code::ENOMEM
- }
-}
-
impl From<LayoutError> for Error {
fn from(_: LayoutError) -> Error {
code::ENOMEM
@@ -234,13 +249,134 @@ impl From<core::convert::Infallible> for Error {
/// [`Error`] as its error type.
///
/// Note that even if a function does not return anything when it succeeds,
-/// it should still be modeled as returning a `Result` rather than
+/// it should still be modeled as returning a [`Result`] rather than
/// just an [`Error`].
+///
+/// Calling a function that returns [`Result`] forces the caller to handle
+/// the returned [`Result`].
+///
+/// This can be done "manually" by using [`match`]. Using [`match`] to decode
+/// the [`Result`] is similar to C, where all the return value decoding and the
+/// error handling is done explicitly by writing handling code for each
+/// error to cover. With [`match`], the error and success handling can be
+/// implemented in as much detail as required. For example (inspired by
+/// [`samples/rust/rust_minimal.rs`]):
+///
+/// ```
+/// # #[allow(clippy::single_match)]
+/// fn example() -> Result {
+/// let mut numbers = KVec::new();
+///
+/// match numbers.push(72, GFP_KERNEL) {
+/// Err(e) => {
+/// pr_err!("Error pushing 72: {e:?}");
+/// return Err(e.into());
+/// }
+/// // Do nothing, continue.
+/// Ok(()) => (),
+/// }
+///
+/// match numbers.push(108, GFP_KERNEL) {
+/// Err(e) => {
+/// pr_err!("Error pushing 108: {e:?}");
+/// return Err(e.into());
+/// }
+/// // Do nothing, continue.
+/// Ok(()) => (),
+/// }
+///
+/// match numbers.push(200, GFP_KERNEL) {
+/// Err(e) => {
+/// pr_err!("Error pushing 200: {e:?}");
+/// return Err(e.into());
+/// }
+/// // Do nothing, continue.
+/// Ok(()) => (),
+/// }
+///
+/// Ok(())
+/// }
+/// # example()?;
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// An alternative to be more concise is the [`if let`] syntax:
+///
+/// ```
+/// fn example() -> Result {
+/// let mut numbers = KVec::new();
+///
+/// if let Err(e) = numbers.push(72, GFP_KERNEL) {
+/// pr_err!("Error pushing 72: {e:?}");
+/// return Err(e.into());
+/// }
+///
+/// if let Err(e) = numbers.push(108, GFP_KERNEL) {
+/// pr_err!("Error pushing 108: {e:?}");
+/// return Err(e.into());
+/// }
+///
+/// if let Err(e) = numbers.push(200, GFP_KERNEL) {
+/// pr_err!("Error pushing 200: {e:?}");
+/// return Err(e.into());
+/// }
+///
+/// Ok(())
+/// }
+/// # example()?;
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Instead of these verbose [`match`]/[`if let`] forms, the [`?`] operator can
+/// be used to handle the [`Result`]. Using the [`?`] operator is often
+/// the best choice to handle [`Result`] in a non-verbose way as done in
+/// [`samples/rust/rust_minimal.rs`]:
+///
+/// ```
+/// fn example() -> Result {
+/// let mut numbers = KVec::new();
+///
+/// numbers.push(72, GFP_KERNEL)?;
+/// numbers.push(108, GFP_KERNEL)?;
+/// numbers.push(200, GFP_KERNEL)?;
+///
+/// Ok(())
+/// }
+/// # example()?;
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Another possibility is to call [`unwrap()`](Result::unwrap) or
+/// [`expect()`](Result::expect). However, use of these functions is
+/// *heavily discouraged* in the kernel because they trigger a Rust
+/// [`panic!`] if an error happens, which may destabilize the system or
+/// entirely break it as a result -- just like the C [`BUG()`] macro.
+/// Please see the documentation for the C macro [`BUG()`] for guidance
+/// on when to use these functions.
+///
+/// Alternatively, depending on the use case, using [`unwrap_or()`],
+/// [`unwrap_or_else()`], [`unwrap_or_default()`] or [`unwrap_unchecked()`]
+/// might be an option, as well.
+///
+/// For even more details, please see the [Rust documentation].
+///
+/// [`match`]: https://doc.rust-lang.org/reference/expressions/match-expr.html
+/// [`samples/rust/rust_minimal.rs`]: srctree/samples/rust/rust_minimal.rs
+/// [`if let`]: https://doc.rust-lang.org/reference/expressions/if-expr.html#if-let-expressions
+/// [`?`]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#the-question-mark-operator
+/// [`unwrap()`]: Result::unwrap
+/// [`expect()`]: Result::expect
+/// [`BUG()`]: https://docs.kernel.org/process/deprecated.html#bug-and-bug-on
+/// [`unwrap_or()`]: Result::unwrap_or
+/// [`unwrap_or_else()`]: Result::unwrap_or_else
+/// [`unwrap_or_default()`]: Result::unwrap_or_default
+/// [`unwrap_unchecked()`]: Result::unwrap_unchecked
+/// [Rust documentation]: https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html
pub type Result<T = (), E = Error> = core::result::Result<T, E>;
/// Converts an integer as returned by a C kernel function to an error if it's negative, and
/// `Ok(())` otherwise.
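+///
+/// # Examples
+///
+/// A minimal sketch of typical use (`bindings::foo_device` and `bindings::foo_enable` are
+/// hypothetical):
+///
+/// ```ignore
+/// fn enable(dev: *mut bindings::foo_device) -> Result {
+///     // SAFETY: Hypothetical FFI call; returns zero on success or a negative errno.
+///     to_result(unsafe { bindings::foo_enable(dev) })
+/// }
+/// ```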
-pub fn to_result(err: core::ffi::c_int) -> Result {
+pub fn to_result(err: crate::ffi::c_int) -> Result {
if err < 0 {
Err(Error::from_errno(err))
} else {
@@ -263,21 +399,21 @@ pub fn to_result(err: core::ffi::c_int) -> Result {
/// fn devm_platform_ioremap_resource(
/// pdev: &mut PlatformDevice,
/// index: u32,
-/// ) -> Result<*mut core::ffi::c_void> {
+/// ) -> Result<*mut kernel::ffi::c_void> {
/// // SAFETY: `pdev` points to a valid platform device. There are no safety requirements
/// // on `index`.
/// from_err_ptr(unsafe { bindings::devm_platform_ioremap_resource(pdev.to_ptr(), index) })
/// }
/// ```
-// TODO: Remove `dead_code` marker once an in-kernel client is available.
-#[allow(dead_code)]
-pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
- // CAST: Casting a pointer to `*const core::ffi::c_void` is always valid.
- let const_ptr: *const core::ffi::c_void = ptr.cast();
+pub fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+ // CAST: Casting a pointer to `*const crate::ffi::c_void` is always valid.
+ let const_ptr: *const crate::ffi::c_void = ptr.cast();
// SAFETY: The FFI function does not deref the pointer.
if unsafe { bindings::IS_ERR(const_ptr) } {
// SAFETY: The FFI function does not deref the pointer.
let err = unsafe { bindings::PTR_ERR(const_ptr) };
+
+ #[allow(clippy::unnecessary_cast)]
// CAST: If `IS_ERR()` returns `true`,
// then `PTR_ERR()` is guaranteed to return a
// negative value greater-or-equal to `-bindings::MAX_ERRNO`,
@@ -287,8 +423,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
//
// SAFETY: `IS_ERR()` ensures `err` is a
// negative value greater-or-equal to `-bindings::MAX_ERRNO`.
- #[allow(clippy::unnecessary_cast)]
- return Err(unsafe { Error::from_errno_unchecked(err as core::ffi::c_int) });
+ return Err(unsafe { Error::from_errno_unchecked(err as crate::ffi::c_int) });
}
Ok(ptr)
}
@@ -308,7 +443,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
/// # use kernel::bindings;
/// unsafe extern "C" fn probe_callback(
/// pdev: *mut bindings::platform_device,
-/// ) -> core::ffi::c_int {
+/// ) -> kernel::ffi::c_int {
/// from_result(|| {
/// let ptr = devm_alloc(pdev)?;
/// bindings::platform_set_drvdata(pdev, ptr);
@@ -316,9 +451,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
/// })
/// }
/// ```
-// TODO: Remove `dead_code` marker once an in-kernel client is available.
-#[allow(dead_code)]
-pub(crate) fn from_result<T, F>(f: F) -> T
+pub fn from_result<T, F>(f: F) -> T
where
T: From<i16>,
F: FnOnce() -> Result<T>,
diff --git a/rust/kernel/faux.rs b/rust/kernel/faux.rs
new file mode 100644
index 000000000000..8a50fcd4c9bb
--- /dev/null
+++ b/rust/kernel/faux.rs
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+//! Abstractions for the faux bus.
+//!
+//! This module provides bindings for working with faux devices in kernel modules.
+//!
+//! C header: [`include/linux/device/faux.h`](srctree/include/linux/device/faux.h)
+
+use crate::{bindings, device, error::code::*, prelude::*};
+use core::ptr::{addr_of_mut, null, null_mut, NonNull};
+
+/// The registration of a faux device.
+///
+/// This type represents the registration of a [`struct faux_device`]. When an instance of this type
+/// is dropped, its respective faux device will be unregistered from the system.
+///
+/// # Invariants
+///
+/// `self.0` always holds a valid pointer to an initialized and registered [`struct faux_device`].
+///
+/// [`struct faux_device`]: srctree/include/linux/device/faux.h
+pub struct Registration(NonNull<bindings::faux_device>);
+
+impl Registration {
+ /// Create and register a new faux device with the given name.
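+ ///
+ /// # Examples
+ ///
+ /// A hedged sketch; the device name is arbitrary and the device is created without a parent:
+ ///
+ /// ```ignore
+ /// use kernel::{c_str, faux};
+ ///
+ /// // The device is unregistered again once `reg` is dropped.
+ /// let reg = faux::Registration::new(c_str!("rust-faux-sample"), None)?;
+ /// # Ok::<(), Error>(())
+ /// ```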
+ #[inline]
+ pub fn new(name: &CStr, parent: Option<&device::Device>) -> Result<Self> {
+ // SAFETY:
+ // - `name` is copied by this function into its own storage
+ // - `faux_ops` is safe to leave NULL according to the C API
+ // - `parent` can be either NULL or a pointer to a `struct device`, and `faux_device_create`
+ // will take a reference to `parent` using `device_add` - ensuring that it remains valid
+ // for the lifetime of the faux device.
+ let dev = unsafe {
+ bindings::faux_device_create(
+ name.as_char_ptr(),
+ parent.map_or(null_mut(), |p| p.as_raw()),
+ null(),
+ )
+ };
+
+ // The above function will return either a valid device or NULL on failure.
+ // INVARIANT: The device will remain registered until faux_device_destroy() is called, which
+ // happens in our Drop implementation.
+ Ok(Self(NonNull::new(dev).ok_or(ENODEV)?))
+ }
+
+ fn as_raw(&self) -> *mut bindings::faux_device {
+ self.0.as_ptr()
+ }
+}
+
+impl AsRef<device::Device> for Registration {
+ fn as_ref(&self) -> &device::Device {
+ // SAFETY: The underlying `device` in `faux_device` is guaranteed by the C API to be
+ // a valid initialized `device`.
+ unsafe { device::Device::as_ref(addr_of_mut!((*self.as_raw()).dev)) }
+ }
+}
+
+impl Drop for Registration {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: `self.0` is a valid registered faux_device via our type invariants.
+ unsafe { bindings::faux_device_destroy(self.as_raw()) }
+ }
+}
+
+// SAFETY: The faux device API is thread-safe as guaranteed by the device core, as long as
+// faux_device_destroy() is guaranteed to only be called once - which is guaranteed by our type not
+// having Copy/Clone.
+unsafe impl Send for Registration {}
+
+// SAFETY: The faux device API is thread-safe as guaranteed by the device core, as long as
+// faux_device_destroy() is guaranteed to only be called once - which is guaranteed by our type not
+// having Copy/Clone.
+unsafe impl Sync for Registration {}
diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
new file mode 100644
index 000000000000..2494c96e105f
--- /dev/null
+++ b/rust/kernel/firmware.rs
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Firmware abstraction
+//!
+//! C header: [`include/linux/firmware.h`](srctree/include/linux/firmware.h)
+
+use crate::{bindings, device::Device, error::Error, error::Result, ffi, str::CStr};
+use core::ptr::NonNull;
+
+/// # Invariants
+///
+/// One of the following: `bindings::request_firmware`, `bindings::firmware_request_nowarn`,
+/// `bindings::firmware_request_platform`, `bindings::request_firmware_direct`.
+struct FwFunc(
+ unsafe extern "C" fn(
+ *mut *const bindings::firmware,
+ *const ffi::c_char,
+ *mut bindings::device,
+ ) -> i32,
+);
+
+impl FwFunc {
+ fn request() -> Self {
+ Self(bindings::request_firmware)
+ }
+
+ fn request_nowarn() -> Self {
+ Self(bindings::firmware_request_nowarn)
+ }
+}
+
+/// Abstraction around a C `struct firmware`.
+///
+/// This is a simple abstraction around the C firmware API. Just like with the C API, firmware can
+/// be requested. Once requested the abstraction provides direct access to the firmware buffer as
+/// `&[u8]`. The firmware is released once [`Firmware`] is dropped.
+///
+/// # Invariants
+///
+/// The pointer is valid, and has ownership over the instance of `struct firmware`.
+///
+/// The `Firmware`'s backing buffer is not modified.
+///
+/// # Examples
+///
+/// ```no_run
+/// # use kernel::{c_str, device::Device, firmware::Firmware};
+///
+/// # fn no_run() -> Result<(), Error> {
+/// # // SAFETY: *NOT* safe, just for the example to get an `ARef<Device>` instance
+/// # let dev = unsafe { Device::get_device(core::ptr::null_mut()) };
+///
+/// let fw = Firmware::request(c_str!("path/to/firmware.bin"), &dev)?;
+/// let blob = fw.data();
+///
+/// # Ok(())
+/// # }
+/// ```
+pub struct Firmware(NonNull<bindings::firmware>);
+
+impl Firmware {
+ fn request_internal(name: &CStr, dev: &Device, func: FwFunc) -> Result<Self> {
+ let mut fw: *mut bindings::firmware = core::ptr::null_mut();
+ let pfw: *mut *mut bindings::firmware = &mut fw;
+
+ // SAFETY: `pfw` is a valid pointer to a NULL initialized `bindings::firmware` pointer.
+ // `name` and `dev` are valid as by their type invariants.
+ let ret = unsafe { func.0(pfw as _, name.as_char_ptr(), dev.as_raw()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: `func` not bailing out with a non-zero error code guarantees that `fw` is a
+ // valid pointer to `bindings::firmware`.
+ Ok(Firmware(unsafe { NonNull::new_unchecked(fw) }))
+ }
+
+ /// Send a firmware request and wait for it. See also `bindings::request_firmware`.
+ pub fn request(name: &CStr, dev: &Device) -> Result<Self> {
+ Self::request_internal(name, dev, FwFunc::request())
+ }
+
+ /// Send a request for an optional firmware module. See also
+ /// `bindings::firmware_request_nowarn`.
+ pub fn request_nowarn(name: &CStr, dev: &Device) -> Result<Self> {
+ Self::request_internal(name, dev, FwFunc::request_nowarn())
+ }
+
+ fn as_raw(&self) -> *mut bindings::firmware {
+ self.0.as_ptr()
+ }
+
+ /// Returns the size of the requested firmware in bytes.
+ pub fn size(&self) -> usize {
+ // SAFETY: `self.as_raw()` is valid by the type invariant.
+ unsafe { (*self.as_raw()).size }
+ }
+
+ /// Returns the requested firmware as `&[u8]`.
+ pub fn data(&self) -> &[u8] {
+ // SAFETY: `self.as_raw()` is valid by the type invariant. Additionally,
+ // `bindings::firmware` guarantees, if successfully requested, that
+ // `bindings::firmware::data` has a size of `bindings::firmware::size` bytes.
+ unsafe { core::slice::from_raw_parts((*self.as_raw()).data, self.size()) }
+ }
+}
+
+impl Drop for Firmware {
+ fn drop(&mut self) {
+ // SAFETY: `self.as_raw()` is valid by the type invariant.
+ unsafe { bindings::release_firmware(self.as_raw()) };
+ }
+}
+
+// SAFETY: `Firmware` only holds a pointer to a C `struct firmware`, which is safe to be used from
+// any thread.
+unsafe impl Send for Firmware {}
+
+// SAFETY: `Firmware` only holds a pointer to a C `struct firmware`, references to which are safe to
+// be used from any thread.
+unsafe impl Sync for Firmware {}
+
+/// Create firmware .modinfo entries.
+///
+/// This macro is the counterpart of the C macro `MODULE_FIRMWARE()`, but instead of taking a
+/// simple string literal, which is already covered by the `firmware` field of
+/// [`crate::prelude::module!`], it allows the caller to pass a builder type, based on
+/// [`ModInfoBuilder`], which can create the firmware modinfo strings in a more flexible way.
+///
+/// Drivers should extend the [`ModInfoBuilder`] with their own driver specific builder type.
+///
+/// The `builder` argument must be a type which implements the following function.
+///
+/// `const fn create(module_name: &'static CStr) -> ModInfoBuilder`
+///
+/// `create` should pass the `module_name` to the [`ModInfoBuilder`] and, with its help,
+/// construct the corresponding firmware modinfo.
+///
+/// Typically, such contracts would be enforced by a trait; however, traits do not (yet)
+/// support const functions.
+///
+/// # Example
+///
+/// ```
+/// # mod module_firmware_test {
+/// # use kernel::firmware;
+/// # use kernel::prelude::*;
+/// #
+/// # struct MyModule;
+/// #
+/// # impl kernel::Module for MyModule {
+/// # fn init(_module: &'static ThisModule) -> Result<Self> {
+/// # Ok(Self)
+/// # }
+/// # }
+/// #
+/// #
+/// struct Builder<const N: usize>;
+///
+/// impl<const N: usize> Builder<N> {
+/// const DIR: &'static str = "vendor/chip/";
+/// const FILES: [&'static str; 3] = [ "foo", "bar", "baz" ];
+///
+/// const fn create(module_name: &'static kernel::str::CStr) -> firmware::ModInfoBuilder<N> {
+/// let mut builder = firmware::ModInfoBuilder::new(module_name);
+///
+/// let mut i = 0;
+/// while i < Self::FILES.len() {
+/// builder = builder.new_entry()
+/// .push(Self::DIR)
+/// .push(Self::FILES[i])
+/// .push(".bin");
+///
+/// i += 1;
+/// }
+///
+/// builder
+/// }
+/// }
+///
+/// module! {
+/// type: MyModule,
+/// name: "module_firmware_test",
+/// author: "Rust for Linux",
+/// description: "module_firmware! test module",
+/// license: "GPL",
+/// }
+///
+/// kernel::module_firmware!(Builder);
+/// # }
+/// ```
+#[macro_export]
+macro_rules! module_firmware {
+ // The argument is the builder type without the const generic, since it's inferred from within
+ // this macro. Hence, we can neither use `expr` nor `ty`.
+ ($($builder:tt)*) => {
+ const _: () = {
+ const __MODULE_FIRMWARE_PREFIX: &'static $crate::str::CStr = if cfg!(MODULE) {
+ $crate::c_str!("")
+ } else {
+ <LocalModule as $crate::ModuleMetadata>::NAME
+ };
+
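+ // Note: the builder is evaluated twice, once with `N == 0` via `build_length()` to
+ // compute the required array size, and once with that size to emit the actual bytes.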
+ #[link_section = ".modinfo"]
+ #[used]
+ static __MODULE_FIRMWARE: [u8; $($builder)*::create(__MODULE_FIRMWARE_PREFIX)
+ .build_length()] = $($builder)*::create(__MODULE_FIRMWARE_PREFIX).build();
+ };
+ };
+}
+
+/// Builder for firmware module info.
+///
+/// [`ModInfoBuilder`] is a helper component to flexibly compose firmware path strings for the
+/// .modinfo section in const context.
+///
+/// To that end, [`ModInfoBuilder`] provides the methods [`ModInfoBuilder::new_entry`] and
+/// [`ModInfoBuilder::push`], where the latter is used to push path components and the former to
+/// mark the beginning of a new path string.
+///
+/// [`ModInfoBuilder`] is meant to be used in combination with [`kernel::module_firmware!`].
+///
+/// The const generic `N`, as well as the `module_name` parameter of [`ModInfoBuilder::new`], are
+/// internal implementation details supplied through the above macro.
+pub struct ModInfoBuilder<const N: usize> {
+ buf: [u8; N],
+ n: usize,
+ module_name: &'static CStr,
+}
+
+impl<const N: usize> ModInfoBuilder<N> {
+ /// Create an empty builder instance.
+ pub const fn new(module_name: &'static CStr) -> Self {
+ Self {
+ buf: [0; N],
+ n: 0,
+ module_name,
+ }
+ }
+
+ const fn push_internal(mut self, bytes: &[u8]) -> Self {
+ let mut j = 0;
+
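+ // With `N == 0` the builder runs in counting-only mode (see `build_length`), so just
+ // account for the pushed bytes without storing them.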
+ if N == 0 {
+ self.n += bytes.len();
+ return self;
+ }
+
+ while j < bytes.len() {
+ if self.n < N {
+ self.buf[self.n] = bytes[j];
+ }
+ self.n += 1;
+ j += 1;
+ }
+ self
+ }
+
+ /// Push an additional path component.
+ ///
+ /// Append path components to the [`ModInfoBuilder`] instance. Paths need to be separated
+ /// with [`ModInfoBuilder::new_entry`].
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use kernel::firmware::ModInfoBuilder;
+ ///
+ /// # const DIR: &str = "vendor/chip/";
+ /// # const fn no_run<const N: usize>(builder: ModInfoBuilder<N>) {
+ /// let builder = builder.new_entry()
+ /// .push(DIR)
+ /// .push("foo.bin")
+ /// .new_entry()
+ /// .push(DIR)
+ /// .push("bar.bin");
+ /// # }
+ /// ```
+ pub const fn push(self, s: &str) -> Self {
+ // Check whether there has been an initial call to `new_entry()`.
+ if N != 0 && self.n == 0 {
+ crate::build_error!("Must call new_entry() before push().");
+ }
+
+ self.push_internal(s.as_bytes())
+ }
+
+ const fn push_module_name(self) -> Self {
+ let mut this = self;
+ let module_name = this.module_name;
+
+ if !this.module_name.is_empty() {
+ this = this.push_internal(module_name.as_bytes_with_nul());
+
+ if N != 0 {
+ // Re-use the space taken by the NULL terminator and swap it with the '.' separator.
+ this.buf[this.n - 1] = b'.';
+ }
+ }
+
+ this
+ }
+
+ /// Prepare the [`ModInfoBuilder`] for the next entry.
+ ///
+ /// This method acts as a separator between module firmware path entries.
+ ///
+ /// Must be called before constructing a new entry with subsequent calls to
+ /// [`ModInfoBuilder::push`].
+ ///
+ /// See [`ModInfoBuilder::push`] for an example.
+ pub const fn new_entry(self) -> Self {
+ self.push_internal(b"\0")
+ .push_module_name()
+ .push_internal(b"firmware=")
+ }
+
+ /// Build the byte array.
+ pub const fn build(self) -> [u8; N] {
+ // Add the final NULL terminator.
+ let this = self.push_internal(b"\0");
+
+ if this.n == N {
+ this.buf
+ } else {
+ crate::build_error!("Length mismatch.");
+ }
+ }
+}
+
+impl ModInfoBuilder<0> {
+ /// Return the length of the byte array to build.
+ pub const fn build_length(self) -> usize {
+ // Compensate for the NULL terminator added by `build`.
+ self.n + 1
+ }
+}
diff --git a/rust/kernel/fs.rs b/rust/kernel/fs.rs
new file mode 100644
index 000000000000..0121b38c59e6
--- /dev/null
+++ b/rust/kernel/fs.rs
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel file systems.
+//!
+//! C headers: [`include/linux/fs.h`](srctree/include/linux/fs.h)
+
+pub mod file;
+pub use self::file::{File, LocalFile};
diff --git a/rust/kernel/fs/file.rs b/rust/kernel/fs/file.rs
new file mode 100644
index 000000000000..13a0e44cd1aa
--- /dev/null
+++ b/rust/kernel/fs/file.rs
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Files and file descriptors.
+//!
+//! C headers: [`include/linux/fs.h`](srctree/include/linux/fs.h) and
+//! [`include/linux/file.h`](srctree/include/linux/file.h)
+
+use crate::{
+ bindings,
+ cred::Credential,
+ error::{code::*, Error, Result},
+ types::{ARef, AlwaysRefCounted, NotThreadSafe, Opaque},
+};
+use core::ptr;
+
+/// Flags associated with a [`File`].
+pub mod flags {
+ /// File is opened in append mode.
+ pub const O_APPEND: u32 = bindings::O_APPEND;
+
+ /// Signal-driven I/O is enabled.
+ pub const O_ASYNC: u32 = bindings::FASYNC;
+
+ /// Close-on-exec flag is set.
+ pub const O_CLOEXEC: u32 = bindings::O_CLOEXEC;
+
+ /// File was created if it didn't already exist.
+ pub const O_CREAT: u32 = bindings::O_CREAT;
+
+ /// Direct I/O is enabled for this file.
+ pub const O_DIRECT: u32 = bindings::O_DIRECT;
+
+ /// File must be a directory.
+ pub const O_DIRECTORY: u32 = bindings::O_DIRECTORY;
+
+ /// Like [`O_SYNC`] except metadata is not synced.
+ pub const O_DSYNC: u32 = bindings::O_DSYNC;
+
+ /// Ensure that this file is created with the `open(2)` call.
+ pub const O_EXCL: u32 = bindings::O_EXCL;
+
+ /// Large file size enabled (`off64_t` over `off_t`).
+ pub const O_LARGEFILE: u32 = bindings::O_LARGEFILE;
+
+ /// Do not update the file last access time.
+ pub const O_NOATIME: u32 = bindings::O_NOATIME;
+
+ /// File should not be used as the process's controlling terminal.
+ pub const O_NOCTTY: u32 = bindings::O_NOCTTY;
+
+ /// If basename of path is a symbolic link, fail open.
+ pub const O_NOFOLLOW: u32 = bindings::O_NOFOLLOW;
+
+ /// File is using nonblocking I/O.
+ pub const O_NONBLOCK: u32 = bindings::O_NONBLOCK;
+
+ /// File is using nonblocking I/O.
+ ///
+ /// This is effectively the same flag as [`O_NONBLOCK`] on all architectures
+ /// except SPARC64.
+ pub const O_NDELAY: u32 = bindings::O_NDELAY;
+
+ /// Used to obtain a path file descriptor.
+ pub const O_PATH: u32 = bindings::O_PATH;
+
+ /// Write operations on this file will flush data and metadata.
+ pub const O_SYNC: u32 = bindings::O_SYNC;
+
+ /// This file is an unnamed temporary regular file.
+ pub const O_TMPFILE: u32 = bindings::O_TMPFILE;
+
+ /// File should be truncated to length 0.
+ pub const O_TRUNC: u32 = bindings::O_TRUNC;
+
+ /// Bitmask for access mode flags.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::fs::file;
+ /// # fn do_something() {}
+ /// # let flags = 0;
+ /// if (flags & file::flags::O_ACCMODE) == file::flags::O_RDONLY {
+ /// do_something();
+ /// }
+ /// ```
+ pub const O_ACCMODE: u32 = bindings::O_ACCMODE;
+
+ /// File is read only.
+ pub const O_RDONLY: u32 = bindings::O_RDONLY;
+
+ /// File is write only.
+ pub const O_WRONLY: u32 = bindings::O_WRONLY;
+
+ /// File can be both read and written.
+ pub const O_RDWR: u32 = bindings::O_RDWR;
+}
+
+/// Wraps the kernel's `struct file`. Thread safe.
+///
+/// This represents an open file rather than a file on a filesystem. Processes generally reference
+/// open files using file descriptors. However, file descriptors are not the same as files. A file
+/// descriptor is just an integer that corresponds to a file, and a single file may be referenced
+/// by multiple file descriptors.
+///
+/// # Refcounting
+///
+/// Instances of this type are reference-counted. The reference count is incremented by the
+/// `fget`/`get_file` functions and decremented by `fput`. The Rust type `ARef<File>` represents a
+/// pointer that owns a reference count on the file.
+///
+/// Whenever a process opens a file descriptor (fd), it stores a pointer to the file in its fd
+/// table (`struct files_struct`). This pointer owns a reference count to the file, ensuring the
+/// file isn't prematurely deleted while the file descriptor is open. In Rust terminology, the
+/// pointers in `struct files_struct` are `ARef<File>` pointers.
+///
+/// ## Light refcounts
+///
+/// Whenever a process has an fd to a file, it may use something called a "light refcount" as a
+/// performance optimization. Light refcounts are acquired by calling `fdget` and released with
+/// `fdput`. The idea behind light refcounts is that if the fd is not closed between the calls to
+/// `fdget` and `fdput`, then the refcount cannot hit zero during that time, as the `struct
+/// files_struct` holds a reference until the fd is closed. This means that it's safe to access the
+/// file even if `fdget` does not increment the refcount.
+///
+/// The requirement that the fd is not closed during a light refcount applies globally across all
+/// threads - not just on the thread using the light refcount. For this reason, light refcounts are
+/// only used when the `struct files_struct` is not shared with other threads, since this ensures
+/// that other unrelated threads cannot suddenly start using the fd and close it. Therefore,
+/// calling `fdget` on a shared `struct files_struct` creates a normal refcount instead of a light
+/// refcount.
+///
+/// Light reference counts must be released with `fdput` before the system call returns to
+/// userspace. This means that if you wait until the current system call returns to userspace, then
+/// all light refcounts that existed at the time have gone away.
+///
+/// ### The file position
+///
+/// Each `struct file` has a position integer, which is protected by the `f_pos_lock` mutex.
+/// However, if the `struct file` is not shared, then the kernel may avoid taking the lock as a
+/// performance optimization.
+///
+/// The condition for avoiding the `f_pos_lock` mutex is different from the condition for using
+/// `fdget`. With `fdget`, you may avoid incrementing the refcount as long as the current fd table
+/// is not shared; it is okay if there are other fd tables that also reference the same `struct
+/// file`. However, `fdget_pos` can only avoid taking the `f_pos_lock` if the entire `struct file`
+/// is not shared, as different processes with an fd to the same `struct file` share the same
+/// position.
+///
+/// To represent files that are not thread safe due to this optimization, the [`LocalFile`] type is
+/// used.
+///
+/// ## Rust references
+///
+/// The reference type `&File` is similar to light refcounts:
+///
+/// * `&File` references don't own a reference count. They can only exist as long as the reference
+/// count stays positive, and can only be created when there is some mechanism in place to ensure
+/// this.
+///
+/// * The Rust borrow-checker normally ensures this by enforcing that the `ARef<File>` from which
+/// a `&File` is created outlives the `&File`.
+///
+/// * Using the unsafe [`File::from_raw_file`] means that it is up to the caller to ensure that the
+/// `&File` only exists while the reference count is positive.
+///
+/// * You can think of `fdget` as using an fd to look up an `ARef<File>` in the `struct
+/// files_struct` and create an `&File` from it. The "fd cannot be closed" rule is like the Rust
+/// rule "the `ARef<File>` must outlive the `&File`".
+///
+/// # Invariants
+///
+/// * All instances of this type are refcounted using the `f_count` field.
+/// * There must not be any active calls to `fdget_pos` on this file that did not take the
+/// `f_pos_lock` mutex.
+#[repr(transparent)]
+pub struct File {
+ inner: Opaque<bindings::file>,
+}
+
+// SAFETY: This file is known to not have any active `fdget_pos` calls that did not take the
+// `f_pos_lock` mutex, so it is safe to transfer it between threads.
+unsafe impl Send for File {}
+
+// SAFETY: This file is known to not have any active `fdget_pos` calls that did not take the
+// `f_pos_lock` mutex, so it is safe to access its methods from several threads in parallel.
+unsafe impl Sync for File {}
+
+// SAFETY: The type invariants guarantee that `File` is always ref-counted. This implementation
+// makes `ARef<File>` own a normal refcount.
+unsafe impl AlwaysRefCounted for File {
+ #[inline]
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_file(self.as_ptr()) };
+ }
+
+ #[inline]
+ unsafe fn dec_ref(obj: ptr::NonNull<File>) {
+ // SAFETY: To call this method, the caller passes us ownership of a normal refcount, so we
+ // may drop it. The cast is okay since `File` has the same representation as `struct file`.
+ unsafe { bindings::fput(obj.cast().as_ptr()) }
+ }
+}
+
+/// Wraps the kernel's `struct file`. Not thread safe.
+///
+/// This type represents a file that is not known to be safe to transfer across thread boundaries.
+/// To obtain a thread-safe [`File`], use the [`assume_no_fdget_pos`] conversion.
+///
+/// See the documentation for [`File`] for more information.
+///
+/// # Invariants
+///
+/// * All instances of this type are refcounted using the `f_count` field.
+/// * If there is an active call to `fdget_pos` that did not take the `f_pos_lock` mutex, then it
+/// must be on the same thread as this file.
+///
+/// [`assume_no_fdget_pos`]: LocalFile::assume_no_fdget_pos
+#[repr(transparent)]
+pub struct LocalFile {
+ inner: Opaque<bindings::file>,
+}
+
+// SAFETY: The type invariants guarantee that `LocalFile` is always ref-counted. This
+// implementation makes `ARef<LocalFile>` own a normal refcount.
+unsafe impl AlwaysRefCounted for LocalFile {
+ #[inline]
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_file(self.as_ptr()) };
+ }
+
+ #[inline]
+ unsafe fn dec_ref(obj: ptr::NonNull<LocalFile>) {
+ // SAFETY: To call this method, the caller passes us ownership of a normal refcount, so we
+ // may drop it. The cast is okay since `LocalFile` has the same representation as `struct file`.
+ unsafe { bindings::fput(obj.cast().as_ptr()) }
+ }
+}
+
+impl LocalFile {
+ /// Constructs a new `struct file` wrapper from a file descriptor.
+ ///
+ /// The file descriptor belongs to the current process, and there might be active local calls
+ /// to `fdget_pos` on the same file.
+ ///
+ /// To obtain an `ARef<File>`, use the [`assume_no_fdget_pos`] function to convert.
+ ///
+ /// [`assume_no_fdget_pos`]: LocalFile::assume_no_fdget_pos
+ #[inline]
+ pub fn fget(fd: u32) -> Result<ARef<LocalFile>, BadFdError> {
+ // SAFETY: FFI call, there are no requirements on `fd`.
+ let ptr = ptr::NonNull::new(unsafe { bindings::fget(fd) }).ok_or(BadFdError)?;
+
+ // SAFETY: `bindings::fget` created a refcount, and we pass ownership of it to the `ARef`.
+ //
+ // INVARIANT: This file is in the fd table on this thread, so either all `fdget_pos` calls
+ // are on this thread, or the file is shared, in which case `fdget_pos` calls took the
+ // `f_pos_lock` mutex.
+ Ok(unsafe { ARef::from_raw(ptr.cast()) })
+ }
+
+ /// Creates a reference to a [`LocalFile`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The caller must ensure that `ptr` points at a valid file and that the file's refcount is
+ /// positive for the duration of `'a`.
+ /// * The caller must ensure that if there is an active call to `fdget_pos` that did not take
+ /// the `f_pos_lock` mutex, then that call is on the current thread.
+ #[inline]
+ pub unsafe fn from_raw_file<'a>(ptr: *const bindings::file) -> &'a LocalFile {
+ // SAFETY: The caller guarantees that the pointer is not dangling and stays valid for the
+ // duration of 'a. The cast is okay because `LocalFile` is `repr(transparent)`.
+ //
+ // INVARIANT: The caller guarantees that there are no problematic `fdget_pos` calls.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Assume that there are no active `fdget_pos` calls that prevent us from sharing this file.
+ ///
+ /// This makes it safe to transfer this file to other threads. No checks are performed, and
+ /// using it incorrectly may lead to a data race on the file position if the file is shared
+ /// with another thread.
+ ///
+ /// This method is intended to be used together with [`LocalFile::fget`] when the caller knows
+ /// statically that there are no `fdget_pos` calls on the current thread. For example, you
+ /// might use it when calling `fget` from an ioctl, since ioctls usually do not touch the file
+ /// position.
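+ ///
+ /// # Examples
+ ///
+ /// A hedged sketch of resolving an fd from an ioctl handler, where no `fdget_pos` calls
+ /// can be active on the current thread:
+ ///
+ /// ```ignore
+ /// use kernel::fs::{file::BadFdError, File, LocalFile};
+ /// use kernel::types::ARef;
+ ///
+ /// fn resolve_fd(fd: u32) -> Result<ARef<File>, BadFdError> {
+ ///     let local = LocalFile::fget(fd)?;
+ ///     // SAFETY: Called from an ioctl handler, which does not touch the file position,
+ ///     // so no `fdget_pos` calls are active on this thread.
+ ///     Ok(unsafe { LocalFile::assume_no_fdget_pos(local) })
+ /// }
+ /// ```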
+ ///
+ /// # Safety
+ ///
+ /// There must not be any active `fdget_pos` calls on the current thread.
+ #[inline]
+ pub unsafe fn assume_no_fdget_pos(me: ARef<LocalFile>) -> ARef<File> {
+ // INVARIANT: There are no `fdget_pos` calls on the current thread, and by the type
+ // invariants, if there is a `fdget_pos` call on another thread, then it took the
+ // `f_pos_lock` mutex.
+ //
+ // SAFETY: `LocalFile` and `File` have the same layout.
+ unsafe { ARef::from_raw(ARef::into_raw(me).cast()) }
+ }
+
+ /// Returns a raw pointer to the inner C struct.
+ #[inline]
+ pub fn as_ptr(&self) -> *mut bindings::file {
+ self.inner.get()
+ }
+
+ /// Returns the credentials of the task that originally opened the file.
+ pub fn cred(&self) -> &Credential {
+ // SAFETY: It's okay to read the `f_cred` field without synchronization because `f_cred` is
+ // never changed after initialization of the file.
+ let ptr = unsafe { (*self.as_ptr()).f_cred };
+
+ // SAFETY: The signature of this function ensures that the caller will only access the
+ // returned credential while the file is still valid, and the C side ensures that the
+ // credential stays valid at least as long as the file.
+ unsafe { Credential::from_ptr(ptr) }
+ }
+
+ /// Returns the flags associated with the file.
+ ///
+ /// The flags are a combination of the constants in [`flags`].
+ #[inline]
+ pub fn flags(&self) -> u32 {
+ // This `read_volatile` is intended to correspond to a READ_ONCE call.
+ //
+ // SAFETY: The file is valid because the shared reference guarantees a nonzero refcount.
+ //
+ // FIXME(read_once): Replace with `read_once` when available on the Rust side.
+ unsafe { core::ptr::addr_of!((*self.as_ptr()).f_flags).read_volatile() }
+ }
+}
+
+impl File {
+ /// Creates a reference to a [`File`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The caller must ensure that `ptr` points at a valid file and that the file's refcount is
+ /// positive for the duration of `'a`.
+ /// * The caller must ensure that if there are active `fdget_pos` calls on this file, then they
+ /// took the `f_pos_lock` mutex.
+ #[inline]
+ pub unsafe fn from_raw_file<'a>(ptr: *const bindings::file) -> &'a File {
+ // SAFETY: The caller guarantees that the pointer is not dangling and stays valid for the
+ // duration of 'a. The cast is okay because `File` is `repr(transparent)`.
+ //
+ // INVARIANT: The caller guarantees that there are no problematic `fdget_pos` calls.
+ unsafe { &*ptr.cast() }
+ }
+}
+
+// Make LocalFile methods available on File.
+impl core::ops::Deref for File {
+ type Target = LocalFile;
+ #[inline]
+ fn deref(&self) -> &LocalFile {
+ // SAFETY: The caller provides a `&File`, and since it is a reference, it must point at a
+ // valid file for the desired duration.
+ //
+ // By the type invariants, there are no `fdget_pos` calls that did not take the
+ // `f_pos_lock` mutex.
+ unsafe { LocalFile::from_raw_file(self as *const File as *const bindings::file) }
+ }
+}
+
+/// A file descriptor reservation.
+///
+/// This allows the creation of a file descriptor in two steps: first, we reserve a slot for it,
+/// then we commit or drop the reservation. The first step may fail (e.g., the current process ran
+/// out of available slots), but commit and drop never fail (and are mutually exclusive).
+///
+/// Dropping the reservation happens in the destructor of this type.
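+///
+/// # Examples
+///
+/// A hedged sketch of the two-step flow (`get_file_somehow` is a hypothetical helper that
+/// produces an `ARef<File>`):
+///
+/// ```ignore
+/// let reservation =
+///     FileDescriptorReservation::get_unused_fd_flags(kernel::fs::file::flags::O_CLOEXEC)?;
+/// // The reserved fd number may be handed to userspace before committing.
+/// let fd = reservation.reserved_fd();
+/// reservation.fd_install(get_file_somehow()?);
+/// ```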
+///
+/// # Invariants
+///
+/// The fd stored in this struct must correspond to a reserved file descriptor of the current task.
+pub struct FileDescriptorReservation {
+ fd: u32,
+ /// Prevent values of this type from being moved to a different task.
+ ///
+ /// The `fd_install` and `put_unused_fd` functions assume that the value of `current` is
+ /// unchanged since the call to `get_unused_fd_flags`. By adding this marker to this type, we
+ /// prevent it from being moved across task boundaries, which ensures that `current` does not
+ /// change while this value exists.
+ _not_send: NotThreadSafe,
+}
+
+impl FileDescriptorReservation {
+ /// Creates a new file descriptor reservation.
+ #[inline]
+ pub fn get_unused_fd_flags(flags: u32) -> Result<Self> {
+ // SAFETY: FFI call, there are no safety requirements on `flags`.
+ let fd: i32 = unsafe { bindings::get_unused_fd_flags(flags) };
+ if fd < 0 {
+ return Err(Error::from_errno(fd));
+ }
+ Ok(Self {
+ fd: fd as u32,
+ _not_send: NotThreadSafe,
+ })
+ }
+
+ /// Returns the file descriptor number that was reserved.
+ #[inline]
+ pub fn reserved_fd(&self) -> u32 {
+ self.fd
+ }
+
+ /// Commits the reservation.
+ ///
+ /// The previously reserved file descriptor is bound to `file`. This method consumes the
+ /// [`FileDescriptorReservation`], so it will not be usable after this call.
+ #[inline]
+ pub fn fd_install(self, file: ARef<File>) {
+ // SAFETY: `self.fd` was previously returned by `get_unused_fd_flags`. We have not yet used
+ // the fd, so it is still valid, and `current` still refers to the same task, as this type
+ // cannot be moved across task boundaries.
+ //
+ // Furthermore, the file pointer is guaranteed to own a refcount by its type invariants,
+ // and we take ownership of that refcount by not running the destructor below.
+ // Additionally, the file is known to not have any non-shared `fdget_pos` calls, so even if
+ // this process starts using the file position, this will not result in a data race on the
+ // file position.
+ unsafe { bindings::fd_install(self.fd, file.as_ptr()) };
+
+ // `fd_install` consumes both the file descriptor and the file reference, so we cannot run
+ // the destructors.
+ core::mem::forget(self);
+ core::mem::forget(file);
+ }
+}
+
+impl Drop for FileDescriptorReservation {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants of this type, `self.fd` was previously returned by
+ // `get_unused_fd_flags`. We have not yet used the fd, so it is still valid, and `current`
+ // still refers to the same task, as this type cannot be moved across task boundaries.
+ unsafe { bindings::put_unused_fd(self.fd) };
+ }
+}
+
+/// Represents the `EBADF` error code.
+///
+/// Used for methods that can only fail with `EBADF`.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct BadFdError;
+
+impl From<BadFdError> for Error {
+ #[inline]
+ fn from(_: BadFdError) -> Error {
+ EBADF
+ }
+}
+
+impl core::fmt::Debug for BadFdError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ f.pad("EBADF")
+ }
+}
diff --git a/rust/kernel/generated_arch_static_branch_asm.rs.S b/rust/kernel/generated_arch_static_branch_asm.rs.S
new file mode 100644
index 000000000000..2afb638708db
--- /dev/null
+++ b/rust/kernel/generated_arch_static_branch_asm.rs.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/jump_label.h>
+
+// Cut here.
+
+::kernel::concat_literals!(ARCH_STATIC_BRANCH_ASM("{symb} + {off} + {branch}", "{l_yes}"))
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 09004b56fb65..8d228c237954 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -1,127 +1,74 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
+// SPDX-License-Identifier: GPL-2.0
-//! API to safely and fallibly initialize pinned `struct`s using in-place constructors.
-//!
-//! It also allows in-place initialization of big `struct`s that would otherwise produce a stack
-//! overflow.
+//! Extensions to the [`pin-init`] crate.
//!
//! Most `struct`s from the [`sync`] module need to be pinned, because they contain self-referential
//! `struct`s from C. [Pinning][pinning] is Rust's way of ensuring data does not move.
//!
-//! # Overview
-//!
-//! To initialize a `struct` with an in-place constructor you will need two things:
-//! - an in-place constructor,
-//! - a memory location that can hold your `struct` (this can be the [stack], an [`Arc<T>`],
-//! [`UniqueArc<T>`], [`Box<T>`] or any other smart pointer that implements [`InPlaceInit`]).
-//!
-//! To get an in-place constructor there are generally three options:
-//! - directly creating an in-place constructor using the [`pin_init!`] macro,
-//! - a custom function/macro returning an in-place constructor provided by someone else,
-//! - using the unsafe function [`pin_init_from_closure()`] to manually create an initializer.
-//!
-//! Aside from pinned initialization, this API also supports in-place construction without pinning,
-//! the macros/types/functions are generally named like the pinned variants without the `pin`
-//! prefix.
+//! The [`pin-init`] crate is the way such structs are initialized on the Rust side. Please refer
+//! to its documentation to better understand how to use it. Additionally, there are many examples
+//! throughout the kernel, such as the types from the [`sync`] module, and the ones presented
+//! below.
//!
-//! # Examples
+//! [`sync`]: crate::sync
+//! [pinning]: https://doc.rust-lang.org/std/pin/index.html
+//! [`pin-init`]: https://rust.docs.kernel.org/pin_init/
//!
-//! ## Using the [`pin_init!`] macro
+//! # [`Opaque<T>`]
//!
-//! If you want to use [`PinInit`], then you will have to annotate your `struct` with
-//! `#[`[`pin_data`]`]`. It is a macro that uses `#[pin]` as a marker for
-//! [structurally pinned fields]. After doing this, you can then create an in-place constructor via
-//! [`pin_init!`]. The syntax is almost the same as normal `struct` initializers. The difference is
-//! that you need to write `<-` instead of `:` for fields that you want to initialize in-place.
+//! For the special case where initializing a field is a single FFI-function call that cannot fail,
+//! there exists the helper function [`Opaque::ffi_init`]. This function initializes a single
+//! [`Opaque<T>`] field by just delegating to the supplied closure. You can use it in
+//! combination with [`pin_init!`].
//!
-//! ```rust
-//! # #![allow(clippy::disallowed_names)]
-//! use kernel::sync::{new_mutex, Mutex};
-//! # use core::pin::Pin;
-//! #[pin_data]
-//! struct Foo {
-//! #[pin]
-//! a: Mutex<usize>,
-//! b: u32,
-//! }
+//! [`Opaque<T>`]: crate::types::Opaque
+//! [`Opaque::ffi_init`]: crate::types::Opaque::ffi_init
+//! [`pin_init!`]: pin_init::pin_init
//!
-//! let foo = pin_init!(Foo {
-//! a <- new_mutex!(42, "Foo::a"),
-//! b: 24,
-//! });
-//! ```
+//! # Examples
//!
-//! `foo` now is of the type [`impl PinInit<Foo>`]. We can now use any smart pointer that we like
-//! (or just the stack) to actually initialize a `Foo`:
+//! ## General Examples
//!
-//! ```rust
+//! ```rust,ignore
//! # #![allow(clippy::disallowed_names)]
-//! # use kernel::sync::{new_mutex, Mutex};
-//! # use core::pin::Pin;
-//! # #[pin_data]
-//! # struct Foo {
-//! # #[pin]
-//! # a: Mutex<usize>,
-//! # b: u32,
-//! # }
-//! # let foo = pin_init!(Foo {
-//! # a <- new_mutex!(42, "Foo::a"),
-//! # b: 24,
-//! # });
-//! let foo: Result<Pin<Box<Foo>>> = Box::pin_init(foo);
-//! ```
-//!
-//! For more information see the [`pin_init!`] macro.
+//! use kernel::types::Opaque;
+//! use pin_init::pin_init_from_closure;
//!
-//! ## Using a custom function/macro that returns an initializer
-//!
-//! Many types from the kernel supply a function/macro that returns an initializer, because the
-//! above method only works for types where you can access the fields.
-//!
-//! ```rust
-//! # use kernel::sync::{new_mutex, Arc, Mutex};
-//! let mtx: Result<Arc<Mutex<usize>>> = Arc::pin_init(new_mutex!(42, "example::mtx"));
-//! ```
-//!
-//! To declare an init macro/function you just return an [`impl PinInit<T, E>`]:
+//! // assume we have some `RawFoo` type in C:
+//! #[repr(C)]
+//! struct RawFoo([u8; 16]);
+//! extern "C" {
+//! fn init_foo(_: *mut RawFoo);
+//! }
//!
-//! ```rust
-//! # #![allow(clippy::disallowed_names)]
-//! # use kernel::{sync::Mutex, prelude::*, new_mutex, init::PinInit, try_pin_init};
//! #[pin_data]
-//! struct DriverData {
+//! struct Foo {
//! #[pin]
-//! status: Mutex<i32>,
-//! buffer: Box<[u8; 1_000_000]>,
+//! raw: Opaque<RawFoo>,
//! }
//!
-//! impl DriverData {
-//! fn new() -> impl PinInit<Self, Error> {
-//! try_pin_init!(Self {
-//! status <- new_mutex!(0, "DriverData::status"),
-//! buffer: Box::init(kernel::init::zeroed())?,
-//! })
+//! impl Foo {
+//! fn setup(self: Pin<&mut Self>) {
+//! pr_info!("Setting up foo\n");
//! }
//! }
-//! ```
-//!
-//! ## Manual creation of an initializer
//!
-//! Often when working with primitives the previous approaches are not sufficient. That is where
-//! [`pin_init_from_closure()`] comes in. This `unsafe` function allows you to create a
-//! [`impl PinInit<T, E>`] directly from a closure. Of course you have to ensure that the closure
-//! actually does the initialization in the correct way. Here are the things to look out for
-//! (we are calling the parameter to the closure `slot`):
-//! - when the closure returns `Ok(())`, then it has completed the initialization successfully, so
-//! `slot` now contains a valid bit pattern for the type `T`,
-//! - when the closure returns `Err(e)`, then the caller may deallocate the memory at `slot`, so
-//! you need to take care to clean up anything if your initialization fails mid-way,
-//! - you may assume that `slot` will stay pinned even after the closure returns until `drop` of
-//! `slot` gets called.
+//! let foo = pin_init!(Foo {
+//! raw <- unsafe {
+//! Opaque::ffi_init(|s| {
+//! // note that this cannot fail.
+//! init_foo(s);
+//! })
+//! },
+//! }).pin_chain(|foo| {
+//! foo.setup();
+//! Ok(())
+//! });
+//! ```
//!
-//! ```rust
+//! ```rust,ignore
//! # #![allow(unreachable_pub, clippy::disallowed_names)]
-//! use kernel::{prelude::*, init, types::Opaque};
+//! use kernel::{prelude::*, types::Opaque};
//! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};
//! # mod bindings {
//! # #![allow(non_camel_case_types)]
@@ -156,7 +103,7 @@
//! // enabled `foo`,
//! // - when it returns `Err(e)`, then it has cleaned up before
//! unsafe {
-//! init::pin_init_from_closure(move |slot: *mut Self| {
+//! pin_init::pin_init_from_closure(move |slot: *mut Self| {
//! // `slot` contains uninit memory, avoid creating a reference.
//! let foo = addr_of_mut!((*slot).foo);
//!
@@ -186,387 +133,114 @@
//! }
//! }
//! ```
-//!
-//! For the special case where initializing a field is a single FFI-function call that cannot fail,
-//! there exist the helper function [`Opaque::ffi_init`]. This function initialize a single
-//! [`Opaque`] field by just delegating to the supplied closure. You can use these in combination
-//! with [`pin_init!`].
-//!
-//! For more information on how to use [`pin_init_from_closure()`], take a look at the uses inside
-//! the `kernel` crate. The [`sync`] module is a good starting point.
-//!
-//! [`sync`]: kernel::sync
-//! [pinning]: https://doc.rust-lang.org/std/pin/index.html
-//! [structurally pinned fields]:
-//! https://doc.rust-lang.org/std/pin/index.html#pinning-is-structural-for-field
-//! [stack]: crate::stack_pin_init
-//! [`Arc<T>`]: crate::sync::Arc
-//! [`impl PinInit<Foo>`]: PinInit
-//! [`impl PinInit<T, E>`]: PinInit
-//! [`impl Init<T, E>`]: Init
-//! [`Opaque`]: kernel::types::Opaque
-//! [`Opaque::ffi_init`]: kernel::types::Opaque::ffi_init
-//! [`pin_data`]: ::macros::pin_data
-//! [`pin_init!`]: crate::pin_init!
use crate::{
+ alloc::{AllocError, Flags},
error::{self, Error},
- sync::UniqueArc,
- types::{Opaque, ScopeGuard},
-};
-use alloc::boxed::Box;
-use core::{
- alloc::AllocError,
- cell::UnsafeCell,
- convert::Infallible,
- marker::PhantomData,
- mem::MaybeUninit,
- num::*,
- pin::Pin,
- ptr::{self, NonNull},
};
+use pin_init::{init_from_closure, pin_init_from_closure, Init, PinInit};
-#[doc(hidden)]
-pub mod __internal;
-#[doc(hidden)]
-pub mod macros;
+/// Smart pointer that can initialize memory in-place.
+pub trait InPlaceInit<T>: Sized {
+ /// Pinned version of `Self`.
+ ///
+ /// If a type already implicitly pins its pointee, `Pin<Self>` is unnecessary. In this case use
+ /// `Self`, otherwise just use `Pin<Self>`.
+ type PinnedSelf;
-/// Initialize and pin a type directly on the stack.
-///
-/// # Examples
-///
-/// ```rust
-/// # #![allow(clippy::disallowed_names)]
-/// # use kernel::{init, macros::pin_data, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
-/// # use core::pin::Pin;
-/// #[pin_data]
-/// struct Foo {
-/// #[pin]
-/// a: Mutex<usize>,
-/// b: Bar,
-/// }
-///
-/// #[pin_data]
-/// struct Bar {
-/// x: u32,
-/// }
-///
-/// stack_pin_init!(let foo = pin_init!(Foo {
-/// a <- new_mutex!(42),
-/// b: Bar {
-/// x: 64,
-/// },
-/// }));
-/// let foo: Pin<&mut Foo> = foo;
-/// pr_info!("a: {}", &*foo.a.lock());
-/// ```
-///
-/// # Syntax
-///
-/// A normal `let` binding with optional type annotation. The expression is expected to implement
-/// [`PinInit`]/[`Init`] with the error type [`Infallible`]. If you want to use a different error
-/// type, then use [`stack_try_pin_init!`].
-///
-/// [`stack_try_pin_init!`]: crate::stack_try_pin_init!
-#[macro_export]
-macro_rules! stack_pin_init {
- (let $var:ident $(: $t:ty)? = $val:expr) => {
- let val = $val;
- let mut $var = ::core::pin::pin!($crate::init::__internal::StackInit$(::<$t>)?::uninit());
- let mut $var = match $crate::init::__internal::StackInit::init($var, val) {
- Ok(res) => res,
- Err(x) => {
- let x: ::core::convert::Infallible = x;
- match x {}
- }
+ /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
+ /// type.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
+ where
+ E: From<AllocError>;
+
+ /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
+ /// type.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self::PinnedSelf>
+ where
+ Error: From<E>,
+ {
+ // SAFETY: We delegate to `init` and only change the error type.
+ let init = unsafe {
+ pin_init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))
};
- };
-}
+ Self::try_pin_init(init, flags)
+ }
-/// Initialize and pin a type directly on the stack.
-///
-/// # Examples
-///
-/// ```rust,ignore
-/// # #![allow(clippy::disallowed_names)]
-/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
-/// # use macros::pin_data;
-/// # use core::{alloc::AllocError, pin::Pin};
-/// #[pin_data]
-/// struct Foo {
-/// #[pin]
-/// a: Mutex<usize>,
-/// b: Box<Bar>,
-/// }
-///
-/// struct Bar {
-/// x: u32,
-/// }
-///
-/// stack_try_pin_init!(let foo: Result<Pin<&mut Foo>, AllocError> = pin_init!(Foo {
-/// a <- new_mutex!(42),
-/// b: Box::try_new(Bar {
-/// x: 64,
-/// })?,
-/// }));
-/// let foo = foo.unwrap();
-/// pr_info!("a: {}", &*foo.a.lock());
-/// ```
-///
-/// ```rust,ignore
-/// # #![allow(clippy::disallowed_names)]
-/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
-/// # use macros::pin_data;
-/// # use core::{alloc::AllocError, pin::Pin};
-/// #[pin_data]
-/// struct Foo {
-/// #[pin]
-/// a: Mutex<usize>,
-/// b: Box<Bar>,
-/// }
-///
-/// struct Bar {
-/// x: u32,
-/// }
-///
-/// stack_try_pin_init!(let foo: Pin<&mut Foo> =? pin_init!(Foo {
-/// a <- new_mutex!(42),
-/// b: Box::try_new(Bar {
-/// x: 64,
-/// })?,
-/// }));
-/// pr_info!("a: {}", &*foo.a.lock());
-/// # Ok::<_, AllocError>(())
-/// ```
-///
-/// # Syntax
-///
-/// A normal `let` binding with optional type annotation. The expression is expected to implement
-/// [`PinInit`]/[`Init`]. This macro assigns a result to the given variable, adding a `?` after the
-/// `=` will propagate this error.
-#[macro_export]
-macro_rules! stack_try_pin_init {
- (let $var:ident $(: $t:ty)? = $val:expr) => {
- let val = $val;
- let mut $var = ::core::pin::pin!($crate::init::__internal::StackInit$(::<$t>)?::uninit());
- let mut $var = $crate::init::__internal::StackInit::init($var, val);
- };
- (let $var:ident $(: $t:ty)? =? $val:expr) => {
- let val = $val;
- let mut $var = ::core::pin::pin!($crate::init::__internal::StackInit$(::<$t>)?::uninit());
- let mut $var = $crate::init::__internal::StackInit::init($var, val)?;
- };
+ /// Use the given initializer to in-place initialize a `T`.
+ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>;
+
+ /// Use the given initializer to in-place initialize a `T`.
+ fn init<E>(init: impl Init<T, E>, flags: Flags) -> error::Result<Self>
+ where
+ Error: From<E>,
+ {
+ // SAFETY: We delegate to `init` and only change the error type.
+ let init = unsafe {
+ init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))
+ };
+ Self::try_init(init, flags)
+ }
}
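
For orientation, a minimal usage sketch of this trait, assuming `KBox` implements `InPlaceInit` (as the kernel's boxed allocator does) and using the `new_mutex!` initializer from `kernel::sync`:

```rust
use core::pin::Pin;
use kernel::prelude::*;
use kernel::sync::Mutex;

/// Sketch only: allocate a mutex-protected counter in place.
fn alloc_counter() -> Result<Pin<KBox<Mutex<usize>>>> {
    // `pin_init` adapts the initializer error to `kernel::error::Error`
    // and then delegates to `try_pin_init` with the given allocation flags.
    KBox::pin_init(kernel::new_mutex!(0), GFP_KERNEL)
}
```
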
-/// Construct an in-place, pinned initializer for `struct`s.
-///
-/// This macro defaults the error to [`Infallible`]. If you need [`Error`], then use
-/// [`try_pin_init!`].
-///
-/// The syntax is almost identical to that of a normal `struct` initializer:
-///
-/// ```rust
-/// # #![allow(clippy::disallowed_names)]
-/// # use kernel::{init, pin_init, macros::pin_data, init::*};
-/// # use core::pin::Pin;
-/// #[pin_data]
-/// struct Foo {
-/// a: usize,
-/// b: Bar,
-/// }
-///
-/// #[pin_data]
-/// struct Bar {
-/// x: u32,
-/// }
-///
-/// # fn demo() -> impl PinInit<Foo> {
-/// let a = 42;
-///
-/// let initializer = pin_init!(Foo {
-/// a,
-/// b: Bar {
-/// x: 64,
-/// },
-/// });
-/// # initializer }
-/// # Box::pin_init(demo()).unwrap();
-/// ```
-///
-/// Arbitrary Rust expressions can be used to set the value of a variable.
-///
-/// The fields are initialized in the order that they appear in the initializer. So it is possible
-/// to read already initialized fields using raw pointers.
-///
-/// IMPORTANT: You are not allowed to create references to fields of the struct inside of the
-/// initializer.
-///
-/// # Init-functions
-///
-/// When working with this API it is often desired to let others construct your types without
-/// giving access to all fields. This is where you would normally write a plain function `new`
-/// that would return a new instance of your type. With this API that is also possible.
-/// However, there are a few extra things to keep in mind.
-///
-/// To create an initializer function, simply declare it like this:
-///
-/// ```rust
-/// # #![allow(clippy::disallowed_names)]
-/// # use kernel::{init, pin_init, prelude::*, init::*};
-/// # use core::pin::Pin;
-/// # #[pin_data]
-/// # struct Foo {
-/// # a: usize,
-/// # b: Bar,
-/// # }
-/// # #[pin_data]
-/// # struct Bar {
-/// # x: u32,
-/// # }
-/// impl Foo {
-/// fn new() -> impl PinInit<Self> {
-/// pin_init!(Self {
-/// a: 42,
-/// b: Bar {
-/// x: 64,
-/// },
-/// })
-/// }
-/// }
-/// ```
+/// Construct an in-place fallible initializer for `struct`s.
///
-/// Users of `Foo` can now create it like this:
+/// This macro defaults the error to [`Error`]. If you need [`Infallible`], then use
+/// [`init!`].
///
-/// ```rust
-/// # #![allow(clippy::disallowed_names)]
-/// # use kernel::{init, pin_init, macros::pin_data, init::*};
-/// # use core::pin::Pin;
-/// # #[pin_data]
-/// # struct Foo {
-/// # a: usize,
-/// # b: Bar,
-/// # }
-/// # #[pin_data]
-/// # struct Bar {
-/// # x: u32,
-/// # }
-/// # impl Foo {
-/// # fn new() -> impl PinInit<Self> {
-/// # pin_init!(Self {
-/// # a: 42,
-/// # b: Bar {
-/// # x: 64,
-/// # },
-/// # })
-/// # }
-/// # }
-/// let foo = Box::pin_init(Foo::new());
-/// ```
+/// The syntax is identical to [`try_pin_init!`]. If you want to specify a custom error,
+/// append `? $type` after the `struct` initializer.
+/// The safety caveats from [`try_pin_init!`] also apply:
+/// - `unsafe` code must guarantee either full initialization or return an error and allow
+/// deallocation of the memory.
+/// - the fields are initialized in the order given in the initializer.
+/// - no references to fields are allowed to be created inside of the initializer.
///
-/// They can also easily embed it into their own `struct`s:
+/// # Examples
///
/// ```rust
-/// # #![allow(clippy::disallowed_names)]
-/// # use kernel::{init, pin_init, macros::pin_data, init::*};
-/// # use core::pin::Pin;
-/// # #[pin_data]
-/// # struct Foo {
-/// # a: usize,
-/// # b: Bar,
-/// # }
-/// # #[pin_data]
-/// # struct Bar {
-/// # x: u32,
-/// # }
-/// # impl Foo {
-/// # fn new() -> impl PinInit<Self> {
-/// # pin_init!(Self {
-/// # a: 42,
-/// # b: Bar {
-/// # x: 64,
-/// # },
-/// # })
-/// # }
-/// # }
-/// #[pin_data]
-/// struct FooContainer {
-/// #[pin]
-/// foo1: Foo,
-/// #[pin]
-/// foo2: Foo,
-/// other: u32,
+/// use kernel::error::Error;
+/// use pin_init::zeroed;
+/// struct BigBuf {
+/// big: KBox<[u8; 1024 * 1024 * 1024]>,
+/// small: [u8; 1024 * 1024],
/// }
///
-/// impl FooContainer {
-/// fn new(other: u32) -> impl PinInit<Self> {
-/// pin_init!(Self {
-/// foo1 <- Foo::new(),
-/// foo2 <- Foo::new(),
-/// other,
-/// })
+/// impl BigBuf {
+/// fn new() -> impl Init<Self, Error> {
+/// try_init!(Self {
+/// big: KBox::init(zeroed(), GFP_KERNEL)?,
+/// small: [0; 1024 * 1024],
+/// }? Error)
/// }
/// }
/// ```
///
-/// Here we see that when using `pin_init!` with `PinInit`, one needs to write `<-` instead of `:`.
-/// This signifies that the given field is initialized in-place. As with `struct` initializers, just
-/// writing the field (in this case `other`) without `:` or `<-` means `other: other,`.
-///
-/// # Syntax
-///
-/// As already mentioned in the examples above, inside of `pin_init!` a `struct` initializer with
-/// the following modifications is expected:
-/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
-/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
-/// pointer named `this` inside of the initializer.
-/// - Using struct update syntax one can place `..Zeroable::zeroed()` at the very end of the
-/// struct, this initializes every field with 0 and then runs all initializers specified in the
-/// body. This can only be done if [`Zeroable`] is implemented for the struct.
-///
-/// For instance:
-///
-/// ```rust
-/// # use kernel::{macros::{Zeroable, pin_data}, pin_init};
-/// # use core::{ptr::addr_of_mut, marker::PhantomPinned};
-/// #[pin_data]
-/// #[derive(Zeroable)]
-/// struct Buf {
-/// // `ptr` points into `buf`.
-/// ptr: *mut u8,
-/// buf: [u8; 64],
-/// #[pin]
-/// pin: PhantomPinned,
-/// }
-/// pin_init!(&this in Buf {
-/// buf: [0; 64],
-/// ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },
-/// pin: PhantomPinned,
-/// });
-/// pin_init!(Buf {
-/// buf: [1; 64],
-/// ..Zeroable::zeroed()
-/// });
-/// ```
-///
-/// [`try_pin_init!`]: kernel::try_pin_init
-/// [`NonNull<Self>`]: core::ptr::NonNull
-// For a detailed example of how this macro works, see the module documentation of the hidden
-// module `__internal` inside of `init/__internal.rs`.
+/// [`Infallible`]: core::convert::Infallible
+/// [`init!`]: pin_init::init
+/// [`try_pin_init!`]: crate::try_pin_init!
+/// [`Error`]: crate::error::Error
#[macro_export]
-macro_rules! pin_init {
+macro_rules! try_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::__init_internal!(
- @this($($this)?),
- @typ($t $(::<$($generics),*>)?),
- @fields($($fields)*),
- @error(::core::convert::Infallible),
- @data(PinData, use_data),
- @has_data(HasPinData, __pin_data),
- @construct_closure(pin_init_from_closure),
- @munch_fields($($fields)*),
- )
+ ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ $($fields)*
+ }? $crate::error::Error)
+ };
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }? $err:ty) => {
+ ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ $($fields)*
+ }? $err)
};
}
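
A hedged sketch of the second arm above (`? $err`), using a custom error type; the struct and its by-value fields are illustrative only:

```rust
use kernel::alloc::AllocError;
use pin_init::Init;

/// Illustrative type; by-value fields involve no error conversion.
struct Limits {
    soft: usize,
    hard: usize,
}

fn limits_init() -> impl Init<Limits, AllocError> {
    // The trailing `? AllocError` selects the custom-error arm instead of
    // defaulting to `$crate::error::Error`.
    kernel::try_init!(Limits {
        soft: 64,
        hard: 4096,
    }? AllocError)
}
```
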
@@ -589,10 +263,11 @@ macro_rules! pin_init {
///
/// ```rust
/// # #![feature(new_uninit)]
-/// use kernel::{init::{self, PinInit}, error::Error};
+/// use kernel::error::Error;
+/// use pin_init::zeroed;
/// #[pin_data]
/// struct BigBuf {
-/// big: Box<[u8; 1024 * 1024 * 1024]>,
+/// big: KBox<[u8; 1024 * 1024 * 1024]>,
/// small: [u8; 1024 * 1024],
/// ptr: *mut u8,
/// }
@@ -600,752 +275,31 @@ macro_rules! pin_init {
/// impl BigBuf {
/// fn new() -> impl PinInit<Self, Error> {
/// try_pin_init!(Self {
-/// big: Box::init(init::zeroed())?,
+/// big: KBox::init(zeroed(), GFP_KERNEL)?,
/// small: [0; 1024 * 1024],
/// ptr: core::ptr::null_mut(),
/// }? Error)
/// }
/// }
/// ```
-// For a detailed example of how this macro works, see the module documentation of the hidden
-// module `__internal` inside of `init/__internal.rs`.
-#[macro_export]
-macro_rules! try_pin_init {
- ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
- $($fields:tt)*
- }) => {
- $crate::__init_internal!(
- @this($($this)?),
- @typ($t $(::<$($generics),*>)? ),
- @fields($($fields)*),
- @error($crate::error::Error),
- @data(PinData, use_data),
- @has_data(HasPinData, __pin_data),
- @construct_closure(pin_init_from_closure),
- @munch_fields($($fields)*),
- )
- };
- ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
- $($fields:tt)*
- }? $err:ty) => {
- $crate::__init_internal!(
- @this($($this)?),
- @typ($t $(::<$($generics),*>)? ),
- @fields($($fields)*),
- @error($err),
- @data(PinData, use_data),
- @has_data(HasPinData, __pin_data),
- @construct_closure(pin_init_from_closure),
- @munch_fields($($fields)*),
- )
- };
-}
-
-/// Construct an in-place initializer for `struct`s.
///
-/// This macro defaults the error to [`Infallible`]. If you need [`Error`], then use
-/// [`try_init!`].
-///
-/// The syntax is identical to [`pin_init!`] and its safety caveats also apply:
-/// - `unsafe` code must guarantee either full initialization or return an error and allow
-/// deallocation of the memory.
-/// - the fields are initialized in the order given in the initializer.
-/// - no references to fields are allowed to be created inside of the initializer.
-///
-/// This initializer is for initializing data in-place that might later be moved. If you want to
-/// pin-initialize, use [`pin_init!`].
-///
-/// [`try_init!`]: crate::try_init!
-// For a detailed example of how this macro works, see the module documentation of the hidden
-// module `__internal` inside of `init/__internal.rs`.
+/// [`Infallible`]: core::convert::Infallible
+/// [`pin_init!`]: pin_init::pin_init
+/// [`Error`]: crate::error::Error
#[macro_export]
-macro_rules! init {
- ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
- $($fields:tt)*
- }) => {
- $crate::__init_internal!(
- @this($($this)?),
- @typ($t $(::<$($generics),*>)?),
- @fields($($fields)*),
- @error(::core::convert::Infallible),
- @data(InitData, /*no use_data*/),
- @has_data(HasInitData, __init_data),
- @construct_closure(init_from_closure),
- @munch_fields($($fields)*),
- )
- }
-}
-
-/// Construct an in-place fallible initializer for `struct`s.
-///
-/// This macro defaults the error to [`Error`]. If you need [`Infallible`], then use
-/// [`init!`].
-///
-/// The syntax is identical to [`try_pin_init!`]. If you want to specify a custom error,
-/// append `? $type` after the `struct` initializer.
-/// The safety caveats from [`try_pin_init!`] also apply:
-/// - `unsafe` code must guarantee either full initialization or return an error and allow
-/// deallocation of the memory.
-/// - the fields are initialized in the order given in the initializer.
-/// - no references to fields are allowed to be created inside of the initializer.
-///
-/// # Examples
-///
-/// ```rust
-/// use kernel::{init::{PinInit, zeroed}, error::Error};
-/// struct BigBuf {
-/// big: Box<[u8; 1024 * 1024 * 1024]>,
-/// small: [u8; 1024 * 1024],
-/// }
-///
-/// impl BigBuf {
-/// fn new() -> impl Init<Self, Error> {
-/// try_init!(Self {
-/// big: Box::init(zeroed())?,
-/// small: [0; 1024 * 1024],
-/// }? Error)
-/// }
-/// }
-/// ```
-// For a detailed example of how this macro works, see the module documentation of the hidden
-// module `__internal` inside of `init/__internal.rs`.
-#[macro_export]
-macro_rules! try_init {
+macro_rules! try_pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::__init_internal!(
- @this($($this)?),
- @typ($t $(::<$($generics),*>)?),
- @fields($($fields)*),
- @error($crate::error::Error),
- @data(InitData, /*no use_data*/),
- @has_data(HasInitData, __init_data),
- @construct_closure(init_from_closure),
- @munch_fields($($fields)*),
- )
+ ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ $($fields)*
+ }? $crate::error::Error)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- $crate::__init_internal!(
- @this($($this)?),
- @typ($t $(::<$($generics),*>)?),
- @fields($($fields)*),
- @error($err),
- @data(InitData, /*no use_data*/),
- @has_data(HasInitData, __init_data),
- @construct_closure(init_from_closure),
- @munch_fields($($fields)*),
- )
- };
-}
-
-/// A pin-initializer for the type `T`.
-///
-/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
-/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the
-/// [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc<T>`] on this.
-///
-/// Also see the [module description](self).
-///
-/// # Safety
-///
-/// When implementing this trait you will need to take great care. Also there are probably very few
-/// cases where a manual implementation is necessary. Use [`pin_init_from_closure`] where possible.
-///
-/// The [`PinInit::__pinned_init`] function:
-/// - returns `Ok(())` if it initialized every field of `slot`,
-/// - returns `Err(err)` if it encountered an error and then cleaned `slot`, this means:
-/// - `slot` can be deallocated without UB occurring,
-/// - `slot` does not need to be dropped,
-/// - `slot` is not partially initialized.
-/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
-///
-/// [`Arc<T>`]: crate::sync::Arc
-/// [`Arc::pin_init`]: crate::sync::Arc::pin_init
-#[must_use = "An initializer must be used in order to create its value."]
-pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
- /// Initializes `slot`.
- ///
- /// # Safety
- ///
- /// - `slot` is a valid pointer to uninitialized memory.
- /// - the caller does not touch `slot` when `Err` is returned, they are only permitted to
- /// deallocate.
- /// - `slot` will not move until it is dropped, i.e. it will be pinned.
- unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E>;
-
- /// First initializes the value using `self` then calls the function `f` with the initialized
- /// value.
- ///
- /// If `f` returns an error the value is dropped and the initializer will forward the error.
- ///
- /// # Examples
- ///
- /// ```rust
- /// # #![allow(clippy::disallowed_names)]
- /// use kernel::{types::Opaque, init::pin_init_from_closure};
- /// #[repr(C)]
- /// struct RawFoo([u8; 16]);
- /// extern {
- /// fn init_foo(_: *mut RawFoo);
- /// }
- ///
- /// #[pin_data]
- /// struct Foo {
- /// #[pin]
- /// raw: Opaque<RawFoo>,
- /// }
- ///
- /// impl Foo {
- /// fn setup(self: Pin<&mut Self>) {
- /// pr_info!("Setting up foo");
- /// }
- /// }
- ///
- /// let foo = pin_init!(Foo {
- /// raw <- unsafe {
- /// Opaque::ffi_init(|s| {
- /// init_foo(s);
- /// })
- /// },
- /// }).pin_chain(|foo| {
- /// foo.setup();
- /// Ok(())
- /// });
- /// ```
- fn pin_chain<F>(self, f: F) -> ChainPinInit<Self, F, T, E>
- where
- F: FnOnce(Pin<&mut T>) -> Result<(), E>,
- {
- ChainPinInit(self, f, PhantomData)
- }
-}
-
-/// An initializer returned by [`PinInit::pin_chain`].
-pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
-
-// SAFETY: The `__pinned_init` function is implemented such that it
-// - returns `Ok(())` on successful initialization,
-// - returns `Err(err)` on error and in this case `slot` will be dropped.
-// - considers `slot` pinned.
-unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainPinInit<I, F, T, E>
-where
- I: PinInit<T, E>,
- F: FnOnce(Pin<&mut T>) -> Result<(), E>,
-{
- unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
- // SAFETY: All requirements fulfilled since this function is `__pinned_init`.
- unsafe { self.0.__pinned_init(slot)? };
- // SAFETY: The above call initialized `slot` and we still have unique access.
- let val = unsafe { &mut *slot };
- // SAFETY: `slot` is considered pinned.
- let val = unsafe { Pin::new_unchecked(val) };
- (self.1)(val).map_err(|e| {
- // SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
- }
-}
-
-/// An initializer for `T`.
-///
-/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
-/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the
-/// [`InPlaceInit::init`] function of a smart pointer like [`Arc<T>`] on this. Because
-/// [`PinInit<T, E>`] is a super trait, you can use every function that takes it as well.
-///
-/// Also see the [module description](self).
-///
-/// # Safety
-///
-/// When implementing this trait you will need to take great care. Also there are probably very few
-/// cases where a manual implementation is necessary. Use [`init_from_closure`] where possible.
-///
-/// The [`Init::__init`] function:
-/// - returns `Ok(())` if it initialized every field of `slot`,
-/// - returns `Err(err)` if it encountered an error and then cleaned `slot`, this means:
-/// - `slot` can be deallocated without UB occurring,
-/// - `slot` does not need to be dropped,
-/// - `slot` is not partially initialized.
-/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
-///
-/// The `__pinned_init` function from the supertrait [`PinInit`] needs to execute the exact same
-/// code as `__init`.
-///
-/// Contrary to its supertype [`PinInit<T, E>`] the caller is allowed to
-/// move the pointee after initialization.
-///
-/// [`Arc<T>`]: crate::sync::Arc
-#[must_use = "An initializer must be used in order to create its value."]
-pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
- /// Initializes `slot`.
- ///
- /// # Safety
- ///
- /// - `slot` is a valid pointer to uninitialized memory.
- /// - the caller does not touch `slot` when `Err` is returned, they are only permitted to
- /// deallocate.
- unsafe fn __init(self, slot: *mut T) -> Result<(), E>;
-
- /// First initializes the value using `self` then calls the function `f` with the initialized
- /// value.
- ///
- /// If `f` returns an error the value is dropped and the initializer will forward the error.
- ///
- /// # Examples
- ///
- /// ```rust
- /// # #![allow(clippy::disallowed_names)]
- /// use kernel::{types::Opaque, init::{self, init_from_closure}};
- /// struct Foo {
- /// buf: [u8; 1_000_000],
- /// }
- ///
- /// impl Foo {
- /// fn setup(&mut self) {
- /// pr_info!("Setting up foo");
- /// }
- /// }
- ///
- /// let foo = init!(Foo {
- /// buf <- init::zeroed()
- /// }).chain(|foo| {
- /// foo.setup();
- /// Ok(())
- /// });
- /// ```
- fn chain<F>(self, f: F) -> ChainInit<Self, F, T, E>
- where
- F: FnOnce(&mut T) -> Result<(), E>,
- {
- ChainInit(self, f, PhantomData)
- }
-}
-
-/// An initializer returned by [`Init::chain`].
-pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
-
-// SAFETY: The `__init` function is implemented such that it
-// - returns `Ok(())` on successful initialization,
-// - returns `Err(err)` on error and in this case `slot` will be dropped.
-unsafe impl<T: ?Sized, E, I, F> Init<T, E> for ChainInit<I, F, T, E>
-where
- I: Init<T, E>,
- F: FnOnce(&mut T) -> Result<(), E>,
-{
- unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
- // SAFETY: All requirements fulfilled since this function is `__init`.
- unsafe { self.0.__pinned_init(slot)? };
- // SAFETY: The above call initialized `slot` and we still have unique access.
- (self.1)(unsafe { &mut *slot }).map_err(|e| {
- // SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
- }
-}
-
-// SAFETY: `__pinned_init` behaves exactly the same as `__init`.
-unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainInit<I, F, T, E>
-where
- I: Init<T, E>,
- F: FnOnce(&mut T) -> Result<(), E>,
-{
- unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
- // SAFETY: `__init` has less strict requirements compared to `__pinned_init`.
- unsafe { self.__init(slot) }
- }
-}
-
-/// Creates a new [`PinInit<T, E>`] from the given closure.
-///
-/// # Safety
-///
-/// The closure:
-/// - returns `Ok(())` if it initialized every field of `slot`,
-/// - returns `Err(err)` if it encountered an error and then cleaned `slot`, this means:
-/// - `slot` can be deallocated without UB occurring,
-/// - `slot` does not need to be dropped,
-/// - `slot` is not partially initialized.
-/// - may assume that the `slot` does not move if `T: !Unpin`,
-/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
-#[inline]
-pub const unsafe fn pin_init_from_closure<T: ?Sized, E>(
- f: impl FnOnce(*mut T) -> Result<(), E>,
-) -> impl PinInit<T, E> {
- __internal::InitClosure(f, PhantomData)
-}
-
-/// Creates a new [`Init<T, E>`] from the given closure.
-///
-/// # Safety
-///
-/// The closure:
-/// - returns `Ok(())` if it initialized every field of `slot`,
-/// - returns `Err(err)` if it encountered an error and then cleaned `slot`, this means:
-/// - `slot` can be deallocated without UB occurring,
-/// - `slot` does not need to be dropped,
-/// - `slot` is not partially initialized.
-/// - the `slot` may move after initialization.
-/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
-#[inline]
-pub const unsafe fn init_from_closure<T: ?Sized, E>(
- f: impl FnOnce(*mut T) -> Result<(), E>,
-) -> impl Init<T, E> {
- __internal::InitClosure(f, PhantomData)
-}
-
-/// An initializer that leaves the memory uninitialized.
-///
-/// The initializer is a no-op. The `slot` memory is not changed.
-#[inline]
-pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
- // SAFETY: The memory is allowed to be uninitialized.
- unsafe { init_from_closure(|_| Ok(())) }
-}
-
-/// Initializes an array by initializing each element via the provided initializer.
-///
-/// # Examples
-///
-/// ```rust
-/// use kernel::{error::Error, init::init_array_from_fn};
-/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i)).unwrap();
-/// assert_eq!(array.len(), 1_000);
-/// ```
-pub fn init_array_from_fn<I, const N: usize, T, E>(
- mut make_init: impl FnMut(usize) -> I,
-) -> impl Init<[T; N], E>
-where
- I: Init<T, E>,
-{
- let init = move |slot: *mut [T; N]| {
- let slot = slot.cast::<T>();
- // Counts the number of initialized elements and when dropped drops that many elements from
- // `slot`.
- let mut init_count = ScopeGuard::new_with_data(0, |i| {
- // We now free every element that has been initialized before.
- // SAFETY: The loop initialized exactly the values from 0..i and since we
- // return `Err` below, the caller will consider the memory at `slot` as
- // uninitialized.
- unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
- });
- for i in 0..N {
- let init = make_init(i);
- // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
- let ptr = unsafe { slot.add(i) };
- // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
- // requirements.
- unsafe { init.__init(ptr) }?;
- *init_count += 1;
- }
- init_count.dismiss();
- Ok(())
- };
- // SAFETY: The initializer above initializes every element of the array. On failure it drops
- // any initialized elements and returns `Err`.
- unsafe { init_from_closure(init) }
-}
-
-/// Initializes an array by initializing each element via the provided initializer.
-///
-/// # Examples
-///
-/// ```rust
-/// use kernel::{sync::{Arc, Mutex}, init::pin_init_array_from_fn, new_mutex};
-/// let array: Arc<[Mutex<usize>; 1_000]> =
-/// Arc::pin_init(pin_init_array_from_fn(|i| new_mutex!(i))).unwrap();
-/// assert_eq!(array.len(), 1_000);
-/// ```
-pub fn pin_init_array_from_fn<I, const N: usize, T, E>(
- mut make_init: impl FnMut(usize) -> I,
-) -> impl PinInit<[T; N], E>
-where
- I: PinInit<T, E>,
-{
- let init = move |slot: *mut [T; N]| {
- let slot = slot.cast::<T>();
- // Counts the number of initialized elements and when dropped drops that many elements from
- // `slot`.
- let mut init_count = ScopeGuard::new_with_data(0, |i| {
- // We now free every element that has been initialized before.
- // SAFETY: The loop initialized exactly the values from 0..i and since we
- // return `Err` below, the caller will consider the memory at `slot` as
- // uninitialized.
- unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
- });
- for i in 0..N {
- let init = make_init(i);
- // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
- let ptr = unsafe { slot.add(i) };
- // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
- // requirements.
- unsafe { init.__pinned_init(ptr) }?;
- *init_count += 1;
- }
- init_count.dismiss();
- Ok(())
+ ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ $($fields)*
+ }? $err)
};
- // SAFETY: The initializer above initializes every element of the array. On failure it drops
- // any initialized elements and returns `Err`.
- unsafe { pin_init_from_closure(init) }
}
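
A hedged follow-up to the `BigBuf` example above, showing how such a pin-initializer would typically be consumed (assuming `KBox: InPlaceInit<BigBuf>`):

```rust
use core::pin::Pin;
use kernel::prelude::*;

/// Sketch only: materialize the pin-initializer returned by `BigBuf::new`.
fn make_buf() -> Result<Pin<KBox<BigBuf>>> {
    // `pin_init` runs the initializer inside the freshly allocated `KBox`
    // and pins the result, so the `BigBuf` is never moved after init.
    KBox::pin_init(BigBuf::new(), GFP_KERNEL)
}
```
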
-
-// SAFETY: Every type can be initialized by-value.
-unsafe impl<T, E> Init<T, E> for T {
- unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
- unsafe { slot.write(self) };
- Ok(())
- }
-}
-
-// SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`.
-unsafe impl<T, E> PinInit<T, E> for T {
- unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
- unsafe { self.__init(slot) }
- }
-}
-
-/// Smart pointer that can initialize memory in-place.
-pub trait InPlaceInit<T>: Sized {
- /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
- /// type.
- ///
- /// If `T: !Unpin` it will not be able to move afterwards.
- fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
- where
- E: From<AllocError>;
-
- /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
- /// type.
- ///
- /// If `T: !Unpin` it will not be able to move afterwards.
- fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Pin<Self>>
- where
- Error: From<E>,
- {
- // SAFETY: We delegate to `init` and only change the error type.
- let init = unsafe {
- pin_init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))
- };
- Self::try_pin_init(init)
- }
-
- /// Use the given initializer to in-place initialize a `T`.
- fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
- where
- E: From<AllocError>;
-
- /// Use the given initializer to in-place initialize a `T`.
- fn init<E>(init: impl Init<T, E>) -> error::Result<Self>
- where
- Error: From<E>,
- {
- // SAFETY: We delegate to `init` and only change the error type.
- let init = unsafe {
- init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))
- };
- Self::try_init(init)
- }
-}
-
-impl<T> InPlaceInit<T> for Box<T> {
- #[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
- where
- E: From<AllocError>,
- {
- let mut this = Box::try_new_uninit()?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
- }
-
- #[inline]
- fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
- where
- E: From<AllocError>,
- {
- let mut this = Box::try_new_uninit()?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid.
- unsafe { init.__init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
- }
-}
-
-impl<T> InPlaceInit<T> for UniqueArc<T> {
- #[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
- where
- E: From<AllocError>,
- {
- let mut this = UniqueArc::try_new_uninit()?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
- }
-
- #[inline]
- fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
- where
- E: From<AllocError>,
- {
- let mut this = UniqueArc::try_new_uninit()?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid.
- unsafe { init.__init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
- }
-}
-
-/// Trait facilitating pinned destruction.
-///
-/// Use [`pinned_drop`] to implement this trait safely:
-///
-/// ```rust
-/// # use kernel::sync::Mutex;
-/// use kernel::macros::pinned_drop;
-/// use core::pin::Pin;
-/// #[pin_data(PinnedDrop)]
-/// struct Foo {
-/// #[pin]
-/// mtx: Mutex<usize>,
-/// }
-///
-/// #[pinned_drop]
-/// impl PinnedDrop for Foo {
-/// fn drop(self: Pin<&mut Self>) {
-/// pr_info!("Foo is being dropped!");
-/// }
-/// }
-/// ```
-///
-/// # Safety
-///
-/// This trait must be implemented via the [`pinned_drop`] proc-macro attribute on the impl.
-///
-/// [`pinned_drop`]: kernel::macros::pinned_drop
-pub unsafe trait PinnedDrop: __internal::HasPinData {
- /// Executes the pinned destructor of this type.
- ///
- /// While this function is marked safe, it is actually unsafe to call it manually. For this
- /// reason it takes an additional parameter. This type can only be constructed by `unsafe` code
- /// and thus prevents this function from being called where it should not.
- ///
- /// This extra parameter will be generated by the `#[pinned_drop]` proc-macro attribute
- /// automatically.
- fn drop(self: Pin<&mut Self>, only_call_from_drop: __internal::OnlyCallFromDrop);
-}
-
-/// Marker trait for types that can be initialized by writing just zeroes.
-///
-/// # Safety
-///
-/// The bit pattern consisting of only zeroes is a valid bit pattern for this type. In other words,
-/// this is not UB:
-///
-/// ```rust,ignore
-/// let val: Self = unsafe { core::mem::zeroed() };
-/// ```
-pub unsafe trait Zeroable {}
-
-/// Create a new zeroed T.
-///
-/// The returned initializer will write `0x00` to every byte of the given `slot`.
-#[inline]
-pub fn zeroed<T: Zeroable>() -> impl Init<T> {
- // SAFETY: Because `T: Zeroable`, all bytes zero is a valid bit pattern for `T`
- // and because we write all zeroes, the memory is initialized.
- unsafe {
- init_from_closure(|slot: *mut T| {
- slot.write_bytes(0, 1);
- Ok(())
- })
- }
-}
-
-macro_rules! impl_zeroable {
- ($($({$($generics:tt)*})? $t:ty, )*) => {
- $(unsafe impl$($($generics)*)? Zeroable for $t {})*
- };
-}
-
-impl_zeroable! {
- // SAFETY: All primitives that are allowed to be zero.
- bool,
- char,
- u8, u16, u32, u64, u128, usize,
- i8, i16, i32, i64, i128, isize,
- f32, f64,
-
- // Note: do not add uninhabited types (such as `!` or `core::convert::Infallible`) to this list;
- // creating an instance of an uninhabited type is immediate undefined behavior. For more on
- // uninhabited/empty types, consult The Rustonomicon:
- // <https://doc.rust-lang.org/stable/nomicon/exotic-sizes.html#empty-types>. The Rust Reference
- // also has information on undefined behavior:
- // <https://doc.rust-lang.org/stable/reference/behavior-considered-undefined.html>.
- //
- // SAFETY: These are inhabited ZSTs; there is nothing to zero and a valid value exists.
- {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, (),
-
- // SAFETY: Type is allowed to take any value, including all zeros.
- {<T>} MaybeUninit<T>,
- // SAFETY: Type is allowed to take any value, including all zeros.
- {<T>} Opaque<T>,
-
- // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`.
- {<T: ?Sized + Zeroable>} UnsafeCell<T>,
-
- // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
- Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
- Option<NonZeroU128>, Option<NonZeroUsize>,
- Option<NonZeroI8>, Option<NonZeroI16>, Option<NonZeroI32>, Option<NonZeroI64>,
- Option<NonZeroI128>, Option<NonZeroIsize>,
-
- // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
- //
- // In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant.
- {<T: ?Sized>} Option<NonNull<T>>,
- {<T: ?Sized>} Option<Box<T>>,
-
- // SAFETY: `null` pointer is valid.
- //
- // We cannot use `T: ?Sized`, since the VTABLE pointer part of fat pointers is not allowed to be
- // null.
- //
- // When `Pointee` gets stabilized, we could use
- // `T: ?Sized where <T as Pointee>::Metadata: Zeroable`
- {<T>} *mut T, {<T>} *const T,
-
- // SAFETY: `null` pointer is valid and the metadata part of these fat pointers is allowed to be
- // zero.
- {<T>} *mut [T], {<T>} *const [T], *mut str, *const str,
-
- // SAFETY: `T` is `Zeroable`.
- {<const N: usize, T: Zeroable>} [T; N], {<T: Zeroable>} Wrapping<T>,
-}
-
-macro_rules! impl_tuple_zeroable {
- ($(,)?) => {};
- ($first:ident, $($t:ident),* $(,)?) => {
- // SAFETY: All elements are zeroable and padding can be zero.
- unsafe impl<$first: Zeroable, $($t: Zeroable),*> Zeroable for ($first, $($t),*) {}
- impl_tuple_zeroable!($($t),* ,);
- }
-}
-
-impl_tuple_zeroable!(A, B, C, D, E, F, G, H, I, J);
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
new file mode 100644
index 000000000000..72d80a6f131e
--- /dev/null
+++ b/rust/kernel/io.rs
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory-mapped IO.
+//!
+//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
+
+use crate::error::{code::EINVAL, Result};
+use crate::{bindings, build_assert};
+
+/// Raw representation of an MMIO region.
+///
+/// By itself, the existence of an instance of this structure does not provide any guarantees that
+/// the represented MMIO region exists or is properly mapped.
+///
+/// Instead, the bus-specific MMIO implementation must convert this raw representation into an `Io`
+/// instance providing the actual memory accessors. Only the conversion into an `Io` structure
+/// gives any guarantees.
+pub struct IoRaw<const SIZE: usize = 0> {
+ addr: usize,
+ maxsize: usize,
+}
+
+impl<const SIZE: usize> IoRaw<SIZE> {
+ /// Returns a new `IoRaw` instance on success, an error otherwise.
+ pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
+ if maxsize < SIZE {
+ return Err(EINVAL);
+ }
+
+ Ok(Self { addr, maxsize })
+ }
+
+ /// Returns the base address of the MMIO region.
+ #[inline]
+ pub fn addr(&self) -> usize {
+ self.addr
+ }
+
+ /// Returns the maximum size of the MMIO region.
+ #[inline]
+ pub fn maxsize(&self) -> usize {
+ self.maxsize
+ }
+}
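
A small sketch of the invariant `new` enforces; the addresses below are illustrative values only:

```rust
use kernel::io::IoRaw;

fn demo() {
    // The const generic `SIZE` is the compile-time guaranteed minimum size,
    // so a `maxsize` smaller than `SIZE` must be rejected with `EINVAL`.
    assert!(IoRaw::<4>::new(0xffff_0000, 16).is_ok()); // 16 >= 4
    assert!(IoRaw::<8>::new(0xffff_0000, 4).is_err()); // 4 < 8 -> EINVAL
}
```
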
+
+/// IO-mapped memory, starting at the base address `addr` and spanning `maxsize` bytes.
+///
+/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
+/// mapping, performing an additional region request etc.
+///
+/// # Invariant
+///
+/// `addr` is the start and `maxsize` the length of a valid I/O mapped memory region.
+///
+/// # Examples
+///
+/// ```no_run
+/// # use kernel::{bindings, io::{Io, IoRaw}};
+/// # use core::ops::Deref;
+///
+/// // See also [`pci::Bar`] for a real example.
+/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+///
+/// impl<const SIZE: usize> IoMem<SIZE> {
+/// /// # Safety
+/// ///
+/// /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
+/// /// virtual address space.
+/// unsafe fn new(paddr: usize) -> Result<Self>{
+/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
+/// // valid for `ioremap`.
+/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// if addr.is_null() {
+/// return Err(ENOMEM);
+/// }
+///
+/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
+/// fn drop(&mut self) {
+/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
+/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
+/// type Target = Io<SIZE>;
+///
+/// fn deref(&self) -> &Self::Target {
+/// // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
+/// unsafe { Io::from_raw(&self.0) }
+/// }
+/// }
+///
+/// # fn no_run() -> Result<(), Error> {
+/// // SAFETY: Invalid usage for example purposes.
+/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
+/// iomem.write32(0x42, 0x0);
+/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
+/// assert!(iomem.try_write32(0x42, 0x4).is_err());
+/// # Ok(())
+/// # }
+/// ```
+#[repr(transparent)]
+pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);
+
+macro_rules! define_read {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
+ /// Read IO data from a given offset known at compile time.
+ ///
+ /// Bounds checks are performed at compile time; if the offset is not known at compile
+ /// time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, offset: usize) -> $type_name {
+ let addr = self.io_addr_assert::<$type_name>(offset);
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$c_fn(addr as _) }
+ }
+
+ /// Read IO data from a given offset.
+ ///
+ /// Bounds checks are performed at runtime; the access fails if the offset (plus the
+ /// type size) is out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
+ let addr = self.io_addr::<$type_name>(offset)?;
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ Ok(unsafe { bindings::$c_fn(addr as _) })
+ }
+ };
+}
+
+macro_rules! define_write {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
+ /// Write IO data to a given offset known at compile time.
+ ///
+ /// Bounds checks are performed at compile time; if the offset is not known at compile
+ /// time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, value: $type_name, offset: usize) {
+ let addr = self.io_addr_assert::<$type_name>(offset);
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$c_fn(value, addr as _) }
+ }
+
+ /// Write IO data to a given offset.
+ ///
+ /// Bounds checks are performed at runtime; the access fails if the offset (plus the
+ /// type size) is out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
+ let addr = self.io_addr::<$type_name>(offset)?;
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$c_fn(value, addr as _) }
+ Ok(())
+ }
+ };
+}
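
For reference, a simplified sketch of what one `define_read!` invocation generates, as it would sit inside `rust/kernel/io.rs` where `bindings` and `Result` are in scope; the real expansion also carries the optional `cfg` attribute and the doc comments shown above:

```rust
// Conceptual expansion of `define_read!(read32, try_read32, readl -> u32)`;
// a sketch, not the literal macro output.
impl<const SIZE: usize> Io<SIZE> {
    pub fn read32(&self, offset: usize) -> u32 {
        // Compile-time bounds check: an invalid offset fails the build.
        let addr = self.io_addr_assert::<u32>(offset);
        // SAFETY: By the type invariant `addr` is a valid address for MMIO.
        unsafe { bindings::readl(addr as _) }
    }

    pub fn try_read32(&self, offset: usize) -> Result<u32> {
        // Runtime bounds check: an invalid offset returns `EINVAL`.
        let addr = self.io_addr::<u32>(offset)?;
        // SAFETY: By the type invariant `addr` is a valid address for MMIO.
        Ok(unsafe { bindings::readl(addr as _) })
    }
}
```
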
+
+impl<const SIZE: usize> Io<SIZE> {
+ /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
+ /// `maxsize`.
+ pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
+ // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
+ unsafe { &*core::ptr::from_ref(raw).cast() }
+ }
+
+ /// Returns the base address of this mapping.
+ #[inline]
+ pub fn addr(&self) -> usize {
+ self.0.addr()
+ }
+
+ /// Returns the maximum size of this mapping.
+ #[inline]
+ pub fn maxsize(&self) -> usize {
+ self.0.maxsize()
+ }
+
+ #[inline]
+ const fn offset_valid<U>(offset: usize, size: usize) -> bool {
+ let type_size = core::mem::size_of::<U>();
+ if let Some(end) = offset.checked_add(type_size) {
+ end <= size && offset % type_size == 0
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ fn io_addr<U>(&self, offset: usize) -> Result<usize> {
+ if !Self::offset_valid::<U>(offset, self.maxsize()) {
+ return Err(EINVAL);
+ }
+
+ // Strictly speaking there is no need to check: the safety requirements of
+ // `Self::from_raw` guarantee that this cannot overflow.
+ self.addr().checked_add(offset).ok_or(EINVAL)
+ }
+
+ #[inline]
+ fn io_addr_assert<U>(&self, offset: usize) -> usize {
+ build_assert!(Self::offset_valid::<U>(offset, SIZE));
+
+ self.addr() + offset
+ }
+
+ define_read!(read8, try_read8, readb -> u8);
+ define_read!(read16, try_read16, readw -> u16);
+ define_read!(read32, try_read32, readl -> u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ read64,
+ try_read64,
+ readq -> u64
+ );
+
+ define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
+ define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
+ define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ read64_relaxed,
+ try_read64_relaxed,
+ readq_relaxed -> u64
+ );
+
+ define_write!(write8, try_write8, writeb <- u8);
+ define_write!(write16, try_write16, writew <- u16);
+ define_write!(write32, try_write32, writel <- u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ write64,
+ try_write64,
+ writeq <- u64
+ );
+
+ define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
+ define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
+ define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ write64_relaxed,
+ try_write64_relaxed,
+ writeq_relaxed <- u64
+ );
+}
diff --git a/rust/kernel/ioctl.rs b/rust/kernel/ioctl.rs
index cfa7d080b531..2fc7662339e5 100644
--- a/rust/kernel/ioctl.rs
+++ b/rust/kernel/ioctl.rs
@@ -4,7 +4,7 @@
//!
//! C header: [`include/asm-generic/ioctl.h`](srctree/include/asm-generic/ioctl.h)
-#![allow(non_snake_case)]
+#![expect(non_snake_case)]
use crate::build_assert;
diff --git a/rust/kernel/jump_label.rs b/rust/kernel/jump_label.rs
new file mode 100644
index 000000000000..4e974c768dbd
--- /dev/null
+++ b/rust/kernel/jump_label.rs
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Logic for static keys.
+//!
+//! C header: [`include/linux/jump_label.h`](srctree/include/linux/jump_label.h).
+
+/// Branch based on a static key.
+///
+/// Takes three arguments:
+///
+/// * `key` - the path to the static variable containing the `static_key`.
+/// * `keytyp` - the type of `key`.
+/// * `field` - the name of the field of `key` that contains the `static_key`.
+///
+/// # Safety
+///
+/// The macro must be used with a real static key defined by C.
+#[macro_export]
+macro_rules! static_branch_unlikely {
+ ($key:path, $keytyp:ty, $field:ident) => {{
+ let _key: *const $keytyp = ::core::ptr::addr_of!($key);
+ let _key: *const $crate::bindings::static_key_false = ::core::ptr::addr_of!((*_key).$field);
+ let _key: *const $crate::bindings::static_key = _key.cast();
+
+ #[cfg(not(CONFIG_JUMP_LABEL))]
+ {
+ $crate::bindings::static_key_count(_key.cast_mut()) > 0
+ }
+
+ #[cfg(CONFIG_JUMP_LABEL)]
+ $crate::jump_label::arch_static_branch! { $key, $keytyp, $field, false }
+ }};
+}
+pub use static_branch_unlikely;
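
A hedged usage sketch, modeled on how the tracepoint support consumes this macro; the binding names below (`__tracepoint_my_event`, `tracepoint`) are illustrative assumptions, not real bindings:

```rust
fn my_event_enabled() -> bool {
    // SAFETY: Per the macro contract, the key must be a real static key
    // defined by C; here we assume `__tracepoint_my_event` is one.
    unsafe {
        kernel::static_branch_unlikely!(
            kernel::bindings::__tracepoint_my_event, // `key`
            kernel::bindings::tracepoint,            // `keytyp`
            key                                      // `field`
        )
    }
}
```
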
+
+/// Assert that the assembly block evaluates to a string literal.
+#[cfg(CONFIG_JUMP_LABEL)]
+const _: &str = include!(concat!(
+ env!("OBJTREE"),
+ "/rust/kernel/generated_arch_static_branch_asm.rs"
+));
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(CONFIG_JUMP_LABEL)]
+macro_rules! arch_static_branch {
+ ($key:path, $keytyp:ty, $field:ident, $branch:expr) => {'my_label: {
+ $crate::asm!(
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_static_branch_asm.rs"));
+ l_yes = label {
+ break 'my_label true;
+ },
+ symb = sym $key,
+ off = const ::core::mem::offset_of!($keytyp, $field),
+ branch = const $crate::jump_label::bool_to_int($branch),
+ );
+
+ break 'my_label false;
+ }};
+}
+
+#[cfg(CONFIG_JUMP_LABEL)]
+pub use arch_static_branch;
+
+/// A helper used by inline assembly to pass a boolean as a `const` parameter.
+///
+/// Using this function instead of a cast lets you assert that the input is a boolean, and not some
+/// other type that can also be cast to an integer.
+#[doc(hidden)]
+pub const fn bool_to_int(b: bool) -> i32 {
+ b as i32
+}
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
index 0ba77276ae7e..1604fb6a5b1b 100644
--- a/rust/kernel/kunit.rs
+++ b/rust/kernel/kunit.rs
@@ -18,7 +18,7 @@ pub fn err(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- b"\x013%pA\0".as_ptr() as _,
+ c"\x013%pA".as_ptr() as _,
&args as *const _ as *const c_void,
);
}
@@ -34,12 +34,14 @@ pub fn info(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- b"\x016%pA\0".as_ptr() as _,
+ c"\x016%pA".as_ptr() as _,
&args as *const _ as *const c_void,
);
}
}
+use macros::kunit_tests;
+
/// Asserts that a boolean expression is `true` at runtime.
///
/// Public but hidden since it should only be used from generated tests.
@@ -161,3 +163,172 @@ macro_rules! kunit_assert_eq {
$crate::kunit_assert!($name, $file, $diff, $left == $right);
}};
}
+
+/// Represents an individual test case.
+///
+/// The [`kunit_unsafe_test_suite!`] macro expects a NULL-terminated list of valid test cases.
+/// Use [`kunit_case_null`] to generate the terminating (NULL) delimiter.
+#[doc(hidden)]
+pub const fn kunit_case(
+ name: &'static kernel::str::CStr,
+ run_case: unsafe extern "C" fn(*mut kernel::bindings::kunit),
+) -> kernel::bindings::kunit_case {
+ kernel::bindings::kunit_case {
+ run_case: Some(run_case),
+ name: name.as_char_ptr(),
+ attr: kernel::bindings::kunit_attributes {
+ speed: kernel::bindings::kunit_speed_KUNIT_SPEED_NORMAL,
+ },
+ generate_params: None,
+ status: kernel::bindings::kunit_status_KUNIT_SUCCESS,
+ module_name: core::ptr::null_mut(),
+ log: core::ptr::null_mut(),
+ }
+}
+
+/// Represents the NULL test case delimiter.
+///
+/// The [`kunit_unsafe_test_suite!`] macro expects a NULL-terminated list of test cases. This
+/// function returns such a delimiter.
+#[doc(hidden)]
+pub const fn kunit_case_null() -> kernel::bindings::kunit_case {
+ kernel::bindings::kunit_case {
+ run_case: None,
+ name: core::ptr::null_mut(),
+ generate_params: None,
+ attr: kernel::bindings::kunit_attributes {
+ speed: kernel::bindings::kunit_speed_KUNIT_SPEED_NORMAL,
+ },
+ status: kernel::bindings::kunit_status_KUNIT_SUCCESS,
+ module_name: core::ptr::null_mut(),
+ log: core::ptr::null_mut(),
+ }
+}
+
+/// Registers a KUnit test suite.
+///
+/// # Safety
+///
+/// `test_cases` must be a NULL-terminated array of valid test cases,
+/// whose lifetime is at least that of the test suite (i.e., static).
+///
+/// # Examples
+///
+/// ```ignore
+/// extern "C" fn test_fn(_test: *mut kernel::bindings::kunit) {
+/// let actual = 1 + 1;
+/// let expected = 2;
+/// assert_eq!(actual, expected);
+/// }
+///
+/// static mut KUNIT_TEST_CASES: [kernel::bindings::kunit_case; 2] = [
+/// kernel::kunit::kunit_case(kernel::c_str!("name"), test_fn),
+/// kernel::kunit::kunit_case_null(),
+/// ];
+/// kernel::kunit_unsafe_test_suite!(suite_name, KUNIT_TEST_CASES);
+/// ```
+#[doc(hidden)]
+#[macro_export]
+macro_rules! kunit_unsafe_test_suite {
+ ($name:ident, $test_cases:ident) => {
+ const _: () = {
+ const KUNIT_TEST_SUITE_NAME: [::kernel::ffi::c_char; 256] = {
+ let name_u8 = ::core::stringify!($name).as_bytes();
+ let mut ret = [0; 256];
+
+ if name_u8.len() > 255 {
+ panic!(concat!(
+ "The test suite name `",
+ ::core::stringify!($name),
+ "` exceeds the maximum length of 255 bytes."
+ ));
+ }
+
+ let mut i = 0;
+ while i < name_u8.len() {
+ ret[i] = name_u8[i] as ::kernel::ffi::c_char;
+ i += 1;
+ }
+
+ ret
+ };
+
+ static mut KUNIT_TEST_SUITE: ::kernel::bindings::kunit_suite =
+ ::kernel::bindings::kunit_suite {
+ name: KUNIT_TEST_SUITE_NAME,
+ #[allow(unused_unsafe)]
+ // SAFETY: `$test_cases` is passed in by the user, and
+ // (as documented) must be valid for the lifetime of
+ // the suite (i.e., static).
+ test_cases: unsafe {
+ ::core::ptr::addr_of_mut!($test_cases)
+ .cast::<::kernel::bindings::kunit_case>()
+ },
+ suite_init: None,
+ suite_exit: None,
+ init: None,
+ exit: None,
+ attr: ::kernel::bindings::kunit_attributes {
+ speed: ::kernel::bindings::kunit_speed_KUNIT_SPEED_NORMAL,
+ },
+ status_comment: [0; 256usize],
+ debugfs: ::core::ptr::null_mut(),
+ log: ::core::ptr::null_mut(),
+ suite_init_err: 0,
+ is_init: false,
+ };
+
+ #[used]
+ #[allow(unused_unsafe)]
+ #[cfg_attr(not(target_os = "macos"), link_section = ".kunit_test_suites")]
+ static mut KUNIT_TEST_SUITE_ENTRY: *const ::kernel::bindings::kunit_suite =
+ // SAFETY: `KUNIT_TEST_SUITE` is static.
+ unsafe { ::core::ptr::addr_of_mut!(KUNIT_TEST_SUITE) };
+ };
+ };
+}
+
+/// Returns whether we are currently running a KUnit test.
+///
+/// In some cases, you need to call test-only code from outside the test case, for example, to
+/// create a function mock. This function allows changing behavior depending on whether we are
+/// currently running a KUnit test or not.
+///
+/// # Examples
+///
+/// This example shows how a function can be mocked to return a well-known value while testing:
+///
+/// ```
+/// # use kernel::kunit::in_kunit_test;
+/// fn fn_mock_example(n: i32) -> i32 {
+/// if in_kunit_test() {
+/// return 100;
+/// }
+///
+/// n + 1
+/// }
+///
+/// let mock_res = fn_mock_example(5);
+/// assert_eq!(mock_res, 100);
+/// ```
+pub fn in_kunit_test() -> bool {
+ // SAFETY: `kunit_get_current_test()` is always safe to call (it has fallbacks for
+ // when KUnit is not enabled).
+ !unsafe { bindings::kunit_get_current_test() }.is_null()
+}
+
+#[kunit_tests(rust_kernel_kunit)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn rust_test_kunit_example_test() {
+ #![expect(clippy::eq_op)]
+ assert_eq!(1 + 1, 2);
+ }
+
+ #[test]
+ fn rust_test_kunit_in_kunit_test() {
+ assert!(in_kunit_test());
+ }
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 6858e2f8a3ed..911c72a0fc21 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -6,19 +6,26 @@
//! usage by Rust code in the kernel and is shared by all of them.
//!
//! In other words, all the rest of the Rust code in the kernel (e.g. kernel
-//! modules written in Rust) depends on [`core`], [`alloc`] and this crate.
+//! modules written in Rust) depends on [`core`] and this crate.
//!
//! If you need a kernel C API that is not ported or wrapped yet here, then
//! do so first instead of bypassing this crate.
#![no_std]
-#![feature(allocator_api)]
-#![feature(coerce_unsized)]
-#![feature(dispatch_from_dyn)]
-#![feature(new_uninit)]
-#![feature(offset_of)]
-#![feature(receiver_trait)]
-#![feature(unsize)]
+#![feature(arbitrary_self_types)]
+#![cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, feature(derive_coerce_pointee))]
+#![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(coerce_unsized))]
+#![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(dispatch_from_dyn))]
+#![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(unsize))]
+#![feature(inline_const)]
+#![feature(lint_reasons)]
+// Stable in Rust 1.82
+#![feature(raw_ref_op)]
+// Stable in Rust 1.83
+#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_mut_refs)]
+#![feature(const_ptr_write)]
+#![feature(const_refs_to_cell)]
// Ensure conditional compilation based on the kernel configuration works;
// otherwise we may silently break things like initcall handling.
@@ -28,19 +35,61 @@ compile_error!("Missing kernel configuration for conditional compilation");
// Allow proc-macros to refer to `::kernel` inside the `kernel` crate (this crate).
extern crate self as kernel;
-#[cfg(not(test))]
-#[cfg(not(testlib))]
-mod allocator;
-mod build_assert;
+pub use ffi;
+
+pub mod alloc;
+#[cfg(CONFIG_AUXILIARY_BUS)]
+pub mod auxiliary;
+#[cfg(CONFIG_BLOCK)]
+pub mod block;
+#[doc(hidden)]
+pub mod build_assert;
+pub mod clk;
+#[cfg(CONFIG_CONFIGFS_FS)]
+pub mod configfs;
+pub mod cpu;
+#[cfg(CONFIG_CPU_FREQ)]
+pub mod cpufreq;
+pub mod cpumask;
+pub mod cred;
+pub mod device;
+pub mod device_id;
+pub mod devres;
+pub mod dma;
+pub mod driver;
+#[cfg(CONFIG_DRM = "y")]
+pub mod drm;
pub mod error;
+pub mod faux;
+#[cfg(CONFIG_RUST_FW_LOADER_ABSTRACTIONS)]
+pub mod firmware;
+pub mod fs;
pub mod init;
+pub mod io;
pub mod ioctl;
+pub mod jump_label;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
+pub mod list;
+pub mod miscdevice;
+pub mod mm;
#[cfg(CONFIG_NET)]
pub mod net;
+pub mod of;
+#[cfg(CONFIG_PM_OPP)]
+pub mod opp;
+pub mod page;
+#[cfg(CONFIG_PCI)]
+pub mod pci;
+pub mod pid_namespace;
+pub mod platform;
pub mod prelude;
pub mod print;
+pub mod rbtree;
+pub mod revocable;
+pub mod security;
+pub mod seq_file;
+pub mod sizes;
mod static_assert;
#[doc(hidden)]
pub mod std_vendor;
@@ -48,7 +97,10 @@ pub mod str;
pub mod sync;
pub mod task;
pub mod time;
+pub mod tracepoint;
+pub mod transmute;
pub mod types;
+pub mod uaccess;
pub mod workqueue;
#[doc(hidden)]
@@ -56,9 +108,6 @@ pub use bindings;
pub use macros;
pub use uapi;
-#[doc(hidden)]
-pub use build_error::build_error;
-
/// Prefix to appear before log messages printed from within the `kernel` crate.
const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
@@ -75,9 +124,38 @@ pub trait Module: Sized + Sync + Send {
fn init(module: &'static ThisModule) -> error::Result<Self>;
}
+/// A module that is pinned and initialised in-place.
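+///
+/// Note that implementing [`Module`] is usually enough: a blanket implementation of this trait
+/// for all [`Module`] types is provided below.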
+pub trait InPlaceModule: Sync + Send {
+ /// Creates an initialiser for the module.
+ ///
+ /// It is called when the module is loaded.
+ fn init(module: &'static ThisModule) -> impl pin_init::PinInit<Self, error::Error>;
+}
+
+impl<T: Module> InPlaceModule for T {
+ fn init(module: &'static ThisModule) -> impl pin_init::PinInit<Self, error::Error> {
+ let initer = move |slot: *mut Self| {
+ let m = <Self as Module>::init(module)?;
+
+ // SAFETY: `slot` is valid for write per the contract with `pin_init_from_closure`.
+ unsafe { slot.write(m) };
+ Ok(())
+ };
+
+ // SAFETY: On success, `initer` always fully initialises an instance of `Self`.
+ unsafe { pin_init::pin_init_from_closure(initer) }
+ }
+}
+
+/// Metadata attached to a [`Module`] or [`InPlaceModule`].
+pub trait ModuleMetadata {
+ /// The name of the module as specified in the `module!` macro.
+ const NAME: &'static crate::str::CStr;
+}
+
/// Equivalent to `THIS_MODULE` in the C API.
///
-/// C header: [`include/linux/export.h`](srctree/include/linux/export.h)
+/// C header: [`include/linux/init.h`](srctree/include/linux/init.h)
pub struct ThisModule(*mut bindings::module);
// SAFETY: `THIS_MODULE` may be used from all threads within a module.
@@ -92,6 +170,13 @@ impl ThisModule {
pub const unsafe fn from_ptr(ptr: *mut bindings::module) -> ThisModule {
ThisModule(ptr)
}
+
+ /// Access the raw pointer for this module.
+ ///
+ /// It is up to the user to use it correctly.
+ pub const fn as_ptr(&self) -> *mut bindings::module {
+ self.0
+ }
}
#[cfg(not(any(testlib, test)))]
@@ -133,3 +218,38 @@ macro_rules! container_of {
ptr.sub(offset) as *const $type
}}
}
+
+/// Helper for `.rs.S` files.
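+///
+/// # Examples
+///
+/// A small usage sketch; the literals are simply forwarded to [`core::concat!`]:
+///
+/// ```
+/// assert_eq!(kernel::concat_literals!("foo" "bar"), "foobar");
+/// ```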
+#[doc(hidden)]
+#[macro_export]
+macro_rules! concat_literals {
+ ($( $asm:literal )* ) => {
+ ::core::concat!($($asm),*)
+ };
+}
+
+/// Wrapper around `asm!` configured for use in the kernel.
+///
+/// Uses a semicolon to avoid parsing ambiguities, even though this does not match native `asm!`
+/// syntax.
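+///
+/// # Examples
+///
+/// A minimal sketch (note the semicolon separating the template arguments from the operands):
+///
+/// ```ignore
+/// unsafe { kernel::asm!("nop"; ); }
+/// ```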
+// For x86, `asm!` uses Intel syntax by default, but we want to use AT&T syntax in the kernel.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+#[macro_export]
+macro_rules! asm {
+ ($($asm:expr),* ; $($rest:tt)*) => {
+ ::core::arch::asm!( $($asm),*, options(att_syntax), $($rest)* )
+ };
+}
+
+/// Wrapper around `asm!` configured for use in the kernel.
+///
+/// Uses a semicolon to avoid parsing ambiguities, even though this does not match native `asm!`
+/// syntax.
+// For non-x86 arches we just pass through to `asm!`.
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+#[macro_export]
+macro_rules! asm {
+ ($($asm:expr),* ; $($rest:tt)*) => {
+ ::core::arch::asm!( $($asm),*, $($rest)* )
+ };
+}
diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
new file mode 100644
index 000000000000..2054682c5724
--- /dev/null
+++ b/rust/kernel/list.rs
@@ -0,0 +1,977 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A linked list implementation.
+
+// May not be needed in Rust 1.87.0 (pending beta backport).
+#![allow(clippy::ptr_eq)]
+
+use crate::sync::ArcBorrow;
+use crate::types::Opaque;
+use core::iter::{DoubleEndedIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::ptr;
+use pin_init::PinInit;
+
+mod impl_list_item_mod;
+pub use self::impl_list_item_mod::{
+ impl_has_list_links, impl_has_list_links_self_ptr, impl_list_item, HasListLinks, HasSelfPtr,
+};
+
+mod arc;
+pub use self::arc::{impl_list_arc_safe, AtomicTracker, ListArc, ListArcSafe, TryNewListArc};
+
+mod arc_field;
+pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
+
+/// A linked list.
+///
+/// All elements in this linked list will be [`ListArc`] references to the value. Since a value can
+/// only have one `ListArc` (for each pair of prev/next pointers), this ensures that the same
+/// prev/next pointers are not used for several linked lists.
+///
+/// # Invariants
+///
+/// * If the list is empty, then `first` is null. Otherwise, `first` points at the `ListLinks`
+/// field of the first element in the list.
+/// * All prev/next pointers in `ListLinks` fields of items in the list are valid and form a cycle.
+/// * For every item in the list, the list owns the associated [`ListArc`] reference and has
+/// exclusive access to the `ListLinks` field.
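+///
+/// # Examples
+///
+/// A minimal sketch, assuming a hypothetical item type `Foo` that implements [`ListItem`] via the
+/// helper macros (see the examples on [`Cursor`] for the full pattern):
+///
+/// ```ignore
+/// let mut list = List::<Foo>::new();
+/// list.push_back(foo); // `foo: ListArc<Foo>` obtained elsewhere.
+/// assert!(!list.is_empty());
+/// let first = list.pop_front();
+/// ```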
+pub struct List<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ first: *mut ListLinksFields,
+ _ty: PhantomData<ListArc<T, ID>>,
+}
+
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Send for List<T, ID>
+where
+ ListArc<T, ID>: Send,
+ T: ?Sized + ListItem<ID>,
+{
+}
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Sync for List<T, ID>
+where
+ ListArc<T, ID>: Sync,
+ T: ?Sized + ListItem<ID>,
+{
+}
+
+/// Implemented by types where a [`ListArc<Self>`] can be inserted into a [`List`].
+///
+/// # Safety
+///
+/// Implementers must ensure that they provide the guarantees documented on methods provided by
+/// this trait.
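+///
+/// In practice, this trait is implemented via the [`impl_list_item!`] macro rather than by hand;
+/// the examples on [`Cursor`] show the intended pattern.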
+///
+/// [`ListArc<Self>`]: ListArc
+pub unsafe trait ListItem<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Views the [`ListLinks`] for this value.
+ ///
+ /// # Guarantees
+ ///
+ /// If there is a previous call to `prepare_to_insert` and there is no call to `post_remove`
+ /// since the most recent such call, then this returns the same pointer as the one returned by
+ /// the most recent call to `prepare_to_insert`.
+ ///
+ /// Otherwise, the returned pointer points at a read-only [`ListLinks`] with two null pointers.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid value. (It need not be in an `Arc`.)
+ unsafe fn view_links(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// View the full value given its [`ListLinks`] field.
+ ///
+ /// Can only be used when the value is in a list.
+ ///
+ /// # Guarantees
+ ///
+ /// * Returns the same pointer as the one passed to the most recent call to `prepare_to_insert`.
+ /// * The returned pointer is valid until the next call to `post_remove`.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must originate from the most recent call to `prepare_to_insert`, or
+ /// from a call to `view_links` that happened after the most recent call to
+ /// `prepare_to_insert`.
+ /// * Since the most recent call to `prepare_to_insert`, the `post_remove` method must not have
+ /// been called.
+ unsafe fn view_value(me: *mut ListLinks<ID>) -> *const Self;
+
+ /// This is called when an item is inserted into a [`List`].
+ ///
+ /// # Guarantees
+ ///
+ /// The caller is granted exclusive access to the returned [`ListLinks`] until `post_remove` is
+ /// called.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must point at a valid value in an [`Arc`].
+ /// * Calls to `prepare_to_insert` and `post_remove` on the same value must alternate.
+ /// * The caller must own the [`ListArc`] for this value.
+ /// * The caller must not give up ownership of the [`ListArc`] unless `post_remove` has been
+ /// called after this call to `prepare_to_insert`.
+ ///
+ /// [`Arc`]: crate::sync::Arc
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// This undoes a previous call to `prepare_to_insert`.
+ ///
+ /// # Guarantees
+ ///
+ /// The returned pointer is the pointer that was originally passed to `prepare_to_insert`.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must be the pointer returned by the most recent call to
+ /// `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut ListLinks<ID>) -> *const Self;
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct ListLinksFields {
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+}
+
+/// The prev/next pointers for an item in a linked list.
+///
+/// # Invariants
+///
+/// The fields are null if and only if this item is not in a list.
+#[repr(transparent)]
+pub struct ListLinks<const ID: u64 = 0> {
+ // This type is `!Unpin` for aliasing reasons as the pointers are part of an intrusive linked
+ // list.
+ inner: Opaque<ListLinksFields>,
+}
+
+// SAFETY: The only way to access/modify the pointers inside of `ListLinks<ID>` is via holding the
+// associated `ListArc<T, ID>`. Since that type correctly implements `Send`, it is impossible to
+// move an instance of this type to a different thread if the pointees are `!Send`.
+unsafe impl<const ID: u64> Send for ListLinks<ID> {}
+// SAFETY: The type is opaque, so immutable references to a `ListLinks` are useless. Therefore,
+// it's okay to have immutable access to a `ListLinks` from several threads at once.
+unsafe impl<const ID: u64> Sync for ListLinks<ID> {}
+
+impl<const ID: u64> ListLinks<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn fields(me: *mut Self) -> *mut ListLinksFields {
+ // SAFETY: The caller promises that the pointer is valid.
+ unsafe { Opaque::raw_get(ptr::addr_of!((*me).inner)) }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn from_fields(me: *mut ListLinksFields) -> *mut Self {
+ me.cast()
+ }
+}
+
+/// Similar to [`ListLinks`], but also contains a pointer to the full value.
+///
+/// This type can be used instead of [`ListLinks`] to support lists with trait objects.
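+///
+/// # Examples
+///
+/// A minimal sketch, assuming a hypothetical trait `MyTrait` implemented by the list items:
+///
+/// ```ignore
+/// struct MyItem {
+///     links: ListLinksSelfPtr<dyn MyTrait>,
+/// }
+/// ```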
+#[repr(C)]
+pub struct ListLinksSelfPtr<T: ?Sized, const ID: u64 = 0> {
+ /// The `ListLinks` field inside this value.
+ ///
+ /// This is public so that it can be used with `impl_has_list_links!`.
+ pub inner: ListLinks<ID>,
+ // UnsafeCell is not enough here because we use `Opaque::uninit` as a dummy value, and
+ // `ptr::null()` doesn't work for `T: ?Sized`.
+ self_ptr: Opaque<*const T>,
+}
+
+// SAFETY: The fields of a `ListLinksSelfPtr` can be moved across thread boundaries.
+unsafe impl<T: ?Sized + Send, const ID: u64> Send for ListLinksSelfPtr<T, ID> {}
+// SAFETY: The type is opaque, so immutable references to a `ListLinksSelfPtr` are useless.
+// Therefore, it's okay to have immutable access to a `ListLinksSelfPtr` from several threads at
+// once.
+//
+// Note that `inner` being a public field does not prevent this type from being opaque, since
+// `inner` is an opaque type.
+unsafe impl<T: ?Sized + Sync, const ID: u64> Sync for ListLinksSelfPtr<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
+ /// The offset from the [`ListLinks`] to the self pointer field.
+ pub const LIST_LINKS_SELF_PTR_OFFSET: usize = core::mem::offset_of!(Self, self_ptr);
+
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ },
+ self_ptr: Opaque::uninit(),
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {
+ /// Creates a new empty list.
+ pub const fn new() -> Self {
+ Self {
+ first: ptr::null_mut(),
+ _ty: PhantomData,
+ }
+ }
+
+ /// Returns whether this list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.first.is_null()
+ }
+
+ /// Inserts `item` before `next` in the cycle.
+ ///
+ /// Returns a pointer to the newly inserted element. Never changes `self.first` unless the list
+ /// is empty.
+ ///
+ /// # Safety
+ ///
+ /// * `next` must be an element in this list or null.
+ /// * If `next` is null, then the list must be empty.
+ unsafe fn insert_inner(
+ &mut self,
+ item: ListArc<T, ID>,
+ next: *mut ListLinksFields,
+ ) -> *mut ListLinksFields {
+ let raw_item = ListArc::into_raw(item);
+ // SAFETY:
+ // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+ // * Since we have ownership of the `ListArc`, `post_remove` must have been called after
+ // the most recent call to `prepare_to_insert`, if any.
+ // * We own the `ListArc`.
+ // * Removing items from this list is always done using `remove_internal_inner`, which
+ // calls `post_remove` before giving up ownership.
+ let list_links = unsafe { T::prepare_to_insert(raw_item) };
+ // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+ let item = unsafe { ListLinks::fields(list_links) };
+
+ // Check if the list is empty.
+ if next.is_null() {
+ // SAFETY: The caller just gave us ownership of these fields.
+ // INVARIANT: A linked list with one item should be cyclic.
+ unsafe {
+ (*item).next = item;
+ (*item).prev = item;
+ }
+ self.first = item;
+ } else {
+ // SAFETY: By the type invariant, this pointer is valid or null. We just checked that
+ // it's not null, so it must be valid.
+ let prev = unsafe { (*next).prev };
+ // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+ // ownership of the fields on `item`.
+ // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+ unsafe {
+ (*item).next = next;
+ (*item).prev = prev;
+ (*prev).next = item;
+ (*next).prev = item;
+ }
+ }
+
+ item
+ }
+
+ /// Add the provided item to the back of the list.
+ pub fn push_back(&mut self, item: ListArc<T, ID>) {
+ // SAFETY:
+ // * `self.first` is null or in the list.
+ // * `self.first` is only null if the list is empty.
+ unsafe { self.insert_inner(item, self.first) };
+ }
+
+ /// Add the provided item to the front of the list.
+ pub fn push_front(&mut self, item: ListArc<T, ID>) {
+ // SAFETY:
+ // * `self.first` is null or in the list.
+ // * `self.first` is only null if the list is empty.
+ let new_elem = unsafe { self.insert_inner(item, self.first) };
+
+ // INVARIANT: `new_elem` is in the list because we just inserted it.
+ self.first = new_elem;
+ }
+
+ /// Removes the last item from this list.
+ pub fn pop_back(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: We just checked that the list is not empty.
+ let last = unsafe { (*self.first).prev };
+ // SAFETY: The last item of this list is in this list.
+ Some(unsafe { self.remove_internal(last) })
+ }
+
+ /// Removes the first item from this list.
+ pub fn pop_front(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: The first item of this list is in this list.
+ Some(unsafe { self.remove_internal(self.first) })
+ }
+
+ /// Removes the provided item from this list and returns it.
+ ///
+ /// This returns `None` if the item is not in the list. (Note that by the safety requirements,
+ /// this means that the item is not in any list.)
+ ///
+ /// # Safety
+ ///
+ /// `item` must not be in a different linked list (with the same id).
+ pub unsafe fn remove(&mut self, item: &T) -> Option<ListArc<T, ID>> {
+ // SAFETY: The user provided a reference, so the pointer points at a valid value, and
+ // `view_links` then returns a dereferenceable pointer into that value.
+ let mut item = unsafe { ListLinks::fields(T::view_links(item)) };
+ // SAFETY: The user provided a reference, and references are never dangling.
+ //
+ // As for why this is not a data race, there are two cases:
+ //
+ // * If `item` is not in any list, then these fields are read-only and null.
+ // * If `item` is in this list, then we have exclusive access to these fields since we
+ // have a mutable reference to the list.
+ //
+ // In either case, there's no race.
+ let ListLinksFields { next, prev } = unsafe { *item };
+
+ debug_assert_eq!(next.is_null(), prev.is_null());
+ if !next.is_null() {
+ // This is really a no-op, but it ensures that `item` is a raw pointer that was
+ // obtained without going through a pointer->reference->pointer conversion roundtrip,
+ // which keeps the list valid under the more restrictive strict provenance ruleset.
+ //
+ // SAFETY: We just checked that `next` is not null, and it's not dangling by the
+ // list invariants.
+ unsafe {
+ debug_assert_eq!(item, (*next).prev);
+ item = (*next).prev;
+ }
+
+ // SAFETY: We just checked that `item` is in a list, so the caller guarantees that it
+ // is in this list. The pointers are in the right order.
+ Some(unsafe { self.remove_internal_inner(item, next, prev) })
+ } else {
+ None
+ }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// `item` must point at an item in this list.
+ unsafe fn remove_internal(&mut self, item: *mut ListLinksFields) -> ListArc<T, ID> {
+ // SAFETY: The caller promises that this pointer is not dangling, and there's no data race
+ // since we have a mutable reference to the list containing `item`.
+ let ListLinksFields { next, prev } = unsafe { *item };
+ // SAFETY: The pointers are ok and in the right order.
+ unsafe { self.remove_internal_inner(item, next, prev) }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// The `item` pointer must point at an item in this list, and we must have `(*item).next ==
+ /// next` and `(*item).prev == prev`.
+ unsafe fn remove_internal_inner(
+ &mut self,
+ item: *mut ListLinksFields,
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+ ) -> ListArc<T, ID> {
+ // SAFETY: We have exclusive access to the pointers of items in the list, and the prev/next
+ // pointers are always valid for items in a list.
+ //
+ // INVARIANT: There are three cases:
+ // * If the list has at least three items, then after removing the item, `prev` and `next`
+ // will be next to each other.
+ // * If the list has two items, then the remaining item will point at itself.
+ // * If the list has one item, then `next == prev == item`, so these writes have no
+ // effect. The list remains unchanged and `item` is still in the list for now.
+ unsafe {
+ (*next).prev = prev;
+ (*prev).next = next;
+ }
+ // SAFETY: We have exclusive access to items in the list.
+ // INVARIANT: `item` is being removed, so the pointers should be null.
+ unsafe {
+ (*item).prev = ptr::null_mut();
+ (*item).next = ptr::null_mut();
+ }
+ // INVARIANT: There are three cases:
+ // * If `item` was not the first item, then `self.first` should remain unchanged.
+ // * If `item` was the first item and there is another item, then we just updated
+ // `prev->next` to `next`, which is the new first item, and setting `item->next` to null
+ // did not modify `prev->next`.
+ // * If `item` was the only item in the list, then `prev == item`, and we just set
+ // `item->next` to null, so this correctly sets `first` to null now that the list is
+ // empty.
+ if self.first == item {
+ // SAFETY: The `prev` pointer is the value that `item->prev` had when it was in this
+ // list, so it must be valid. There is no race since `prev` is still in the list and we
+ // still have exclusive access to the list.
+ self.first = unsafe { (*prev).next };
+ }
+
+ // SAFETY: `item` used to be in the list, so it is dereferenceable by the type invariants
+ // of `List`.
+ let list_links = unsafe { ListLinks::from_fields(item) };
+ // SAFETY: Any pointer in the list originates from a `prepare_to_insert` call.
+ let raw_item = unsafe { T::post_remove(list_links) };
+ // SAFETY: The above call to `post_remove` guarantees that we can recreate the `ListArc`.
+ unsafe { ListArc::from_raw(raw_item) }
+ }
+
+ /// Moves all items from `other` into `self`.
+ ///
+ /// The items of `other` are added to the back of `self`, so the last item of `other` becomes
+ /// the last item of `self`.
+ pub fn push_all_back(&mut self, other: &mut List<T, ID>) {
+ // First, we insert the elements into `self`. At the end, we make `other` empty.
+ if self.is_empty() {
+ // INVARIANT: All of the elements in `other` become elements of `self`.
+ self.first = other.first;
+ } else if !other.is_empty() {
+ let other_first = other.first;
+ // SAFETY: The other list is not empty, so this pointer is valid.
+ let other_last = unsafe { (*other_first).prev };
+ let self_first = self.first;
+ // SAFETY: The self list is not empty, so this pointer is valid.
+ let self_last = unsafe { (*self_first).prev };
+
+ // SAFETY: We have exclusive access to both lists, so we can update the pointers.
+ // INVARIANT: This correctly sets the pointers to merge both lists. We do not need to
+ // update `self.first` because the first element of `self` does not change.
+ unsafe {
+ (*self_first).prev = other_last;
+ (*other_last).next = self_first;
+ (*self_last).next = other_first;
+ (*other_first).prev = self_last;
+ }
+ }
+
+ // INVARIANT: The other list is now empty, so update its pointer.
+ other.first = ptr::null_mut();
+ }
+
+ /// Returns a cursor that points before the first element of the list.
+ pub fn cursor_front(&mut self) -> Cursor<'_, T, ID> {
+ // INVARIANT: `self.first` is in this list.
+ Cursor {
+ next: self.first,
+ list: self,
+ }
+ }
+
+ /// Returns a cursor that points after the last element in the list.
+ pub fn cursor_back(&mut self) -> Cursor<'_, T, ID> {
+ // INVARIANT: `next` is allowed to be null.
+ Cursor {
+ next: core::ptr::null_mut(),
+ list: self,
+ }
+ }
+
+ /// Creates an iterator over the list.
+ pub fn iter(&self) -> Iter<'_, T, ID> {
+ // INVARIANT: If the list is empty, both pointers are null. Otherwise, both pointers point
+ // at the first element of the same list.
+ Iter {
+ current: self.first,
+ stop: self.first,
+ _ty: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Default for List<T, ID> {
+ fn default() -> Self {
+ List::new()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Drop for List<T, ID> {
+ fn drop(&mut self) {
+ while let Some(item) = self.pop_front() {
+ drop(item);
+ }
+ }
+}
+
+/// An iterator over a [`List`].
+///
+/// # Invariants
+///
+/// * There must be a [`List`] that is immutably borrowed for the duration of `'a`.
+/// * The `current` pointer is null or points at a value in that [`List`].
+/// * The `stop` pointer is equal to the `first` field of that [`List`].
+#[derive(Clone)]
+pub struct Iter<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ current: *mut ListLinksFields,
+ stop: *mut ListLinksFields,
+ _ty: PhantomData<&'a ListArc<T, ID>>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
+ type Item = ArcBorrow<'a, T>;
+
+ fn next(&mut self) -> Option<ArcBorrow<'a, T>> {
+ if self.current.is_null() {
+ return None;
+ }
+
+ let current = self.current;
+
+ // SAFETY: We just checked that `current` is not null, so it is in a list, and hence not
+ // dangling. There's no race because the iterator holds an immutable borrow to the list.
+ let next = unsafe { (*current).next };
+ // INVARIANT: If `current` was the last element of the list, then this updates it to null.
+ // Otherwise, we update it to the next element.
+ self.current = if next != self.stop {
+ next
+ } else {
+ ptr::null_mut()
+ };
+
+ // SAFETY: The `current` pointer points at a value in the list.
+ let item = unsafe { T::view_value(ListLinks::from_fields(current)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the list. However, the `ArcBorrow` is annotated with the iterator's
+ // lifetime, and the list is immutably borrowed for that lifetime.
+ // * Values in a list never have a `UniqueArc` reference.
+ Some(unsafe { ArcBorrow::from_raw(item) })
+ }
+}
+
+/// A cursor into a [`List`].
+///
+/// A cursor always rests between two elements in the list. This means that a cursor has a previous
+/// and next element, but no current element. It also means that it's possible to have a cursor
+/// into an empty list.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::prelude::*;
+/// use kernel::list::{List, ListArc, ListLinks};
+///
+/// #[pin_data]
+/// struct ListItem {
+/// value: u32,
+/// #[pin]
+/// links: ListLinks,
+/// }
+///
+/// impl ListItem {
+/// fn new(value: u32) -> Result<ListArc<Self>> {
+/// ListArc::pin_init(try_pin_init!(Self {
+/// value,
+/// links <- ListLinks::new(),
+/// }), GFP_KERNEL)
+/// }
+/// }
+///
+/// kernel::list::impl_has_list_links! {
+/// impl HasListLinks<0> for ListItem { self.links }
+/// }
+/// kernel::list::impl_list_arc_safe! {
+/// impl ListArcSafe<0> for ListItem { untracked; }
+/// }
+/// kernel::list::impl_list_item! {
+/// impl ListItem<0> for ListItem { using ListLinks; }
+/// }
+///
+/// // Use a cursor to remove the first element with the given value.
+/// fn remove_first(list: &mut List<ListItem>, value: u32) -> Option<ListArc<ListItem>> {
+/// let mut cursor = list.cursor_front();
+/// while let Some(next) = cursor.peek_next() {
+/// if next.value == value {
+/// return Some(next.remove());
+/// }
+/// cursor.move_next();
+/// }
+/// None
+/// }
+///
+/// // Use a cursor to remove the last element with the given value.
+/// fn remove_last(list: &mut List<ListItem>, value: u32) -> Option<ListArc<ListItem>> {
+/// let mut cursor = list.cursor_back();
+/// while let Some(prev) = cursor.peek_prev() {
+/// if prev.value == value {
+/// return Some(prev.remove());
+/// }
+/// cursor.move_prev();
+/// }
+/// None
+/// }
+///
+/// // Use a cursor to remove all elements with the given value. The removed elements are moved to
+/// // a new list.
+/// fn remove_all(list: &mut List<ListItem>, value: u32) -> List<ListItem> {
+/// let mut out = List::new();
+/// let mut cursor = list.cursor_front();
+/// while let Some(next) = cursor.peek_next() {
+/// if next.value == value {
+/// out.push_back(next.remove());
+/// } else {
+/// cursor.move_next();
+/// }
+/// }
+/// out
+/// }
+///
+/// // Use a cursor to insert a value at a specific index. Returns an error if the index is out of
+/// // bounds.
+/// fn insert_at(list: &mut List<ListItem>, new: ListArc<ListItem>, idx: usize) -> Result {
+/// let mut cursor = list.cursor_front();
+/// for _ in 0..idx {
+/// if !cursor.move_next() {
+/// return Err(EINVAL);
+/// }
+/// }
+/// cursor.insert_next(new);
+/// Ok(())
+/// }
+///
+/// // Merge two sorted lists into a single sorted list.
+/// fn merge_sorted(list: &mut List<ListItem>, merge: List<ListItem>) {
+/// let mut cursor = list.cursor_front();
+/// for to_insert in merge {
+/// while let Some(next) = cursor.peek_next() {
+/// if to_insert.value < next.value {
+/// break;
+/// }
+/// cursor.move_next();
+/// }
+/// cursor.insert_prev(to_insert);
+/// }
+/// }
+///
+/// let mut list = List::new();
+/// list.push_back(ListItem::new(14)?);
+/// list.push_back(ListItem::new(12)?);
+/// list.push_back(ListItem::new(10)?);
+/// list.push_back(ListItem::new(12)?);
+/// list.push_back(ListItem::new(15)?);
+/// list.push_back(ListItem::new(14)?);
+/// assert_eq!(remove_all(&mut list, 12).iter().count(), 2);
+/// // [14, 10, 15, 14]
+/// assert!(remove_first(&mut list, 14).is_some());
+/// // [10, 15, 14]
+/// insert_at(&mut list, ListItem::new(12)?, 2)?;
+/// // [10, 15, 12, 14]
+/// assert!(remove_last(&mut list, 15).is_some());
+/// // [10, 12, 14]
+///
+/// let mut list2 = List::new();
+/// list2.push_back(ListItem::new(11)?);
+/// list2.push_back(ListItem::new(13)?);
+/// merge_sorted(&mut list, list2);
+///
+/// let mut items = list.into_iter();
+/// assert_eq!(items.next().unwrap().value, 10);
+/// assert_eq!(items.next().unwrap().value, 11);
+/// assert_eq!(items.next().unwrap().value, 12);
+/// assert_eq!(items.next().unwrap().value, 13);
+/// assert_eq!(items.next().unwrap().value, 14);
+/// assert!(items.next().is_none());
+/// # Result::<(), Error>::Ok(())
+/// ```
+///
+/// # Invariants
+///
+/// The `next` pointer is null or points at a value in `list`.
+pub struct Cursor<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ list: &'a mut List<T, ID>,
+ /// Points at the element after this cursor, or null if the cursor is after the last element.
+ next: *mut ListLinksFields,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Cursor<'a, T, ID> {
+ /// Returns a pointer to the element before the cursor.
+ ///
+ /// Returns null if there is no element before the cursor.
+ fn prev_ptr(&self) -> *mut ListLinksFields {
+ let mut next = self.next;
+ let first = self.list.first;
+ if next == first {
+ // We are before the first element.
+ return core::ptr::null_mut();
+ }
+
+ if next.is_null() {
+ // We are after the last element, so we need a pointer to the last element, which is
+ // the same as `(*first).prev`.
+ next = first;
+ }
+
+ // SAFETY: `next` can't be null, because then `first` must also be null, but in that case
+ // we would have exited at the `next == first` check. Thus, `next` is an element in the
+ // list, so we can access its `prev` pointer.
+ unsafe { (*next).prev }
+ }
+
+ /// Access the element after this cursor.
+ pub fn peek_next(&mut self) -> Option<CursorPeek<'_, 'a, T, true, ID>> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ // INVARIANT:
+ // * We just checked that `self.next` is non-null, so it must be in `self.list`.
+ // * `ptr` is equal to `self.next`.
+ Some(CursorPeek {
+ ptr: self.next,
+ cursor: self,
+ })
+ }
+
+ /// Access the element before this cursor.
+ pub fn peek_prev(&mut self) -> Option<CursorPeek<'_, 'a, T, false, ID>> {
+ let prev = self.prev_ptr();
+
+ if prev.is_null() {
+ return None;
+ }
+
+ // INVARIANT:
+ // * We just checked that `prev` is non-null, so it must be in `self.list`.
+ // * `self.prev_ptr()` never returns `self.next`.
+ Some(CursorPeek {
+ ptr: prev,
+ cursor: self,
+ })
+ }
+
+ /// Move the cursor one element forward.
+ ///
+ /// If the cursor is after the last element, then this call does nothing. This call returns
+ /// `true` if the cursor's position was changed.
+ pub fn move_next(&mut self) -> bool {
+ if self.next.is_null() {
+ return false;
+ }
+
+ // SAFETY: `self.next` is an element in the list and we borrow the list mutably, so we can
+ // access the `next` field.
+ let mut next = unsafe { (*self.next).next };
+
+ if next == self.list.first {
+ next = core::ptr::null_mut();
+ }
+
+ // INVARIANT: `next` is either null or the next element after an element in the list.
+ self.next = next;
+ true
+ }
+
+ /// Move the cursor one element backwards.
+ ///
+ /// If the cursor is before the first element, then this call does nothing. This call returns
+ /// `true` if the cursor's position was changed.
+ pub fn move_prev(&mut self) -> bool {
+ if self.next == self.list.first {
+ return false;
+ }
+
+ // INVARIANT: `prev_ptr()` always returns a pointer that is null or in the list.
+ self.next = self.prev_ptr();
+ true
+ }
+
+ /// Inserts an element where the cursor is pointing and get a pointer to the new element.
+ fn insert_inner(&mut self, item: ListArc<T, ID>) -> *mut ListLinksFields {
+ let ptr = if self.next.is_null() {
+ self.list.first
+ } else {
+ self.next
+ };
+ // SAFETY:
+ // * `ptr` is an element in the list or null.
+ // * If `ptr` is null, then `self.list.first` is null, so the list is empty.
+ let item = unsafe { self.list.insert_inner(item, ptr) };
+ if self.next == self.list.first {
+ // INVARIANT: We just inserted `item`, so it's a member of list.
+ self.list.first = item;
+ }
+ item
+ }
+
+ /// Insert an element at this cursor's location.
+ pub fn insert(mut self, item: ListArc<T, ID>) {
+ // This is identical to `insert_prev`, but consumes the cursor. This is helpful because it
+ // reduces confusion when the last operation on the cursor is an insertion; in that case,
+ // you just want to insert the element at the cursor, and it is confusing that the call
+ // involves the word prev or next.
+ self.insert_inner(item);
+ }
+
+ /// Inserts an element after this cursor.
+ ///
+ /// After insertion, the new element will be after the cursor.
+ pub fn insert_next(&mut self, item: ListArc<T, ID>) {
+ self.next = self.insert_inner(item);
+ }
+
+ /// Inserts an element before this cursor.
+ ///
+ /// After insertion, the new element will be before the cursor.
+ pub fn insert_prev(&mut self, item: ListArc<T, ID>) {
+ self.insert_inner(item);
+ }
+
+ /// Remove the next element from the list.
+ pub fn remove_next(&mut self) -> Option<ListArc<T, ID>> {
+ self.peek_next().map(|v| v.remove())
+ }
+
+ /// Remove the previous element from the list.
+ pub fn remove_prev(&mut self) -> Option<ListArc<T, ID>> {
+ self.peek_prev().map(|v| v.remove())
+ }
+}
+
+/// References the element in the list next to the cursor.
+///
+/// # Invariants
+///
+/// * `ptr` is an element in `self.cursor.list`.
+/// * `ISNEXT == (self.ptr == self.cursor.next)`.
+pub struct CursorPeek<'a, 'b, T: ?Sized + ListItem<ID>, const ISNEXT: bool, const ID: u64> {
+ cursor: &'a mut Cursor<'b, T, ID>,
+ ptr: *mut ListLinksFields,
+}
+
+impl<'a, 'b, T: ?Sized + ListItem<ID>, const ISNEXT: bool, const ID: u64>
+ CursorPeek<'a, 'b, T, ISNEXT, ID>
+{
+ /// Remove the element from the list.
+ pub fn remove(self) -> ListArc<T, ID> {
+ if ISNEXT {
+ self.cursor.move_next();
+ }
+
+ // INVARIANT: `self.ptr` is not equal to `self.cursor.next`: either it already pointed at
+ // the previous element, or the `move_next` call above advanced the cursor past it.
+ // SAFETY: By the type invariants of `Self`, `self.ptr` is an element of
+ // `self.cursor.list`, so it may be passed to `remove_internal`.
+ unsafe { self.cursor.list.remove_internal(self.ptr) }
+ }
+
+ /// Access this value as an [`ArcBorrow`].
+ pub fn arc(&self) -> ArcBorrow<'_, T> {
+ // SAFETY: `self.ptr` points at an element in `self.cursor.list`.
+ let me = unsafe { T::view_value(ListLinks::from_fields(self.ptr)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the `CursorPeek`, the `Cursor` or the `List`. However, the `ArcBorrow` holds
+ // an immutable borrow on the `CursorPeek`, which in turn holds a mutable borrow on the
+ // `Cursor`, which in turn holds a mutable borrow on the `List`, so any such mutable
+ // access requires first releasing the immutable borrow on the `CursorPeek`.
+ // * Values in a list never have a `UniqueArc` reference, because the list has a `ListArc`
+ // reference, and `UniqueArc` references must be unique.
+ unsafe { ArcBorrow::from_raw(me) }
+ }
+}
+
+impl<'a, 'b, T: ?Sized + ListItem<ID>, const ISNEXT: bool, const ID: u64> core::ops::Deref
+ for CursorPeek<'a, 'b, T, ISNEXT, ID>
+{
+ // If you change the `ptr` field to have type `ArcBorrow<'a, T>`, it might seem like you could
+ // get rid of the `CursorPeek::arc` method and change the deref target to `ArcBorrow<'a, T>`.
+ // However, that doesn't work because 'a is too long. You could obtain an `ArcBorrow<'a, T>`
+ // and then call `CursorPeek::remove` without giving up the `ArcBorrow<'a, T>`, which would be
+ // unsound.
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: `self.ptr` points at an element in `self.cursor.list`.
+ let me = unsafe { T::view_value(ListLinks::from_fields(self.ptr)) };
+
+ // SAFETY: The value cannot be removed from the list for the duration of the lifetime
+ // annotated on the returned `&T`, because removing it from the list would require mutable
+ // access to the `CursorPeek`, the `Cursor` or the `List`. However, the `&T` holds an
+ // immutable borrow on the `CursorPeek`, which in turn holds a mutable borrow on the
+ // `Cursor`, which in turn holds a mutable borrow on the `List`, so any such mutable access
+ // requires first releasing the immutable borrow on the `CursorPeek`.
+ unsafe { &*me }
+ }
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for Iter<'a, T, ID> {}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for &'a List<T, ID> {
+ type IntoIter = Iter<'a, T, ID>;
+ type Item = ArcBorrow<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T, ID> {
+ self.iter()
+ }
+}
+
+/// An owning iterator into a [`List`].
+pub struct IntoIter<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ list: List<T, ID>,
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Iterator for IntoIter<T, ID> {
+ type Item = ListArc<T, ID>;
+
+ fn next(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_front()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for IntoIter<T, ID> {}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> DoubleEndedIterator for IntoIter<T, ID> {
+ fn next_back(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_back()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for List<T, ID> {
+ type IntoIter = IntoIter<T, ID>;
+ type Item = ListArc<T, ID>;
+
+ fn into_iter(self) -> IntoIter<T, ID> {
+ IntoIter { list: self }
+ }
+}
diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs
new file mode 100644
index 000000000000..13c50df37b89
--- /dev/null
+++ b/rust/kernel/list/arc.rs
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A wrapper around `Arc` for linked lists.
+
+use crate::alloc::{AllocError, Flags};
+use crate::prelude::*;
+use crate::sync::{Arc, ArcBorrow, UniqueArc};
+use core::marker::PhantomPinned;
+use core::ops::Deref;
+use core::pin::Pin;
+use core::sync::atomic::{AtomicBool, Ordering};
+
+/// Declares that this type has some way to ensure that there is exactly one `ListArc` instance for
+/// this id.
+///
+/// Types that implement this trait should include some kind of logic for keeping track of whether
+/// a [`ListArc`] exists or not. We refer to this logic as "the tracking inside `T`".
+///
+/// We allow the case where the tracking inside `T` thinks that a [`ListArc`] exists, but actually,
+/// there isn't a [`ListArc`]. However, we do not allow the opposite situation where a [`ListArc`]
+/// exists, but the tracking thinks it doesn't. This is because the former can at most result in us
+/// failing to create a [`ListArc`] when the operation could succeed, whereas the latter can result
+/// in the creation of two [`ListArc`] references. Only the latter situation can lead to memory
+/// safety issues.
+///
+/// A consequence of the above is that you may implement the tracking inside `T` by not actually
+/// keeping track of anything. To do this, you always claim that a [`ListArc`] exists, even if
+/// there isn't one. This implementation is allowed by the above rule, but it means that
+/// [`ListArc`] references can only be created if you have ownership of *all* references to the
+/// refcounted object, as you otherwise have no way of knowing whether a [`ListArc`] exists.
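+///
+/// # Examples
+///
+/// This trait is normally implemented via [`impl_list_arc_safe!`]; a minimal sketch using the
+/// `untracked` strategy for a hypothetical `Foo` type:
+///
+/// ```ignore
+/// struct Foo;
+///
+/// kernel::list::impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Foo { untracked; }
+/// }
+/// ```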
+pub trait ListArcSafe<const ID: u64 = 0> {
+ /// Informs the tracking inside this type that it now has a [`ListArc`] reference.
+ ///
+ /// This method may be called even if the tracking inside this type thinks that a `ListArc`
+ /// reference exists. (But only if that's not actually the case.)
+ ///
+ /// # Safety
+ ///
+ /// Must not be called if a [`ListArc`] already exists for this value.
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>);
+
+ /// Informs the tracking inside this type that there is no [`ListArc`] reference anymore.
+ ///
+ /// # Safety
+ ///
+ /// Must only be called if there is no [`ListArc`] reference, but the tracking thinks there is.
+ unsafe fn on_drop_list_arc(&self);
+}
+
+/// Declares that this type is able to safely attempt to create `ListArc`s at any time.
+///
+/// # Safety
+///
+/// The guarantees of `try_new_list_arc` must be upheld.
+pub unsafe trait TryNewListArc<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Attempts to convert an `Arc<Self>` into a `ListArc<Self>`. Returns `true` if the
+ /// conversion was successful.
+ ///
+ /// This method should not be called directly. Use [`ListArc::try_from_arc`] instead.
+ ///
+ /// # Guarantees
+ ///
+ /// If this call returns `true`, then there is no [`ListArc`] pointing to this value.
+ /// Additionally, this call will have transitioned the tracking inside `Self` from not thinking
+ /// that a [`ListArc`] exists, to thinking that a [`ListArc`] exists.
+ fn try_new_list_arc(&self) -> bool;
+}
+
+/// Declares that this type supports [`ListArc`].
+///
+/// This macro supports a few different strategies for implementing the tracking inside the type:
+///
+/// * The `untracked` strategy does not actually keep track of whether a [`ListArc`] exists. When
+/// using this strategy, the only way to create a [`ListArc`] is using a [`UniqueArc`].
+/// * The `tracked_by` strategy defers the tracking to a field of the struct. The user must specify
+/// which field to defer the tracking to. The field must implement [`ListArcSafe`]. If the field
+/// implements [`TryNewListArc`], then the type will also implement [`TryNewListArc`].
+///
+/// The `tracked_by` strategy is usually used by deferring to a field of type
+/// [`AtomicTracker`]. However, it is also possible to defer the tracking to another struct that
+/// also uses this macro.
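+///
+/// # Examples
+///
+/// A minimal sketch of the `tracked_by` strategy for a hypothetical `Foo` type, deferring to a
+/// pinned [`AtomicTracker`] field (the field must be structurally pinned):
+///
+/// ```ignore
+/// use kernel::list::AtomicTracker;
+/// use kernel::prelude::*;
+///
+/// #[pin_data]
+/// struct Foo {
+///     #[pin]
+///     tracker: AtomicTracker,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Foo { tracked_by tracker: AtomicTracker; }
+/// }
+/// ```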
+#[macro_export]
+macro_rules! impl_list_arc_safe {
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty { untracked; } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {}
+ unsafe fn on_drop_list_arc(&self) {}
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty {
+ tracked_by $field:ident : $fty:ty;
+ } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {
+ $crate::assert_pinned!($t, $field, $fty, inline);
+
+ // SAFETY: This field is structurally pinned as per the above assertion.
+ let field = unsafe {
+ ::core::pin::Pin::map_unchecked_mut(self, |me| &mut me.$field)
+ };
+ // SAFETY: The caller promises that there is no `ListArc`.
+ unsafe {
+ <$fty as $crate::list::ListArcSafe<$num>>::on_create_list_arc_from_unique(field)
+ };
+ }
+ unsafe fn on_drop_list_arc(&self) {
+ // SAFETY: The caller promises that there is no `ListArc` reference, and also
+ // promises that the tracking thinks there is a `ListArc` reference.
+ unsafe { <$fty as $crate::list::ListArcSafe<$num>>::on_drop_list_arc(&self.$field) };
+ }
+ }
+ unsafe impl$(<$($generics)*>)? $crate::list::TryNewListArc<$num> for $t
+ where
+ $fty: TryNewListArc<$num>,
+ {
+ fn try_new_list_arc(&self) -> bool {
+ <$fty as $crate::list::TryNewListArc<$num>>::try_new_list_arc(&self.$field)
+ }
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ () => {};
+}
+pub use impl_list_arc_safe;
+
+/// A wrapper around [`Arc`] that's guaranteed unique for the given id.
+///
+/// The `ListArc` type can be thought of as a special reference to a refcounted object that owns the
+/// permission to manipulate the `next`/`prev` pointers stored in the refcounted object. By ensuring
+/// that each object has only one `ListArc` reference, the owner of that reference is assured
+/// exclusive access to the `next`/`prev` pointers. When a `ListArc` is inserted into a [`List`],
+/// the [`List`] takes ownership of the `ListArc` reference.
+///
+/// There are various strategies for ensuring that a value has only one `ListArc` reference. The
+/// simplest is to convert a [`UniqueArc`] into a `ListArc`. However, the refcounted object could
+/// also keep track of whether a `ListArc` exists using a boolean, which could allow for the
+/// creation of new `ListArc` references from an [`Arc`] reference. Whatever strategy is used, the
+/// relevant tracking is referred to as "the tracking inside `T`", and the [`ListArcSafe`] trait
+/// (and its subtraits) are used to update the tracking when a `ListArc` is created or destroyed.
+///
+/// Note that we allow the case where the tracking inside `T` thinks that a `ListArc` exists, but
+/// actually, there isn't a `ListArc`. However, we do not allow the opposite situation where a
+/// `ListArc` exists, but the tracking thinks it doesn't. This is because the former can at most
+/// result in us failing to create a `ListArc` when the operation could succeed, whereas the latter
+/// can result in the creation of two `ListArc` references.
+///
+/// While this `ListArc` is unique for the given id, there still might exist normal `Arc`
+/// references to the object.
+///
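+/// # Examples
+///
+/// A minimal creation sketch for a hypothetical `Foo` type declared `untracked` via
+/// [`impl_list_arc_safe!`]:
+///
+/// ```ignore
+/// let x = ListArc::<Foo>::new(Foo::new(), GFP_KERNEL)?;
+/// let arc = x.into_arc(); // Give up the `ListArc`, keeping a plain `Arc<Foo>`.
+/// ```
+///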
+/// # Invariants
+///
+/// * Each reference counted object has at most one `ListArc` for each value of `ID`.
+/// * The tracking inside `T` is aware that a `ListArc` reference exists.
+///
+/// [`List`]: crate::list::List
+#[repr(transparent)]
+#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))]
+pub struct ListArc<T, const ID: u64 = 0>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ arc: Arc<T>,
+}
+
+impl<T: ListArcSafe<ID>, const ID: u64> ListArc<T, ID> {
+ /// Constructs a new reference counted instance of `T`.
+ #[inline]
+ pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self::from(UniqueArc::new(contents, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ // We don't implement `InPlaceInit` because `ListArc` is implicitly pinned. This is similar to
+ // what we do for `Arc`.
+ #[inline]
+ pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_pin_init(init, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// This is equivalent to [`ListArc<T>::pin_init`], since a [`ListArc`] is always pinned.
+ #[inline]
+ pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_init(init, flags)?))
+ }
+}
+
+impl<T, const ID: u64> From<UniqueArc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(unique: UniqueArc<T>) -> Self {
+ Self::from(Pin::from(unique))
+ }
+}
+
+impl<T, const ID: u64> From<Pin<UniqueArc<T>>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a pinned [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(mut unique: Pin<UniqueArc<T>>) -> Self {
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { T::on_create_list_arc_from_unique(unique.as_mut()) };
+ let arc = Arc::from(unique);
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`,
+ // so we can create a `ListArc`.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+}
+
+impl<T, const ID: u64> ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Creates two `ListArc`s from a [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_unique<const ID2: u64>(unique: UniqueArc<T>) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ Self::pair_from_pin_unique(Pin::from(unique))
+ }
+
+ /// Creates two `ListArc`s from a pinned [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_pin_unique<const ID2: u64>(
+ mut unique: Pin<UniqueArc<T>>,
+ ) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ build_assert!(ID != ID2);
+
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID>>::on_create_list_arc_from_unique(unique.as_mut()) };
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID2>>::on_create_list_arc_from_unique(unique.as_mut()) };
+
+ let arc1 = Arc::from(unique);
+ let arc2 = Arc::clone(&arc1);
+
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`
+ // for both IDs (which are different), so we can create two `ListArc`s.
+ unsafe {
+ (
+ Self::transmute_from_arc(arc1),
+ ListArc::transmute_from_arc(arc2),
+ )
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc(arc: Arc<T>) -> Result<Self, Arc<T>>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Ok(unsafe { Self::transmute_from_arc(arc) })
+ } else {
+ Err(arc)
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc_borrow(arc: ArcBorrow<'_, T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Some(unsafe { Self::transmute_from_arc(Arc::from(arc)) })
+ } else {
+ None
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// If it's not possible to create a new `ListArc`, then the `Arc` is dropped. This will never
+ /// run the destructor of the value.
+ pub fn try_from_arc_or_drop(arc: Arc<T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ match Self::try_from_arc(arc) {
+ Ok(list_arc) => Some(list_arc),
+ Err(arc) => Arc::into_unique_or_drop(arc).map(Self::from),
+ }
+ }
+
+ /// Transmutes an [`Arc`] into a `ListArc` without updating the tracking inside `T`.
+ ///
+ /// # Safety
+ ///
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ unsafe fn transmute_from_arc(arc: Arc<T>) -> Self {
+ // INVARIANT: By the safety requirements, the invariants on `ListArc` are satisfied.
+ Self { arc }
+ }
+
+ /// Transmutes a `ListArc` into an [`Arc`] without updating the tracking inside `T`.
+ ///
+ /// After this call, the tracking inside `T` will still think that there is a `ListArc`
+ /// reference.
+ #[inline]
+ fn transmute_to_arc(self) -> Arc<T> {
+ // Use a transmute to skip the destructor.
+ //
+ // SAFETY: ListArc is repr(transparent).
+ unsafe { core::mem::transmute(self) }
+ }
+
+ /// Convert ownership of this `ListArc` into a raw pointer.
+ ///
+ /// The returned pointer is indistinguishable from pointers returned by [`Arc::into_raw`]. The
+ /// tracking inside `T` will still think that a `ListArc` exists after this call.
+ #[inline]
+ pub fn into_raw(self) -> *const T {
+ Arc::into_raw(Self::transmute_to_arc(self))
+ }
+
+ /// Take ownership of the `ListArc` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must satisfy the safety requirements of [`Arc::from_raw`].
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY: The pointer satisfies the safety requirements for `Arc::from_raw`.
+ let arc = unsafe { Arc::from_raw(ptr) };
+ // SAFETY: The value doesn't already have a `ListArc` reference, but the tracking thinks it
+ // does.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+
+ /// Converts the `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn into_arc(self) -> Arc<T> {
+ let arc = Self::transmute_to_arc(self);
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is.
+ unsafe { T::on_drop_list_arc(&arc) };
+ arc
+ }
+
+ /// Clone a `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn clone_arc(&self) -> Arc<T> {
+ self.arc.clone()
+ }
+
+ /// Returns a reference to an [`Arc`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`&Arc`] (e.g., in a method
+ /// receiver), but we have a [`ListArc`] instead.
+ ///
+ /// [`&Arc`]: Arc
+ #[inline]
+ pub fn as_arc(&self) -> &Arc<T> {
+ &self.arc
+ }
+
+ /// Returns an [`ArcBorrow`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`ArcBorrow`] (e.g., in a method
+ /// receiver), but we have a [`ListArc`] instead. Getting an [`ArcBorrow`] is free when optimised.
+ #[inline]
+ pub fn as_arc_borrow(&self) -> ArcBorrow<'_, T> {
+ self.arc.as_arc_borrow()
+ }
+
+ /// Compare whether two [`ListArc`] pointers reference the same underlying object.
+ #[inline]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ Arc::ptr_eq(&this.arc, &other.arc)
+ }
+}
+
+impl<T, const ID: u64> Deref for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.arc.deref()
+ }
+}
+
+impl<T, const ID: u64> Drop for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is by the type
+ // invariants on `Self`.
+ unsafe { T::on_drop_list_arc(&self.arc) };
+ }
+}
+
+impl<T, const ID: u64> AsRef<Arc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn as_ref(&self) -> &Arc<T> {
+ self.as_arc()
+ }
+}
+
+// This is to allow coercion from `ListArc<T>` to `ListArc<U>` if `T` can be converted to the
+// dynamically-sized type (DST) `U`.
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+impl<T, U, const ID: u64> core::ops::CoerceUnsized<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + core::marker::Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+// This is to allow `ListArc<U>` to be dispatched on when `ListArc<T>` can be coerced into
+// `ListArc<U>`.
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+impl<T, U, const ID: u64> core::ops::DispatchFromDyn<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + core::marker::Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+/// A utility for tracking whether a [`ListArc`] exists using an atomic.
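+///
+/// This is the tracker type typically used with the `tracked_by` strategy of
+/// [`impl_list_arc_safe!`].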
+///
+/// # Invariant
+///
+/// If the boolean is `false`, then there is no [`ListArc`] for this value.
+#[repr(transparent)]
+pub struct AtomicTracker<const ID: u64 = 0> {
+ inner: AtomicBool,
+ // This value needs to be pinned to justify the INVARIANT: comment in `AtomicTracker::new`.
+ _pin: PhantomPinned,
+}
+
+impl<const ID: u64> AtomicTracker<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: AtomicBool::new(false),
+ _pin: PhantomPinned,
+ }
+ }
+
+ fn project_inner(self: Pin<&mut Self>) -> &mut AtomicBool {
+ // SAFETY: The `inner` field is not structurally pinned, so we may obtain a mutable
+ // reference to it even if we only have a pinned reference to `self`.
+ unsafe { &mut Pin::into_inner_unchecked(self).inner }
+ }
+}
+
+impl<const ID: u64> ListArcSafe<ID> for AtomicTracker<ID> {
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>) {
+ // INVARIANT: We just created a ListArc, so the boolean should be true.
+ *self.project_inner().get_mut() = true;
+ }
+
+ unsafe fn on_drop_list_arc(&self) {
+ // INVARIANT: We just dropped a ListArc, so the boolean should be false.
+ self.inner.store(false, Ordering::Release);
+ }
+}
+
+// SAFETY: If this method returns `true`, then by the type invariant there is no `ListArc` before
+// this call, so it is okay to create a new `ListArc`.
+//
+// The acquire ordering will synchronize with the release store from the destruction of any
+// previous `ListArc`, so if there was a previous `ListArc`, then the destruction of the previous
+// `ListArc` happens-before the creation of the new `ListArc`.
+unsafe impl<const ID: u64> TryNewListArc<ID> for AtomicTracker<ID> {
+ fn try_new_list_arc(&self) -> bool {
+ // INVARIANT: If this method returns true, then the boolean used to be false, and is no
+ // longer false, so it is okay for the caller to create a new [`ListArc`].
+ self.inner
+ .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ }
+}
diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs
new file mode 100644
index 000000000000..c4b9dd503982
--- /dev/null
+++ b/rust/kernel/list/arc_field.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A field that is exclusively owned by a [`ListArc`].
+//!
+//! This can be used to have a reference counted struct where one of the reference counted
+//! pointers has exclusive access to a field of the struct.
+//!
+//! [`ListArc`]: crate::list::ListArc
+
+use core::cell::UnsafeCell;
+
+/// A field owned by a specific [`ListArc`].
+///
+/// [`ListArc`]: crate::list::ListArc
+pub struct ListArcField<T, const ID: u64 = 0> {
+ value: UnsafeCell<T>,
+}
+
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be
+// thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Send for ListArcField<T, ID> {}
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be
+// thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Sync for ListArcField<T, ID> {}
+
+impl<T, const ID: u64> ListArcField<T, ID> {
+ /// Creates a new `ListArcField`.
+ pub fn new(value: T) -> Self {
+ Self {
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Access the value when we have exclusive access to the `ListArcField`.
+ ///
+ /// This allows access to the field using a `UniqueArc` instead of a `ListArc`.
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+ /// Unsafely assert that you have shared access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have shared access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ pub unsafe fn assert_ref(&self) -> &T {
+ // SAFETY: The caller has shared access to the `ListArc`, so they also have shared access
+ // to this field.
+ unsafe { &*self.value.get() }
+ }
+
+ /// Unsafely assert that you have mutable access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have mutable access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ #[expect(clippy::mut_from_ref)]
+ pub unsafe fn assert_mut(&self) -> &mut T {
+ // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive
+ // access to this field.
+ unsafe { &mut *self.value.get() }
+ }
+}
+
+/// Defines getters for a [`ListArcField`].
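+///
+/// # Examples
+///
+/// A minimal sketch, not part of this patch; `Item` and its `counter` field are hypothetical,
+/// and `impl_list_arc_safe!` is the macro defined in `list/arc.rs`.
+///
+/// ```ignore
+/// use kernel::list::{define_list_arc_field_getter, ListArcField};
+///
+/// struct Item {
+///     counter: ListArcField<u64>,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Item { untracked; }
+/// }
+///
+/// impl Item {
+///     define_list_arc_field_getter! {
+///         pub fn counter(&self) -> &u64 { counter }
+///         pub fn counter_mut(&mut self) -> &mut u64 { counter }
+///     }
+/// }
+/// ```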
+#[macro_export]
+macro_rules! define_list_arc_field_getter {
+ ($pub:vis fn $name:ident(&self $(<$id:tt>)?) -> &$typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a $crate::list::ListArc<Self $(, $id)?>) -> &'a $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a shared reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_ref(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ ($pub:vis fn $name:ident(&mut self $(<$id:tt>)?) -> &mut $typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a mut $crate::list::ListArc<Self $(, $id)?>) -> &'a mut $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a mutable reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_mut(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ () => {};
+}
+pub use define_list_arc_field_getter;
diff --git a/rust/kernel/list/impl_list_item_mod.rs b/rust/kernel/list/impl_list_item_mod.rs
new file mode 100644
index 000000000000..a0438537cee1
--- /dev/null
+++ b/rust/kernel/list/impl_list_item_mod.rs
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Helpers for implementing list traits safely.
+
+use crate::list::ListLinks;
+
+/// Declares that this type has a `ListLinks<ID>` field at a fixed offset.
+///
+/// This trait is only used to help implement `ListItem` safely. If `ListItem` is implemented
+/// manually, then this trait is not needed. Use the [`impl_has_list_links!`] macro to implement
+/// this trait.
+///
+/// # Safety
+///
+/// All values of this type must have a `ListLinks<ID>` field at the given offset.
+///
+/// The behavior of `raw_get_list_links` must not be changed.
+pub unsafe trait HasListLinks<const ID: u64 = 0> {
+ /// The offset of the `ListLinks` field.
+ const OFFSET: usize;
+
+ /// Returns a pointer to the [`ListLinks<ID>`] field.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid struct of type `Self`.
+ ///
+ /// [`ListLinks<ID>`]: ListLinks
+ // We don't really need this method, but it's necessary for the implementation of
+ // `impl_has_list_links!` to be correct.
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut ListLinks<ID> {
+ // SAFETY: The caller promises that the pointer is valid. The implementer promises that the
+ // `OFFSET` constant is correct.
+ unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut ListLinks<ID> }
+ }
+}
+
+/// Implements the [`HasListLinks`] trait for the given type.
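+///
+/// # Examples
+///
+/// A minimal sketch, not part of this patch; `MyItem` is hypothetical.
+///
+/// ```ignore
+/// use kernel::list::ListLinks;
+///
+/// struct MyItem {
+///     links: ListLinks,
+/// }
+///
+/// kernel::list::impl_has_list_links! {
+///     impl HasListLinks<0> for MyItem { self.links }
+/// }
+/// ```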
+#[macro_export]
+macro_rules! impl_has_list_links {
+ ($(impl$(<$($implarg:ident),*>)?
+ HasListLinks$(<$id:tt>)?
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self$(.$field:ident)* }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ //
+ // The behavior of `raw_get_list_links` is not changed since the `addr_of_mut!` macro is
+ // equivalent to the pointer offset operation in the trait definition.
+ unsafe impl$(<$($implarg),*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $($field).*) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling. We know that this
+ // expression doesn't follow any pointers, as the `offset_of!` invocation above
+ // would otherwise not compile.
+ unsafe { ::core::ptr::addr_of_mut!((*ptr)$(.$field)*) }
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links;
+
+/// Declares that the `ListLinks<ID>` field in this struct is inside a `ListLinksSelfPtr<T, ID>`.
+///
+/// # Safety
+///
+/// The `ListLinks<ID>` field of this struct at the offset `HasListLinks<ID>::OFFSET` must be
+/// inside a `ListLinksSelfPtr<T, ID>`.
+pub unsafe trait HasSelfPtr<T: ?Sized, const ID: u64 = 0>
+where
+ Self: HasListLinks<ID>,
+{
+}
+
+/// Implements the [`HasListLinks`] and [`HasSelfPtr`] traits for the given type.
+#[macro_export]
+macro_rules! impl_has_list_links_self_ptr {
+ ($(impl$({$($implarg:tt)*})?
+ HasSelfPtr<$item_type:ty $(, $id:tt)?>
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self.$field:ident }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for
+ $self $(<$($selfarg),*>)?
+ {}
+
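+        // SAFETY: The implementation of `raw_get_list_links` below only compiles if the field
+        // has the right type.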
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ let ptr: *mut $crate::list::ListLinksSelfPtr<$item_type $(, $id)?> =
+ unsafe { ::core::ptr::addr_of_mut!((*ptr).$field) };
+ ptr.cast()
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links_self_ptr;
+
+/// Implements the [`ListItem`] trait for the given type.
+///
+/// Requires that the type implements [`HasListLinks`]. Use the [`impl_has_list_links!`] macro to
+/// implement that trait.
+///
+/// [`ListItem`]: crate::list::ListItem
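+///
+/// # Examples
+///
+/// A minimal sketch, not part of this patch, continuing the hypothetical `MyItem` from the
+/// [`impl_has_list_links!`] example (the type must also implement `ListArcSafe`).
+///
+/// ```ignore
+/// kernel::list::impl_list_item! {
+///     impl ListItem<0> for MyItem { using ListLinks; }
+/// }
+/// ```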
+#[macro_export]
+macro_rules! impl_list_item {
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinks;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // is implemented in terms of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller guarantees that `me` points at a valid value of type `Self`.
+ unsafe {
+ <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut())
+ }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+ }
+ )*};
+
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinksSelfPtr;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+ let links_field = unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) };
+
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // Go via the offset because the `self_ptr` field is private.
+ //
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const $crate::types::Opaque<*const Self>;
+ let cell_inner = $crate::types::Opaque::raw_get(self_ptr);
+
+ // SAFETY: This value is not accessed in any other places than `prepare_to_insert`,
+ // `post_remove`, or `view_value`. By the safety requirements of those methods,
+ // none of these three methods may be called in parallel with this call to
+ // `prepare_to_insert`, so this write will not race with any other access to the
+ // value.
+ unsafe { ::core::ptr::write(cell_inner, me) };
+
+ links_field
+ }
+
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // returns the return value of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+ unsafe { <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut()) }
+ }
+
+ // This function is also used as the implementation of `post_remove`, so the caller
+ // may choose to satisfy the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ //
+ // GUARANTEES: (always)
+ // * This returns the same pointer as the one passed to the most recent call to
+ // `prepare_to_insert` since that call wrote that pointer to this location. The value
+ // is only modified in `prepare_to_insert`, so it has not been modified since the
+ // most recent call.
+ //
+ // GUARANTEES: (only when using the `view_value` safety requirements)
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(links_field: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const ::core::cell::UnsafeCell<*const Self>;
+ let cell_inner = ::core::cell::UnsafeCell::raw_get(self_ptr);
+ // SAFETY: This is not a data race, because the only function that writes to this
+ // value is `prepare_to_insert`, but by the safety requirements the
+ // `prepare_to_insert` method may not be called in parallel with `view_value` or
+ // `post_remove`.
+ unsafe { ::core::ptr::read(cell_inner) }
+ }
+
+ // GUARANTEES:
+ // The first guarantee of `view_value` is exactly what `post_remove` guarantees.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ // SAFETY: This specific implementation of `view_value` allows the caller to
+ // promise the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_value(me) }
+ }
+ }
+ )*};
+}
+pub use impl_list_item;
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
new file mode 100644
index 000000000000..9d9771247c38
--- /dev/null
+++ b/rust/kernel/miscdevice.rs
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Miscdevice support.
+//!
+//! C headers: [`include/linux/miscdevice.h`](srctree/include/linux/miscdevice.h).
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/driver-api/misc_devices.html>
+
+use crate::{
+ bindings,
+ device::Device,
+ error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR},
+ ffi::{c_int, c_long, c_uint, c_ulong},
+ fs::File,
+ mm::virt::VmaNew,
+ prelude::*,
+ seq_file::SeqFile,
+ str::CStr,
+ types::{ForeignOwnable, Opaque},
+};
+use core::{marker::PhantomData, mem::MaybeUninit, pin::Pin};
+
+/// Options for creating a misc device.
+#[derive(Copy, Clone)]
+pub struct MiscDeviceOptions {
+ /// The name of the miscdevice.
+ pub name: &'static CStr,
+}
+
+impl MiscDeviceOptions {
+ /// Create a raw `struct miscdevice` ready for registration.
+ pub const fn into_raw<T: MiscDevice>(self) -> bindings::miscdevice {
+ // SAFETY: All zeros is valid for this C type.
+ let mut result: bindings::miscdevice = unsafe { MaybeUninit::zeroed().assume_init() };
+ result.minor = bindings::MISC_DYNAMIC_MINOR as _;
+ result.name = self.name.as_char_ptr();
+ result.fops = MiscdeviceVTable::<T>::build();
+ result
+ }
+}
+
+/// A registration of a miscdevice.
+///
+/// # Invariants
+///
+/// `inner` is a registered misc device.
+#[repr(transparent)]
+#[pin_data(PinnedDrop)]
+pub struct MiscDeviceRegistration<T> {
+ #[pin]
+ inner: Opaque<bindings::miscdevice>,
+ _t: PhantomData<T>,
+}
+
+// SAFETY: It is allowed to call `misc_deregister` on a different thread from where you called
+// `misc_register`.
+unsafe impl<T> Send for MiscDeviceRegistration<T> {}
+// SAFETY: All `&self` methods on this type are written to ensure that it is safe to call them in
+// parallel.
+unsafe impl<T> Sync for MiscDeviceRegistration<T> {}
+
+impl<T: MiscDevice> MiscDeviceRegistration<T> {
+ /// Register a misc device.
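+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, not part of this patch; `Sample` is the hypothetical driver from the
+ /// [`MiscDevice`] example below, and the device name is arbitrary.
+ ///
+ /// ```ignore
+ /// let options = MiscDeviceOptions { name: kernel::c_str!("rust-sample") };
+ /// let reg = KBox::pin_init(MiscDeviceRegistration::<Sample>::register(options), GFP_KERNEL)?;
+ /// ```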
+ pub fn register(opts: MiscDeviceOptions) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ inner <- Opaque::try_ffi_init(move |slot: *mut bindings::miscdevice| {
+ // SAFETY: The initializer can write to the provided `slot`.
+ unsafe { slot.write(opts.into_raw::<T>()) };
+
+ // SAFETY: We just wrote the misc device options to the slot. The miscdevice will
+ // get unregistered before `slot` is deallocated because the memory is pinned and
+ // the destructor of this type deallocates the memory.
+ // INVARIANT: If this returns `Ok(())`, then the `slot` will contain a registered
+ // misc device.
+ to_result(unsafe { bindings::misc_register(slot) })
+ }),
+ _t: PhantomData,
+ })
+ }
+
+ /// Returns a raw pointer to the misc device.
+ pub fn as_raw(&self) -> *mut bindings::miscdevice {
+ self.inner.get()
+ }
+
+ /// Access the `this_device` field.
+ pub fn device(&self) -> &Device {
+ // SAFETY: This can only be called after a successful register(), which always
+ // initialises `this_device` with a valid device. Furthermore, the signature of this
+ // function tells the borrow-checker that the `&Device` reference must not outlive the
+ // `&MiscDeviceRegistration<T>` used to obtain it, so the last use of the reference must be
+ // before the underlying `struct miscdevice` is destroyed.
+ unsafe { Device::as_ref((*self.as_raw()).this_device) }
+ }
+}
+
+#[pinned_drop]
+impl<T> PinnedDrop for MiscDeviceRegistration<T> {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: We know that the device is registered by the type invariants.
+ unsafe { bindings::misc_deregister(self.inner.get()) };
+ }
+}
+
+/// Trait implemented by the private data of an open misc device.
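+///
+/// # Examples
+///
+/// A minimal sketch, not part of this patch; `Sample` is a hypothetical driver whose private
+/// data is a plain `KBox`.
+///
+/// ```ignore
+/// use kernel::fs::File;
+/// use kernel::miscdevice::{MiscDevice, MiscDeviceRegistration};
+/// use kernel::prelude::*;
+///
+/// struct Sample;
+///
+/// #[vtable]
+/// impl MiscDevice for Sample {
+///     type Ptr = KBox<Self>;
+///
+///     fn open(_file: &File, _misc: &MiscDeviceRegistration<Self>) -> Result<Self::Ptr> {
+///         Ok(KBox::new(Sample, GFP_KERNEL)?)
+///     }
+/// }
+/// ```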
+#[vtable]
+pub trait MiscDevice: Sized {
+ /// What kind of pointer should `Self` be wrapped in.
+ type Ptr: ForeignOwnable + Send + Sync;
+
+ /// Called when the misc device is opened.
+ ///
+ /// The returned pointer will be stored as the private data for the file.
+ fn open(_file: &File, _misc: &MiscDeviceRegistration<Self>) -> Result<Self::Ptr>;
+
+ /// Called when the misc device is released.
+ fn release(device: Self::Ptr, _file: &File) {
+ drop(device);
+ }
+
+ /// Handle for mmap.
+ ///
+ /// This function is invoked when a user space process invokes the `mmap` system call on
+ /// `file`. The function is a callback that is part of the VMA initializer. The kernel will do
+ /// initial setup of the VMA before calling this function. The function can then interact with
+ /// the VMA initialization by calling methods of `vma`. If the function does not return an
+ /// error, the kernel will complete initialization of the VMA according to the properties of
+ /// `vma`.
+ fn mmap(
+ _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _file: &File,
+ _vma: &VmaNew,
+ ) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Handler for ioctls.
+ ///
+ /// The `cmd` argument is usually manipulated using the utilities in [`kernel::ioctl`].
+ ///
+ /// [`kernel::ioctl`]: mod@crate::ioctl
+ fn ioctl(
+ _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _file: &File,
+ _cmd: u32,
+ _arg: usize,
+ ) -> Result<isize> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Handler for ioctls.
+ ///
+ /// Used for 32-bit userspace on 64-bit platforms.
+ ///
+ /// This method is optional and only needs to be provided if the ioctl relies on structures
+ /// that have different layout on 32-bit and 64-bit userspace. If no implementation is
+ /// provided, then `compat_ptr_ioctl` will be used instead.
+ #[cfg(CONFIG_COMPAT)]
+ fn compat_ioctl(
+ _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _file: &File,
+ _cmd: u32,
+ _arg: usize,
+ ) -> Result<isize> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Show info for this fd.
+ fn show_fdinfo(
+ _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _m: &SeqFile,
+ _file: &File,
+ ) {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+}
+
+/// A vtable for the file operations of a Rust miscdevice.
+struct MiscdeviceVTable<T: MiscDevice>(PhantomData<T>);
+
+impl<T: MiscDevice> MiscdeviceVTable<T> {
+ /// # Safety
+ ///
+ /// `file` and `inode` must be the file and inode for a file that is undergoing initialization.
+ /// The file must be associated with a `MiscDeviceRegistration<T>`.
+ unsafe extern "C" fn open(inode: *mut bindings::inode, raw_file: *mut bindings::file) -> c_int {
+ // SAFETY: The pointers are valid and for a file being opened.
+ let ret = unsafe { bindings::generic_file_open(inode, raw_file) };
+ if ret != 0 {
+ return ret;
+ }
+
+ // SAFETY: The open call of a file can access the private data.
+ let misc_ptr = unsafe { (*raw_file).private_data };
+
+ // SAFETY: This is a miscdevice, so `misc_open()` set the private data to a pointer to the
+ // associated `struct miscdevice` before calling into this method. Furthermore,
+ // `misc_open()` ensures that the miscdevice can't be unregistered and freed during this
+ // call to `fops_open`.
+ let misc = unsafe { &*misc_ptr.cast::<MiscDeviceRegistration<T>>() };
+
+ // SAFETY:
+ // * This underlying file is valid for (much longer than) the duration of `T::open`.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(raw_file) };
+
+ let ptr = match T::open(file, misc) {
+ Ok(ptr) => ptr,
+ Err(err) => return err.to_errno(),
+ };
+
+ // This overwrites the private data with the value specified by the user, changing the type
+ // of this file's private data. All future accesses to the private data are performed by
+ // other fops_* methods in this file, which all correctly cast the private data to the new
+ // type.
+ //
+ // SAFETY: The open call of a file can access the private data.
+ unsafe { (*raw_file).private_data = ptr.into_foreign() };
+
+ 0
+ }
+
+ /// # Safety
+ ///
+ /// `file` and `inode` must be the file and inode for a file that is being released. The file
+ /// must be associated with a `MiscDeviceRegistration<T>`.
+ unsafe extern "C" fn release(_inode: *mut bindings::inode, file: *mut bindings::file) -> c_int {
+ // SAFETY: The release call of a file owns the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: The release call of a file owns the private data.
+ let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
+
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ T::release(ptr, unsafe { File::from_raw_file(file) });
+
+ 0
+ }
+
+ /// # Safety
+ ///
+ /// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ /// `vma` must be a vma that is currently being mmap'ed with this file.
+ unsafe extern "C" fn mmap(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+ ) -> c_int {
+ // SAFETY: The mmap call of a file can access the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: This is a Rust miscdevice, so we call `into_foreign` in `open` and
+ // `from_foreign` in `release`, and `fops_mmap` is guaranteed to be called between those
+ // two operations.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ // SAFETY: The caller provides a vma that is undergoing initial VMA setup.
+ let area = unsafe { VmaNew::from_raw(vma) };
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match T::mmap(device, file, area) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ unsafe extern "C" fn ioctl(file: *mut bindings::file, cmd: c_uint, arg: c_ulong) -> c_long {
+ // SAFETY: The ioctl call of a file can access the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Ioctl calls can borrow the private data of the file.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match T::ioctl(device, file, cmd, arg) {
+ Ok(ret) => ret as c_long,
+ Err(err) => err.to_errno() as c_long,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ #[cfg(CONFIG_COMPAT)]
+ unsafe extern "C" fn compat_ioctl(
+ file: *mut bindings::file,
+ cmd: c_uint,
+ arg: c_ulong,
+ ) -> c_long {
+ // SAFETY: The compat ioctl call of a file can access the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Ioctl calls can borrow the private data of the file.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match T::compat_ioctl(device, file, cmd, arg) {
+ Ok(ret) => ret as c_long,
+ Err(err) => err.to_errno() as c_long,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// - `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ /// - `seq_file` must be a valid `struct seq_file` that we can write to.
+ unsafe extern "C" fn show_fdinfo(seq_file: *mut bindings::seq_file, file: *mut bindings::file) {
+ // SAFETY: The show_fdinfo call of a file can access the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: The show_fdinfo call of a file can borrow the private data.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in
+ // which this method is called.
+ let m = unsafe { SeqFile::from_raw(seq_file) };
+
+ T::show_fdinfo(device, m, file);
+ }
+
+ const VTABLE: bindings::file_operations = bindings::file_operations {
+ open: Some(Self::open),
+ release: Some(Self::release),
+ mmap: if T::HAS_MMAP { Some(Self::mmap) } else { None },
+ unlocked_ioctl: if T::HAS_IOCTL {
+ Some(Self::ioctl)
+ } else {
+ None
+ },
+ #[cfg(CONFIG_COMPAT)]
+ compat_ioctl: if T::HAS_COMPAT_IOCTL {
+ Some(Self::compat_ioctl)
+ } else if T::HAS_IOCTL {
+ Some(bindings::compat_ptr_ioctl)
+ } else {
+ None
+ },
+ show_fdinfo: if T::HAS_SHOW_FDINFO {
+ Some(Self::show_fdinfo)
+ } else {
+ None
+ },
+ // SAFETY: All zeros is a valid value for `bindings::file_operations`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ };
+
+ const fn build() -> &'static bindings::file_operations {
+ &Self::VTABLE
+ }
+}
diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
new file mode 100644
index 000000000000..615907a0f3b4
--- /dev/null
+++ b/rust/kernel/mm.rs
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Memory management.
+//!
+//! This module deals with managing the address space of userspace processes. Each process has an
+//! instance of [`Mm`], which keeps track of multiple VMAs (virtual memory areas). Each VMA
+//! corresponds to a region of memory that the userspace process can access, and the VMA lets you
+//! control what happens when userspace reads or writes to that region of memory.
+//!
+//! C header: [`include/linux/mm.h`](srctree/include/linux/mm.h)
+#![cfg(CONFIG_MMU)]
+
+use crate::{
+ bindings,
+ types::{ARef, AlwaysRefCounted, NotThreadSafe, Opaque},
+};
+use core::{ops::Deref, ptr::NonNull};
+
+pub mod virt;
+use virt::VmaRef;
+
+/// A wrapper for the kernel's `struct mm_struct`.
+///
+/// This represents the address space of a userspace process, so each process has one `Mm`
+/// instance. It may hold many VMAs internally.
+///
+/// There is a counter called `mm_users` that counts the users of the address space; this includes
+/// the userspace process itself, but can also include kernel threads accessing the address space.
+/// Once `mm_users` reaches zero, this indicates that the address space can be destroyed. To access
+/// the address space, you must prevent `mm_users` from reaching zero while you are accessing it.
+/// The [`MmWithUser`] type represents an address space where this is guaranteed, and you can
+/// create one using [`mmget_not_zero`].
+///
+/// The `ARef<Mm>` smart pointer holds an `mmgrab` refcount. Its destructor may sleep.
+///
+/// # Invariants
+///
+/// Values of this type are always refcounted using `mmgrab`.
+///
+/// [`mmget_not_zero`]: Mm::mmget_not_zero
+#[repr(transparent)]
+pub struct Mm {
+ mm: Opaque<bindings::mm_struct>,
+}
+
+// SAFETY: It is safe to call `mmdrop` on another thread than where `mmgrab` was called.
+unsafe impl Send for Mm {}
+// SAFETY: All methods on `Mm` can be called in parallel from several threads.
+unsafe impl Sync for Mm {}
+
+// SAFETY: By the type invariants, this type is always refcounted.
+unsafe impl AlwaysRefCounted for Mm {
+ #[inline]
+ fn inc_ref(&self) {
+ // SAFETY: The pointer is valid since self is a reference.
+ unsafe { bindings::mmgrab(self.as_raw()) };
+ }
+
+ #[inline]
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The caller is giving up their refcount.
+ unsafe { bindings::mmdrop(obj.cast().as_ptr()) };
+ }
+}
+
+/// A wrapper for the kernel's `struct mm_struct`.
+///
+/// This type is like [`Mm`], but with non-zero `mm_users`. It can only be used when `mm_users` can
+/// be proven to be non-zero at compile-time, usually because the relevant code holds an `mmget`
+/// refcount. It can be used to access the associated address space.
+///
+/// The `ARef<MmWithUser>` smart pointer holds an `mmget` refcount. Its destructor may sleep.
+///
+/// # Invariants
+///
+/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero.
+#[repr(transparent)]
+pub struct MmWithUser {
+ mm: Mm,
+}
+
+// SAFETY: It is safe to call `mmput` on another thread than where `mmget` was called.
+unsafe impl Send for MmWithUser {}
+// SAFETY: All methods on `MmWithUser` can be called in parallel from several threads.
+unsafe impl Sync for MmWithUser {}
+
+// SAFETY: By the type invariants, this type is always refcounted.
+unsafe impl AlwaysRefCounted for MmWithUser {
+ #[inline]
+ fn inc_ref(&self) {
+ // SAFETY: The pointer is valid since self is a reference.
+ unsafe { bindings::mmget(self.as_raw()) };
+ }
+
+ #[inline]
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The caller is giving up their refcount.
+ unsafe { bindings::mmput(obj.cast().as_ptr()) };
+ }
+}
+
+// Make all `Mm` methods available on `MmWithUser`.
+impl Deref for MmWithUser {
+ type Target = Mm;
+
+ #[inline]
+ fn deref(&self) -> &Mm {
+ &self.mm
+ }
+}
+
+/// A wrapper for the kernel's `struct mm_struct`.
+///
+/// This type is identical to `MmWithUser` except that it uses `mmput_async` when dropping a
+/// refcount. This means that the destructor of `ARef<MmWithUserAsync>` is safe to call in atomic
+/// context.
+///
+/// # Invariants
+///
+/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero.
+#[repr(transparent)]
+pub struct MmWithUserAsync {
+ mm: MmWithUser,
+}
+
+// SAFETY: It is safe to call `mmput_async` on another thread than where `mmget` was called.
+unsafe impl Send for MmWithUserAsync {}
+// SAFETY: All methods on `MmWithUserAsync` can be called in parallel from several threads.
+unsafe impl Sync for MmWithUserAsync {}
+
+// SAFETY: By the type invariants, this type is always refcounted.
+unsafe impl AlwaysRefCounted for MmWithUserAsync {
+ #[inline]
+ fn inc_ref(&self) {
+ // SAFETY: The pointer is valid since self is a reference.
+ unsafe { bindings::mmget(self.as_raw()) };
+ }
+
+ #[inline]
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The caller is giving up their refcount.
+ unsafe { bindings::mmput_async(obj.cast().as_ptr()) };
+ }
+}
+
+// Make all `MmWithUser` methods available on `MmWithUserAsync`.
+impl Deref for MmWithUserAsync {
+ type Target = MmWithUser;
+
+ #[inline]
+ fn deref(&self) -> &MmWithUser {
+ &self.mm
+ }
+}
+
+// These methods are safe to call even if `mm_users` is zero.
+impl Mm {
+ /// Returns a raw pointer to the inner `mm_struct`.
+ #[inline]
+ pub fn as_raw(&self) -> *mut bindings::mm_struct {
+ self.mm.get()
+ }
+
+ /// Obtain a reference from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` points at an `mm_struct`, and that it is not deallocated
+ /// during the lifetime 'a.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a Mm {
+ // SAFETY: Caller promises that the pointer is valid for 'a. Layouts are compatible due to
+ // repr(transparent).
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Calls `mmget_not_zero` and returns a handle if it succeeds.
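+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, not part of this patch; `with_user` is a hypothetical helper that only
+ /// touches the address space if `mm_users` can still be incremented.
+ ///
+ /// ```ignore
+ /// use kernel::mm::Mm;
+ ///
+ /// fn with_user(mm: &Mm) -> bool {
+ ///     match mm.mmget_not_zero() {
+ ///         Some(mm) => {
+ ///             // `mm_users` stays non-zero while `mm` (an `ARef<MmWithUser>`) is alive.
+ ///             let _guard = mm.mmap_read_lock();
+ ///             true
+ ///         }
+ ///         None => false,
+ ///     }
+ /// }
+ /// ```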
+ #[inline]
+ pub fn mmget_not_zero(&self) -> Option<ARef<MmWithUser>> {
+ // SAFETY: The pointer is valid since self is a reference.
+ let success = unsafe { bindings::mmget_not_zero(self.as_raw()) };
+
+ if success {
+ // SAFETY: We just created an `mmget` refcount.
+ Some(unsafe { ARef::from_raw(NonNull::new_unchecked(self.as_raw().cast())) })
+ } else {
+ None
+ }
+ }
+}
+
+// These methods require `mm_users` to be non-zero.
+impl MmWithUser {
+ /// Obtain a reference from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` points at an `mm_struct`, and that `mm_users` remains
+ /// non-zero for the duration of the lifetime 'a.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
+ // SAFETY: Caller promises that the pointer is valid for 'a. The layout is compatible due
+ // to repr(transparent).
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Use `mmput_async` when dropping this refcount.
+ #[inline]
+ pub fn into_mmput_async(me: ARef<MmWithUser>) -> ARef<MmWithUserAsync> {
+ // SAFETY: The layouts and invariants are compatible.
+ unsafe { ARef::from_raw(ARef::into_raw(me).cast()) }
+ }
+
+ /// Attempt to access a vma using the vma read lock.
+ ///
+ /// This is an optimistic trylock operation, so it may fail if there is contention. In that
+ /// case, you should fall back to taking the mmap read lock.
+ ///
+ /// When per-vma locks are disabled, this always returns `None`.
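+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, not part of this patch, of the trylock-then-fallback pattern described
+ /// above; `find_vma_start` is a hypothetical helper.
+ ///
+ /// ```ignore
+ /// use kernel::mm::MmWithUser;
+ ///
+ /// fn find_vma_start(mm: &MmWithUser, addr: usize) -> Option<usize> {
+ ///     if let Some(vma) = mm.lock_vma_under_rcu(addr) {
+ ///         return Some(vma.start());
+ ///     }
+ ///     // Contention (or per-vma locks disabled): fall back to the mmap read lock.
+ ///     let guard = mm.mmap_read_lock();
+ ///     guard.vma_lookup(addr).map(|vma| vma.start())
+ /// }
+ /// ```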
+ #[inline]
+ pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
+ #[cfg(CONFIG_PER_VMA_LOCK)]
+ {
+ // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
+ // `mm_users` is non-zero.
+ let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr) };
+ if !vma.is_null() {
+ return Some(VmaReadGuard {
+ // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
+ // valid vma. The vma is stable for as long as the vma read lock is held.
+ vma: unsafe { VmaRef::from_raw(vma) },
+ _nts: NotThreadSafe,
+ });
+ }
+ }
+
+ // Silence warnings about unused variables.
+ #[cfg(not(CONFIG_PER_VMA_LOCK))]
+ let _ = vma_addr;
+
+ None
+ }
+
+ /// Lock the mmap read lock.
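+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, not part of this patch; `vma_covers` is a hypothetical helper.
+ ///
+ /// ```ignore
+ /// use kernel::mm::MmWithUser;
+ ///
+ /// fn vma_covers(mm: &MmWithUser, addr: usize) -> bool {
+ ///     // The read lock is released when `guard` goes out of scope.
+ ///     let guard = mm.mmap_read_lock();
+ ///     guard.vma_lookup(addr).is_some()
+ /// }
+ /// ```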
+ #[inline]
+ pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
+ // SAFETY: The pointer is valid since self is a reference.
+ unsafe { bindings::mmap_read_lock(self.as_raw()) };
+
+ // INVARIANT: We just acquired the read lock.
+ MmapReadGuard {
+ mm: self,
+ _nts: NotThreadSafe,
+ }
+ }
+
+ /// Try to lock the mmap read lock.
+ #[inline]
+ pub fn mmap_read_trylock(&self) -> Option<MmapReadGuard<'_>> {
+ // SAFETY: The pointer is valid since self is a reference.
+ let success = unsafe { bindings::mmap_read_trylock(self.as_raw()) };
+
+ if success {
+ // INVARIANT: We just acquired the read lock.
+ Some(MmapReadGuard {
+ mm: self,
+ _nts: NotThreadSafe,
+ })
+ } else {
+ None
+ }
+ }
+}
+
+/// A guard for the mmap read lock.
+///
+/// # Invariants
+///
+/// This `MmapReadGuard` guard owns the mmap read lock.
+pub struct MmapReadGuard<'a> {
+ mm: &'a MmWithUser,
+ // `mmap_read_lock` and `mmap_read_unlock` must be called on the same thread
+ _nts: NotThreadSafe,
+}
+
+impl<'a> MmapReadGuard<'a> {
+ /// Look up a vma at the given address.
+ #[inline]
+ pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmaRef> {
+ // SAFETY: By the type invariants we hold the mmap read guard, so we can safely call this
+ // method. Any value is okay for `vma_addr`.
+ let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr) };
+
+ if vma.is_null() {
+ None
+ } else {
+ // SAFETY: We just checked that a vma was found, so the pointer references a valid vma.
+ //
+ // Furthermore, the returned vma is still under the protection of the read lock guard
+ // and can be used while the mmap read lock is still held. That the vma is not used
+ // after the MmapReadGuard gets dropped is enforced by the borrow-checker.
+ unsafe { Some(virt::VmaRef::from_raw(vma)) }
+ }
+ }
+}
+
+impl Drop for MmapReadGuard<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: We hold the read lock by the type invariants.
+ unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
+ }
+}
+
+/// A guard for the vma read lock.
+///
+/// # Invariants
+///
+/// This `VmaReadGuard` guard owns the vma read lock.
+pub struct VmaReadGuard<'a> {
+ vma: &'a VmaRef,
+ // `vma_end_read` must be called on the same thread as where the lock was taken
+ _nts: NotThreadSafe,
+}
+
+// Make all `VmaRef` methods available on `VmaReadGuard`.
+impl Deref for VmaReadGuard<'_> {
+ type Target = VmaRef;
+
+ #[inline]
+ fn deref(&self) -> &VmaRef {
+ self.vma
+ }
+}
+
+impl Drop for VmaReadGuard<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: We hold the read lock by the type invariants.
+ unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
+ }
+}
diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
new file mode 100644
index 000000000000..31803674aecc
--- /dev/null
+++ b/rust/kernel/mm/virt.rs
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Virtual memory.
+//!
+//! This module deals with managing a single VMA in the address space of a userspace process. Each
+//! VMA corresponds to a region of memory that the userspace process can access, and the VMA lets
+//! you control what happens when userspace reads or writes to that region of memory.
+//!
+//! The module has several different Rust types that all correspond to the C type called
+//! `vm_area_struct`. The different structs represent what kind of access you have to the VMA, e.g.
+//! [`VmaRef`] is used when you hold the mmap or vma read lock. Using the appropriate struct
+//! ensures that you can't, for example, accidentally call a function that requires holding the
+//! write lock when you only hold the read lock.
+
+use crate::{
+ bindings,
+ error::{code::EINVAL, to_result, Result},
+ mm::MmWithUser,
+ page::Page,
+ types::Opaque,
+};
+
+use core::ops::Deref;
+
+/// A wrapper for the kernel's `struct vm_area_struct` with read access.
+///
+/// It represents an area of virtual memory.
+///
+/// # Invariants
+///
+/// The caller must hold the mmap read lock or the vma read lock.
+#[repr(transparent)]
+pub struct VmaRef {
+ vma: Opaque<bindings::vm_area_struct>,
+}
+
+// Methods you can call when holding the mmap or vma read lock (or stronger). They must be usable
+// no matter what the vma flags are.
+impl VmaRef {
+ /// Access a virtual memory area given a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `vma` is valid for the duration of 'a, and that the mmap or vma
+ /// read lock (or stronger) is held for at least the duration of 'a.
+ #[inline]
+ pub unsafe fn from_raw<'a>(vma: *const bindings::vm_area_struct) -> &'a Self {
+ // SAFETY: The caller ensures that the invariants are satisfied for the duration of 'a.
+ unsafe { &*vma.cast() }
+ }
+
+ /// Returns a raw pointer to this area.
+ #[inline]
+ pub fn as_ptr(&self) -> *mut bindings::vm_area_struct {
+ self.vma.get()
+ }
+
+ /// Access the underlying `mm_struct`.
+ #[inline]
+ pub fn mm(&self) -> &MmWithUser {
+ // SAFETY: By the type invariants, this `vm_area_struct` is valid and we hold the mmap/vma
+ // read lock or stronger. This implies that the underlying mm has a non-zero value of
+ // `mm_users`.
+ unsafe { MmWithUser::from_raw((*self.as_ptr()).vm_mm) }
+ }
+
+ /// Returns the flags associated with the virtual memory area.
+ ///
+ /// The possible flags are a combination of the constants in [`flags`].
+ #[inline]
+ pub fn flags(&self) -> vm_flags_t {
+ // SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
+ // access is not a data race.
+ unsafe { (*self.as_ptr()).__bindgen_anon_2.vm_flags }
+ }
+
+ /// Returns the (inclusive) start address of the virtual memory area.
+ #[inline]
+ pub fn start(&self) -> usize {
+ // SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
+ // access is not a data race.
+ unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_start }
+ }
+
+ /// Returns the (exclusive) end address of the virtual memory area.
+ #[inline]
+ pub fn end(&self) -> usize {
+ // SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
+ // access is not a data race.
+ unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_end }
+ }
+
+ /// Zap pages in the given page range.
+ ///
+ /// This clears page table mappings for the range at the leaf level, leaving all other page
+ /// tables intact, and freeing any memory referenced by the VMA in this range. That is,
+ /// anonymous memory is completely freed, file-backed memory has its reference count on the
+ /// page cache folios dropped, and any dirty data will still be written back to disk as usual.
+ ///
+ /// It may seem odd that we clear only at the leaf level; this is, however, a product of the
+ /// page table structure used to map physical memory into a virtual address space: each
+ /// virtual address actually consists of a series of array indices into page tables, which
+ /// form a hierarchical page table structure.
+ ///
+ /// As a result, each page table level maps a multiple of the page table levels below it, and
+ /// thus spans ever increasing ranges of pages. At the leaf or PTE level, we map the actual
+ /// physical memory.
+ ///
+ /// It is here that a zap operates, as it is the only place we can be certain of clearing
+ /// without impacting any other virtual mappings. It is an implementation detail as to whether
+ /// the kernel goes further in freeing unused page tables, but for the purposes of this
+ /// operation we must only assume that the leaf level is cleared.
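+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, not part of this patch, that zaps the first page of a vma;
+ /// `zap_first_page` is a hypothetical helper.
+ ///
+ /// ```ignore
+ /// use kernel::mm::virt::VmaRef;
+ /// use kernel::page::PAGE_SIZE;
+ ///
+ /// fn zap_first_page(vma: &VmaRef) {
+ ///     // Out-of-range requests are rejected inside `zap_page_range_single`.
+ ///     vma.zap_page_range_single(vma.start(), PAGE_SIZE);
+ /// }
+ /// ```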
+ #[inline]
+ pub fn zap_page_range_single(&self, address: usize, size: usize) {
+ let (end, did_overflow) = address.overflowing_add(size);
+ if did_overflow || address < self.start() || self.end() < end {
+ // TODO: call WARN_ONCE once Rust version of it is added
+ return;
+ }
+
+ // SAFETY: By the type invariants, the caller has read access to this VMA, which is
+ // sufficient for this method call. This method has no requirements on the vma flags. The
+ // address range is checked to be within the vma.
+ unsafe {
+ bindings::zap_page_range_single(self.as_ptr(), address, size, core::ptr::null_mut())
+ };
+ }
+
+ /// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise
+ /// returns `None`.
+ ///
+ /// This can be used to access methods that require [`VM_MIXEDMAP`] to be set.
+ ///
+ /// [`VM_MIXEDMAP`]: flags::MIXEDMAP
+ #[inline]
+ pub fn as_mixedmap_vma(&self) -> Option<&VmaMixedMap> {
+ if self.flags() & flags::MIXEDMAP != 0 {
+ // SAFETY: We just checked that `VM_MIXEDMAP` is set. All other requirements are
+ // satisfied by the type invariants of `VmaRef`.
+ Some(unsafe { VmaMixedMap::from_raw(self.as_ptr()) })
+ } else {
+ None
+ }
+ }
+}
+
+/// A wrapper for the kernel's `struct vm_area_struct` with read access and [`VM_MIXEDMAP`] set.
+///
+/// It represents an area of virtual memory.
+///
+/// This struct is identical to [`VmaRef`] except that it must only be used when the
+/// [`VM_MIXEDMAP`] flag is set on the vma.
+///
+/// # Invariants
+///
+/// The caller must hold the mmap read lock or the vma read lock. The `VM_MIXEDMAP` flag must be
+/// set.
+///
+/// [`VM_MIXEDMAP`]: flags::MIXEDMAP
+#[repr(transparent)]
+pub struct VmaMixedMap {
+ vma: VmaRef,
+}
+
+// Make all `VmaRef` methods available on `VmaMixedMap`.
+impl Deref for VmaMixedMap {
+ type Target = VmaRef;
+
+ #[inline]
+ fn deref(&self) -> &VmaRef {
+ &self.vma
+ }
+}
+
+impl VmaMixedMap {
+ /// Access a virtual memory area given a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `vma` is valid for the duration of 'a, and that the mmap read lock
+ /// (or stronger) is held for at least the duration of 'a. The `VM_MIXEDMAP` flag must be set.
+ #[inline]
+ pub unsafe fn from_raw<'a>(vma: *const bindings::vm_area_struct) -> &'a Self {
+ // SAFETY: The caller ensures that the invariants are satisfied for the duration of 'a.
+ unsafe { &*vma.cast() }
+ }
+
+ /// Maps a single page at the given address within the virtual memory area.
+ ///
+ /// This operation does not take ownership of the page.
+ #[inline]
+ pub fn vm_insert_page(&self, address: usize, page: &Page) -> Result {
+ // SAFETY: By the type invariant of `Self` caller has read access and has verified that
+ // `VM_MIXEDMAP` is set. By invariant on `Page` the page has order 0.
+ to_result(unsafe { bindings::vm_insert_page(self.as_ptr(), address, page.as_ptr()) })
+ }
+}
+
+/// A configuration object for setting up a VMA in an `f_ops->mmap()` hook.
+///
+/// The `f_ops->mmap()` hook is called when a new VMA is being created, and the hook is able to
+/// configure the VMA in various ways to fit the driver that owns it. Using `VmaNew` indicates that
+/// you are allowed to perform operations on the VMA that can only be performed before the VMA is
+/// fully initialized.
+///
+/// # Invariants
+///
+/// For the duration of 'a, the referenced vma must be undergoing initialization in an
+/// `f_ops->mmap()` hook.
+pub struct VmaNew {
+ vma: VmaRef,
+}
+
+// Make all `VmaRef` methods available on `VmaNew`.
+impl Deref for VmaNew {
+ type Target = VmaRef;
+
+ #[inline]
+ fn deref(&self) -> &VmaRef {
+ &self.vma
+ }
+}
+
+impl VmaNew {
+ /// Access a virtual memory area given a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `vma` is undergoing initial vma setup for the duration of 'a.
+ #[inline]
+ pub unsafe fn from_raw<'a>(vma: *mut bindings::vm_area_struct) -> &'a Self {
+ // SAFETY: The caller ensures that the invariants are satisfied for the duration of 'a.
+ unsafe { &*vma.cast() }
+ }
+
+ /// Internal method for updating the vma flags.
+ ///
+ /// # Safety
+ ///
+ /// This must not be used to set the flags to an invalid value.
+ #[inline]
+ unsafe fn update_flags(&self, set: vm_flags_t, unset: vm_flags_t) {
+ let mut flags = self.flags();
+ flags |= set;
+ flags &= !unset;
+
+ // SAFETY: This is not a data race: the vma is undergoing initial setup, so it's not yet
+ // shared. Additionally, `VmaNew` is `!Sync`, so it cannot be used to write in parallel.
+ // The caller promises that this does not set the flags to an invalid value.
+ unsafe { (*self.as_ptr()).__bindgen_anon_2.__vm_flags = flags };
+ }
+
+ /// Set the `VM_MIXEDMAP` flag on this vma.
+ ///
+ /// This enables the vma to contain both `struct page` and pure PFN pages. Returns a reference
+ /// that can be used to call `vm_insert_page` on the vma.
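+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, not part of this patch, of an `f_ops->mmap()` body that inserts a
+ /// single page; `map_one_page` is a hypothetical helper and `page` is a page the driver
+ /// already owns.
+ ///
+ /// ```ignore
+ /// use kernel::mm::virt::VmaNew;
+ /// use kernel::page::Page;
+ /// use kernel::prelude::*;
+ ///
+ /// fn map_one_page(vma: &VmaNew, page: &Page) -> Result {
+ ///     let mixed = vma.set_mixedmap();
+ ///     mixed.vm_insert_page(vma.start(), page)
+ /// }
+ /// ```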
+ #[inline]
+ pub fn set_mixedmap(&self) -> &VmaMixedMap {
+ // SAFETY: We don't yet provide a way to set VM_PFNMAP, so this cannot put the flags in an
+ // invalid state.
+ unsafe { self.update_flags(flags::MIXEDMAP, 0) };
+
+ // SAFETY: We just set `VM_MIXEDMAP` on the vma.
+ unsafe { VmaMixedMap::from_raw(self.vma.as_ptr()) }
+ }
+
+ /// Set the `VM_IO` flag on this vma.
+ ///
+ /// This is used for memory mapped IO and similar. The flag tells other parts of the kernel to
+ /// avoid looking at the pages. For memory mapped IO this is useful as accesses to the pages
+ /// could have side effects.
+ #[inline]
+ pub fn set_io(&self) {
+ // SAFETY: Setting the VM_IO flag is always okay.
+ unsafe { self.update_flags(flags::IO, 0) };
+ }
+
+ /// Set the `VM_DONTEXPAND` flag on this vma.
+ ///
+ /// This prevents the vma from being expanded with `mremap()`.
+ #[inline]
+ pub fn set_dontexpand(&self) {
+ // SAFETY: Setting the VM_DONTEXPAND flag is always okay.
+ unsafe { self.update_flags(flags::DONTEXPAND, 0) };
+ }
+
+ /// Set the `VM_DONTCOPY` flag on this vma.
+ ///
+ /// This prevents the vma from being copied on fork. This option is only permanent if `VM_IO`
+ /// is set.
+ #[inline]
+ pub fn set_dontcopy(&self) {
+ // SAFETY: Setting the VM_DONTCOPY flag is always okay.
+ unsafe { self.update_flags(flags::DONTCOPY, 0) };
+ }
+
+ /// Set the `VM_DONTDUMP` flag on this vma.
+ ///
+ /// This prevents the vma from being included in core dumps. This option is only permanent if
+ /// `VM_IO` is set.
+ #[inline]
+ pub fn set_dontdump(&self) {
+ // SAFETY: Setting the VM_DONTDUMP flag is always okay.
+ unsafe { self.update_flags(flags::DONTDUMP, 0) };
+ }
+
+ /// Returns whether `VM_READ` is set.
+ ///
+ /// This flag indicates whether userspace is mapping this vma as readable.
+ #[inline]
+ pub fn readable(&self) -> bool {
+ (self.flags() & flags::READ) != 0
+ }
+
+ /// Try to clear the `VM_MAYREAD` flag, failing if `VM_READ` is set.
+ ///
+ /// This flag indicates whether userspace is allowed to make this vma readable with
+ /// `mprotect()`.
+ ///
+ /// Note that this operation is irreversible. Once `VM_MAYREAD` has been cleared, it can never
+ /// be set again.
+ #[inline]
+ pub fn try_clear_mayread(&self) -> Result {
+ if self.readable() {
+ return Err(EINVAL);
+ }
+ // SAFETY: Clearing `VM_MAYREAD` is okay when `VM_READ` is not set.
+ unsafe { self.update_flags(0, flags::MAYREAD) };
+ Ok(())
+ }
+
+ /// Returns whether `VM_WRITE` is set.
+ ///
+ /// This flag indicates whether userspace is mapping this vma as writable.
+ #[inline]
+ pub fn writable(&self) -> bool {
+ (self.flags() & flags::WRITE) != 0
+ }
+
+ /// Try to clear the `VM_MAYWRITE` flag, failing if `VM_WRITE` is set.
+ ///
+ /// This flag indicates whether userspace is allowed to make this vma writable with
+ /// `mprotect()`.
+ ///
+ /// Note that this operation is irreversible. Once `VM_MAYWRITE` has been cleared, it can never
+ /// be set again.
+ #[inline]
+ pub fn try_clear_maywrite(&self) -> Result {
+ if self.writable() {
+ return Err(EINVAL);
+ }
+ // SAFETY: Clearing `VM_MAYWRITE` is okay when `VM_WRITE` is not set.
+ unsafe { self.update_flags(0, flags::MAYWRITE) };
+ Ok(())
+ }
+
+ /// Returns whether `VM_EXEC` is set.
+ ///
+ /// This flag indicates whether userspace is mapping this vma as executable.
+ #[inline]
+ pub fn executable(&self) -> bool {
+ (self.flags() & flags::EXEC) != 0
+ }
+
+ /// Try to clear the `VM_MAYEXEC` flag, failing if `VM_EXEC` is set.
+ ///
+ /// This flag indicates whether userspace is allowed to make this vma executable with
+ /// `mprotect()`.
+ ///
+ /// Note that this operation is irreversible. Once `VM_MAYEXEC` has been cleared, it can never
+ /// be set again.
+ #[inline]
+ pub fn try_clear_mayexec(&self) -> Result {
+ if self.executable() {
+ return Err(EINVAL);
+ }
+ // SAFETY: Clearing `VM_MAYEXEC` is okay when `VM_EXEC` is not set.
+ unsafe { self.update_flags(0, flags::MAYEXEC) };
+ Ok(())
+ }
+}
+
+/// The integer type used for vma flags.
+#[doc(inline)]
+pub use bindings::vm_flags_t;
+
+/// All possible flags for [`VmaRef`].
+pub mod flags {
+ use super::vm_flags_t;
+ use crate::bindings;
+
+ /// No flags are set.
+ pub const NONE: vm_flags_t = bindings::VM_NONE as _;
+
+ /// Mapping allows reads.
+ pub const READ: vm_flags_t = bindings::VM_READ as _;
+
+ /// Mapping allows writes.
+ pub const WRITE: vm_flags_t = bindings::VM_WRITE as _;
+
+ /// Mapping allows execution.
+ pub const EXEC: vm_flags_t = bindings::VM_EXEC as _;
+
+ /// Mapping is shared.
+ pub const SHARED: vm_flags_t = bindings::VM_SHARED as _;
+
+ /// Mapping may be updated to allow reads.
+ pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as _;
+
+ /// Mapping may be updated to allow writes.
+ pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as _;
+
+ /// Mapping may be updated to allow execution.
+ pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as _;
+
+ /// Mapping may be updated to be shared.
+ pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as _;
+
+ /// Page-ranges managed without `struct page`, just pure PFN.
+ pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as _;
+
+ /// Memory mapped I/O or similar.
+ pub const IO: vm_flags_t = bindings::VM_IO as _;
+
+ /// Do not copy this vma on fork.
+ pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as _;
+
+ /// Cannot expand with mremap().
+ pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as _;
+
+ /// Lock the pages covered when they are faulted in.
+ pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as _;
+
+ /// Is a VM accounted object.
+ pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as _;
+
+ /// Should the VM suppress accounting.
+ pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as _;
+
+ /// Huge TLB Page VM.
+ pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as _;
+
+ /// Synchronous page faults. (DAX-specific)
+ pub const SYNC: vm_flags_t = bindings::VM_SYNC as _;
+
+ /// Architecture-specific flag.
+ pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as _;
+
+ /// Wipe VMA contents in child on fork.
+ pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as _;
+
+ /// Do not include in the core dump.
+ pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as _;
+
+ /// Not soft dirty clean area.
+ pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as _;
+
+ /// Can contain `struct page` and pure PFN pages.
+ pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as _;
+
+ /// MADV_HUGEPAGE marked this vma.
+ pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as _;
+
+ /// MADV_NOHUGEPAGE marked this vma.
+ pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as _;
+
+ /// KSM may merge identical pages.
+ pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as _;
+}
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index 265d0e1c1371..32ea43ece646 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -6,9 +6,10 @@
//!
//! C headers: [`include/linux/phy.h`](srctree/include/linux/phy.h).
-use crate::{bindings, error::*, prelude::*, str::CStr, types::Opaque};
+use crate::{error::*, prelude::*, types::Opaque};
+use core::{marker::PhantomData, ptr::addr_of_mut};
-use core::marker::PhantomData;
+pub mod reg;
/// PHY state machine states.
///
@@ -58,8 +59,9 @@ pub enum DuplexMode {
///
/// # Invariants
///
-/// Referencing a `phy_device` using this struct asserts that you are in
-/// a context where all methods defined on this struct are safe to call.
+/// - Referencing a `phy_device` using this struct asserts that you are in
+/// a context where all methods defined on this struct are safe to call.
+/// - This struct always has a valid `self.0.mdio.dev`.
///
/// [`struct phy_device`]: srctree/include/linux/phy.h
// During the calls to most functions in [`Driver`], the C side (`PHYLIB`) holds a lock that is
@@ -76,9 +78,11 @@ impl Device {
///
/// # Safety
///
- /// For the duration of 'a, the pointer must point at a valid `phy_device`,
- /// and the caller must be in a context where all methods defined on this struct
- /// are safe to call.
+ /// For the duration of `'a`,
+ /// - the pointer must point at a valid `phy_device`, and the caller
+ /// must be in a context where all methods defined on this struct
+ /// are safe to call.
+ /// - `(*ptr).mdio.dev` must be valid.
unsafe fn from_raw<'a>(ptr: *mut bindings::phy_device) -> &'a mut Self {
// CAST: `Self` is a `repr(transparent)` wrapper around `bindings::phy_device`.
let ptr = ptr.cast::<Self>();
@@ -175,32 +179,15 @@ impl Device {
unsafe { (*phydev).duplex = v };
}
- /// Reads a given C22 PHY register.
+ /// Reads a PHY register.
// This function reads a hardware register and updates the stats, so it takes `&mut self`.
- pub fn read(&mut self, regnum: u16) -> Result<u16> {
- let phydev = self.0.get();
- // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Self`.
- // So it's just an FFI call, open code of `phy_read()` with a valid `phy_device` pointer
- // `phydev`.
- let ret = unsafe {
- bindings::mdiobus_read((*phydev).mdio.bus, (*phydev).mdio.addr, regnum.into())
- };
- if ret < 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(ret as u16)
- }
+ pub fn read<R: reg::Register>(&mut self, reg: R) -> Result<u16> {
+ reg.read(self)
}
- /// Writes a given C22 PHY register.
- pub fn write(&mut self, regnum: u16, val: u16) -> Result {
- let phydev = self.0.get();
- // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Self`.
- // So it's just an FFI call, open code of `phy_write()` with a valid `phy_device` pointer
- // `phydev`.
- to_result(unsafe {
- bindings::mdiobus_write((*phydev).mdio.bus, (*phydev).mdio.addr, regnum.into(), val)
- })
+ /// Writes a PHY register.
+ pub fn write<R: reg::Register>(&mut self, reg: R, val: u16) -> Result {
+ reg.write(self, val)
}
/// Reads a paged register.
@@ -265,16 +252,8 @@ impl Device {
}
/// Checks the link status and updates current link state.
- pub fn genphy_read_status(&mut self) -> Result<u16> {
- let phydev = self.0.get();
- // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Self`.
- // So it's just an FFI call.
- let ret = unsafe { bindings::genphy_read_status(phydev) };
- if ret < 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(ret as u16)
- }
+ pub fn genphy_read_status<R: reg::Register>(&mut self) -> Result<u16> {
+ R::read_status(self)
}
/// Updates the link status.
@@ -302,6 +281,14 @@ impl Device {
}
}
+impl AsRef<kernel::device::Device> for Device {
+ fn as_ref(&self) -> &kernel::device::Device {
+ let phydev = self.0.get();
+ // SAFETY: The struct invariant ensures that `mdio.dev` is valid.
+ unsafe { kernel::device::Device::as_ref(addr_of_mut!((*phydev).mdio.dev)) }
+ }
+}
+
/// Defines certain other features this PHY supports (like interrupts).
///
/// These flag values are used in [`Driver::FLAGS`].
@@ -327,7 +314,7 @@ impl<T: Driver> Adapter<T> {
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
unsafe extern "C" fn soft_reset_callback(
phydev: *mut bindings::phy_device,
- ) -> core::ffi::c_int {
+ ) -> crate::ffi::c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -341,9 +328,24 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
+ unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
+ from_result(|| {
+ // SAFETY: This callback is called only in contexts
+ // where we can exclusively access `phy_device` because
+ // it's not published yet, so the accessors on `Device` are okay
+ // to call.
+ let dev = unsafe { Device::from_raw(phydev) };
+ T::probe(dev)?;
+ Ok(0)
+ })
+ }
+
+ /// # Safety
+ ///
+ /// `phydev` must be passed by the corresponding callback in `phy_driver`.
unsafe extern "C" fn get_features_callback(
phydev: *mut bindings::phy_device,
- ) -> core::ffi::c_int {
+ ) -> crate::ffi::c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -357,7 +359,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int {
+ unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
from_result(|| {
// SAFETY: The C core code ensures that the accessors on
// `Device` are okay to call even though `phy_device->lock`
@@ -371,7 +373,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int {
+ unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
from_result(|| {
// SAFETY: The C core code ensures that the accessors on
// `Device` are okay to call even though `phy_device->lock`
@@ -387,7 +389,7 @@ impl<T: Driver> Adapter<T> {
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
unsafe extern "C" fn config_aneg_callback(
phydev: *mut bindings::phy_device,
- ) -> core::ffi::c_int {
+ ) -> crate::ffi::c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -403,7 +405,7 @@ impl<T: Driver> Adapter<T> {
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
unsafe extern "C" fn read_status_callback(
phydev: *mut bindings::phy_device,
- ) -> core::ffi::c_int {
+ ) -> crate::ffi::c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -419,7 +421,8 @@ impl<T: Driver> Adapter<T> {
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
unsafe extern "C" fn match_phy_device_callback(
phydev: *mut bindings::phy_device,
- ) -> core::ffi::c_int {
+ _phydrv: *const bindings::phy_driver,
+ ) -> crate::ffi::c_int {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
// `Device` are okay to call.
@@ -491,7 +494,7 @@ impl<T: Driver> Adapter<T> {
pub struct DriverVTable(Opaque<bindings::phy_driver>);
// SAFETY: `DriverVTable` doesn't expose any &self method to access internal data, so it's safe to
-// share `&DriverVTable` across execution context boundries.
+// share `&DriverVTable` across execution context boundaries.
unsafe impl Sync for DriverVTable {}
/// Creates a [`DriverVTable`] instance from [`Driver`].
@@ -511,6 +514,11 @@ pub const fn create_phy_driver<T: Driver>() -> DriverVTable {
} else {
None
},
+ probe: if T::HAS_PROBE {
+ Some(Adapter::<T>::probe_callback)
+ } else {
+ None
+ },
get_features: if T::HAS_GET_FEATURES {
Some(Adapter::<T>::get_features_callback)
} else {
@@ -580,12 +588,17 @@ pub trait Driver {
/// Issues a PHY software reset.
fn soft_reset(_dev: &mut Device) -> Result {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Sets up device-specific structures during discovery.
+ fn probe(_dev: &mut Device) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Probes the hardware to determine what abilities it has.
fn get_features(_dev: &mut Device) -> Result {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Returns true if this is a suitable driver for the given phydev.
@@ -597,32 +610,32 @@ pub trait Driver {
/// Configures the advertisement and resets auto-negotiation
/// if auto-negotiation is enabled.
fn config_aneg(_dev: &mut Device) -> Result {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Determines the negotiated speed and duplex.
fn read_status(_dev: &mut Device) -> Result<u16> {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Suspends the hardware, saving state if needed.
fn suspend(_dev: &mut Device) -> Result {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Resumes the hardware, restoring state if needed.
fn resume(_dev: &mut Device) -> Result {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD read function for reading a MMD register.
fn read_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16) -> Result<u16> {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD write function for writing a MMD register.
fn write_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16, _val: u16) -> Result {
- kernel::build_error(VTABLE_DEFAULT_ERROR)
+ build_error!(VTABLE_DEFAULT_ERROR)
}
/// Callback for notification of link change.
@@ -778,7 +791,7 @@ impl DeviceMask {
/// DeviceId::new_with_driver::<PhySample>()
/// ],
/// name: "rust_sample_phy",
-/// author: "Rust for Linux Contributors",
+/// authors: ["Rust for Linux Contributors"],
/// description: "Rust sample PHYs driver",
/// license: "GPL",
/// }
@@ -807,7 +820,7 @@ impl DeviceMask {
/// module! {
/// type: Module,
/// name: "rust_sample_phy",
-/// author: "Rust for Linux Contributors",
+/// authors: ["Rust for Linux Contributors"],
/// description: "Rust sample PHYs driver",
/// license: "GPL",
/// }
@@ -825,7 +838,7 @@ impl DeviceMask {
/// [::kernel::net::phy::create_phy_driver::<PhySample>()];
///
/// impl ::kernel::Module for Module {
-/// fn init(module: &'static ThisModule) -> Result<Self> {
+/// fn init(module: &'static ::kernel::ThisModule) -> Result<Self> {
/// let drivers = unsafe { &mut DRIVERS };
/// let mut reg = ::kernel::net::phy::Registration::register(
/// module,
@@ -836,9 +849,7 @@ impl DeviceMask {
/// }
/// };
///
-/// #[cfg(MODULE)]
-/// #[no_mangle]
-/// static __mod_mdio__phydev_device_table: [::kernel::bindings::mdio_device_id; 2] = [
+/// const _DEVICE_TABLE: [::kernel::bindings::mdio_device_id; 2] = [
/// ::kernel::bindings::mdio_device_id {
/// phy_id: 0x00000001,
/// phy_id_mask: 0xffffffff,
@@ -848,6 +859,9 @@ impl DeviceMask {
/// phy_id_mask: 0,
/// },
/// ];
+/// #[cfg(MODULE)]
+/// #[no_mangle]
+/// static __mod_device_table__mdio__phydev: [::kernel::bindings::mdio_device_id; 2] = _DEVICE_TABLE;
/// ```
#[macro_export]
macro_rules! module_phy_driver {
@@ -859,9 +873,7 @@ macro_rules! module_phy_driver {
(@device_table [$($dev:expr),+]) => {
// SAFETY: C will not read off the end of this constant since the last element is zero.
- #[cfg(MODULE)]
- #[no_mangle]
- static __mod_mdio__phydev_device_table: [$crate::bindings::mdio_device_id;
+ const _DEVICE_TABLE: [$crate::bindings::mdio_device_id;
$crate::module_phy_driver!(@count_devices $($dev),+) + 1] = [
$($dev.mdio_device_id()),+,
$crate::bindings::mdio_device_id {
@@ -869,6 +881,11 @@ macro_rules! module_phy_driver {
phy_id_mask: 0
}
];
+
+ #[cfg(MODULE)]
+ #[no_mangle]
+ static __mod_device_table__mdio__phydev: [$crate::bindings::mdio_device_id;
+ $crate::module_phy_driver!(@count_devices $($dev),+) + 1] = _DEVICE_TABLE;
};
(drivers: [$($driver:ident),+ $(,)?], device_table: [$($dev:expr),+ $(,)?], $($f:tt)*) => {
@@ -887,7 +904,7 @@ macro_rules! module_phy_driver {
[$($crate::net::phy::create_phy_driver::<$driver>()),+];
impl $crate::Module for Module {
- fn init(module: &'static ThisModule) -> Result<Self> {
+ fn init(module: &'static $crate::ThisModule) -> Result<Self> {
// SAFETY: The anonymous constant guarantees that nobody else can access
// the `DRIVERS` static. The array is used only in the C side.
let drivers = unsafe { &mut DRIVERS };
diff --git a/rust/kernel/net/phy/reg.rs b/rust/kernel/net/phy/reg.rs
new file mode 100644
index 000000000000..a7db0064cb7d
--- /dev/null
+++ b/rust/kernel/net/phy/reg.rs
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+//! PHY register interfaces.
+//!
+//! This module provides support for accessing PHY registers in the
+//! Ethernet management interface clauses 22 and 45 register namespaces, as
+//! defined in IEEE 802.3.
+
+use super::Device;
+use crate::build_assert;
+use crate::error::*;
+use crate::uapi;
+
+mod private {
+ /// Marker that a trait cannot be implemented outside of this crate.
+ pub trait Sealed {}
+}
+
+/// Accesses PHY registers.
+///
+/// This trait is used to implement the unified interface to access
+/// C22 and C45 PHY registers.
+///
+/// # Examples
+///
+/// ```ignore
+/// fn link_change_notify(dev: &mut Device) {
+/// // read C22 BMCR register
+/// dev.read(C22::BMCR);
+/// // read C45 PMA/PMD control 1 register
+/// dev.read(C45::new(Mmd::PMAPMD, 0));
+///
+/// // Checks the link status as reported by registers in the C22 namespace
+/// // and updates current link state.
+/// dev.genphy_read_status::<phy::C22>();
+/// // Checks the link status as reported by registers in the C45 namespace
+/// // and updates current link state.
+/// dev.genphy_read_status::<phy::C45>();
+/// }
+/// ```
+pub trait Register: private::Sealed {
+ /// Reads a PHY register.
+ fn read(&self, dev: &mut Device) -> Result<u16>;
+
+ /// Writes a PHY register.
+ fn write(&self, dev: &mut Device, val: u16) -> Result;
+
+ /// Checks the link status and updates current link state.
+ fn read_status(dev: &mut Device) -> Result<u16>;
+}
+
+/// A single MDIO clause 22 register address (5 bits).
+#[derive(Copy, Clone, Debug)]
+pub struct C22(u8);
+
+impl C22 {
+ /// Basic mode control.
+ pub const BMCR: Self = C22(0x00);
+ /// Basic mode status.
+ pub const BMSR: Self = C22(0x01);
+ /// PHY identifier 1.
+ pub const PHYSID1: Self = C22(0x02);
+ /// PHY identifier 2.
+ pub const PHYSID2: Self = C22(0x03);
+ /// Auto-negotiation advertisement.
+ pub const ADVERTISE: Self = C22(0x04);
+ /// Auto-negotiation link partner base page ability.
+ pub const LPA: Self = C22(0x05);
+ /// Auto-negotiation expansion.
+ pub const EXPANSION: Self = C22(0x06);
+ /// Auto-negotiation next page transmit.
+ pub const NEXT_PAGE_TRANSMIT: Self = C22(0x07);
+ /// Auto-negotiation link partner received next page.
+ pub const LP_RECEIVED_NEXT_PAGE: Self = C22(0x08);
+ /// Master-slave control.
+ pub const MASTER_SLAVE_CONTROL: Self = C22(0x09);
+ /// Master-slave status.
+ pub const MASTER_SLAVE_STATUS: Self = C22(0x0a);
+ /// PSE Control.
+ pub const PSE_CONTROL: Self = C22(0x0b);
+ /// PSE Status.
+ pub const PSE_STATUS: Self = C22(0x0c);
+ /// MMD Register control.
+ pub const MMD_CONTROL: Self = C22(0x0d);
+ /// MMD Register address data.
+ pub const MMD_DATA: Self = C22(0x0e);
+ /// Extended status.
+ pub const EXTENDED_STATUS: Self = C22(0x0f);
+
+ /// Creates a new instance of `C22` with a vendor-specific register.
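+ ///
+ /// # Examples
+ ///
+ /// ```ignore
+ /// // Register 0x10 is the first vendor-specific C22 register; the address is
+ /// // validated at build time.
+ /// let reg = C22::vendor_specific::<0x10>();
+ /// ```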
+ pub const fn vendor_specific<const N: u8>() -> Self {
+ build_assert!(
+ N > 0x0f && N < 0x20,
+ "Vendor-specific register address must be between 16 and 31"
+ );
+ C22(N)
+ }
+}
+
+impl private::Sealed for C22 {}
+
+impl Register for C22 {
+ fn read(&self, dev: &mut Device) -> Result<u16> {
+ let phydev = dev.0.get();
+ // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Device`.
+ // So it's just an FFI call, open code of `phy_read()` with a valid `phy_device` pointer
+ // `phydev`.
+ let ret = unsafe {
+ bindings::mdiobus_read((*phydev).mdio.bus, (*phydev).mdio.addr, self.0.into())
+ };
+ to_result(ret)?;
+ Ok(ret as u16)
+ }
+
+ fn write(&self, dev: &mut Device, val: u16) -> Result {
+ let phydev = dev.0.get();
+ // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Device`.
+ // So it's just an FFI call, open code of `phy_write()` with a valid `phy_device` pointer
+ // `phydev`.
+ to_result(unsafe {
+ bindings::mdiobus_write((*phydev).mdio.bus, (*phydev).mdio.addr, self.0.into(), val)
+ })
+ }
+
+ fn read_status(dev: &mut Device) -> Result<u16> {
+ let phydev = dev.0.get();
+ // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Device`.
+ // So it's just an FFI call.
+ let ret = unsafe { bindings::genphy_read_status(phydev) };
+ to_result(ret)?;
+ Ok(ret as u16)
+ }
+}
+
+/// A single MDIO clause 45 device address (MMD).
+#[derive(Copy, Clone, Debug)]
+pub struct Mmd(u8);
+
+impl Mmd {
+ /// Physical Medium Attachment/Dependent.
+ pub const PMAPMD: Self = Mmd(uapi::MDIO_MMD_PMAPMD as u8);
+ /// WAN interface sublayer.
+ pub const WIS: Self = Mmd(uapi::MDIO_MMD_WIS as u8);
+ /// Physical coding sublayer.
+ pub const PCS: Self = Mmd(uapi::MDIO_MMD_PCS as u8);
+ /// PHY Extender sublayer.
+ pub const PHYXS: Self = Mmd(uapi::MDIO_MMD_PHYXS as u8);
+ /// DTE Extender sublayer.
+ pub const DTEXS: Self = Mmd(uapi::MDIO_MMD_DTEXS as u8);
+ /// Transmission convergence.
+ pub const TC: Self = Mmd(uapi::MDIO_MMD_TC as u8);
+ /// Auto negotiation.
+ pub const AN: Self = Mmd(uapi::MDIO_MMD_AN as u8);
+ /// Separated PMA (1).
+ pub const SEPARATED_PMA1: Self = Mmd(8);
+ /// Separated PMA (2).
+ pub const SEPARATED_PMA2: Self = Mmd(9);
+ /// Separated PMA (3).
+ pub const SEPARATED_PMA3: Self = Mmd(10);
+ /// Separated PMA (4).
+ pub const SEPARATED_PMA4: Self = Mmd(11);
+ /// OFDM PMA/PMD.
+ pub const OFDM_PMAPMD: Self = Mmd(12);
+ /// Power unit.
+ pub const POWER_UNIT: Self = Mmd(13);
+ /// Clause 22 extension.
+ pub const C22_EXT: Self = Mmd(uapi::MDIO_MMD_C22EXT as u8);
+ /// Vendor specific 1.
+ pub const VEND1: Self = Mmd(uapi::MDIO_MMD_VEND1 as u8);
+ /// Vendor specific 2.
+ pub const VEND2: Self = Mmd(uapi::MDIO_MMD_VEND2 as u8);
+}
+
+/// A single MDIO clause 45 register device and address.
+///
+/// Clause 45 uses a 5-bit device address to access a specific MMD within
+/// a port, then a 16-bit register address to access a location within
+/// that device. `C45` represents this by storing a [`Mmd`] and
+/// a register number.
+pub struct C45 {
+ devad: Mmd,
+ regnum: u16,
+}
+
+impl C45 {
+ /// Creates a new instance of `C45`.
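+ ///
+ /// # Examples
+ ///
+ /// ```ignore
+ /// // PMA/PMD control 1: register 0 in the PMA/PMD MMD.
+ /// let reg = C45::new(Mmd::PMAPMD, 0);
+ /// ```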
+ pub fn new(devad: Mmd, regnum: u16) -> Self {
+ Self { devad, regnum }
+ }
+}
+
+impl private::Sealed for C45 {}
+
+impl Register for C45 {
+ fn read(&self, dev: &mut Device) -> Result<u16> {
+ let phydev = dev.0.get();
+ // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Device`.
+ // So it's just an FFI call.
+ let ret =
+ unsafe { bindings::phy_read_mmd(phydev, self.devad.0.into(), self.regnum.into()) };
+ to_result(ret)?;
+ Ok(ret as u16)
+ }
+
+ fn write(&self, dev: &mut Device, val: u16) -> Result {
+ let phydev = dev.0.get();
+ // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Device`.
+ // So it's just an FFI call.
+ to_result(unsafe {
+ bindings::phy_write_mmd(phydev, self.devad.0.into(), self.regnum.into(), val)
+ })
+ }
+
+ fn read_status(dev: &mut Device) -> Result<u16> {
+ let phydev = dev.0.get();
+ // SAFETY: `phydev` is pointing to a valid object by the type invariant of `Device`.
+ // So it's just an FFI call.
+ let ret = unsafe { bindings::genphy_c45_read_status(phydev) };
+ to_result(ret)?;
+ Ok(ret as u16)
+ }
+}
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
new file mode 100644
index 000000000000..04f2d8ef29cb
--- /dev/null
+++ b/rust/kernel/of.rs
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Device Tree / Open Firmware abstractions.
+
+use crate::{bindings, device_id::RawDeviceId, prelude::*};
+
+/// IdTable type for OF drivers.
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// An open firmware device id.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::of_device_id);
+
+// SAFETY:
+// * `DeviceId` is a `#[repr(transparent)]` wrapper of `struct of_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+// * `DRIVER_DATA_OFFSET` is the offset to the `data` field.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::of_device_id;
+
+ const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::of_device_id, data);
+
+ fn index(&self) -> usize {
+ self.0.data as _
+ }
+}
+
+impl DeviceId {
+ /// Create a new device id from an OF 'compatible' string.
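+ ///
+ /// # Examples
+ ///
+ /// A sketch with an illustrative compatible string:
+ ///
+ /// ```ignore
+ /// use kernel::{c_str, of};
+ ///
+ /// const ID: of::DeviceId = of::DeviceId::new(c_str!("vendor,my-device"));
+ /// ```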
+ pub const fn new(compatible: &'static CStr) -> Self {
+ let src = compatible.as_bytes_with_nul();
+ // Replace with `bindings::of_device_id::default()` once stabilized for `const`.
+ // SAFETY: FFI type is valid to be zero-initialized.
+ let mut of: bindings::of_device_id = unsafe { core::mem::zeroed() };
+
+ // TODO: Use `clone_from_slice` once the corresponding types do match.
+ let mut i = 0;
+ while i < src.len() {
+ of.compatible[i] = src[i] as _;
+ i += 1;
+ }
+
+ Self(of)
+ }
+}
+
+/// Create an OF `IdTable` with an "alias" for modpost.
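+///
+/// The following sketch shows typical usage in a driver; the driver type and compatible
+/// string are illustrative.
+///
+/// ```ignore
+/// kernel::of_device_table!(
+///     OF_TABLE,
+///     MODULE_OF_TABLE,
+///     <MyDriver as platform::Driver>::IdInfo,
+///     [(of::DeviceId::new(c_str!("vendor,my-device")), ())]
+/// );
+/// ```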
+#[macro_export]
+macro_rules! of_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::of::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("of", $module_table_name, $table_name);
+ };
+}
diff --git a/rust/kernel/opp.rs b/rust/kernel/opp.rs
new file mode 100644
index 000000000000..a566fc3e7dcb
--- /dev/null
+++ b/rust/kernel/opp.rs
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Operating performance points.
+//!
+//! This module provides Rust abstractions for interacting with the OPP subsystem.
+//!
+//! C header: [`include/linux/pm_opp.h`](srctree/include/linux/pm_opp.h)
+//!
+//! Reference: <https://docs.kernel.org/power/opp.html>
+
+use crate::{
+ clk::Hertz,
+ cpumask::{Cpumask, CpumaskVar},
+ device::Device,
+ error::{code::*, from_err_ptr, from_result, to_result, Error, Result, VTABLE_DEFAULT_ERROR},
+ ffi::c_ulong,
+ prelude::*,
+ str::CString,
+ types::{ARef, AlwaysRefCounted, Opaque},
+};
+
+#[cfg(CONFIG_CPU_FREQ)]
+/// Frequency table implementation.
+mod freq {
+ use super::*;
+ use crate::cpufreq;
+ use core::ops::Deref;
+
+ /// OPP frequency table.
+ ///
+ /// A [`cpufreq::Table`] created from [`Table`].
+ pub struct FreqTable {
+ dev: ARef<Device>,
+ ptr: *mut bindings::cpufreq_frequency_table,
+ }
+
+ impl FreqTable {
+ /// Creates a new instance of [`FreqTable`] from [`Table`].
+ pub(crate) fn new(table: &Table) -> Result<Self> {
+ let mut ptr: *mut bindings::cpufreq_frequency_table = ptr::null_mut();
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe {
+ bindings::dev_pm_opp_init_cpufreq_table(table.dev.as_raw(), &mut ptr)
+ })?;
+
+ Ok(Self {
+ dev: table.dev.clone(),
+ ptr,
+ })
+ }
+
+ /// Returns a reference to the underlying [`cpufreq::Table`].
+ #[inline]
+ fn table(&self) -> &cpufreq::Table {
+ // SAFETY: The `ptr` is guaranteed by the C code to be valid.
+ unsafe { cpufreq::Table::from_raw(self.ptr) }
+ }
+ }
+
+ impl Deref for FreqTable {
+ type Target = cpufreq::Table;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.table()
+ }
+ }
+
+ impl Drop for FreqTable {
+ fn drop(&mut self) {
+ // SAFETY: The pointer was created via `dev_pm_opp_init_cpufreq_table`, and is only
+ // freed here.
+ unsafe {
+ bindings::dev_pm_opp_free_cpufreq_table(self.dev.as_raw(), &mut self.ptr)
+ };
+ }
+ }
+}
+
+#[cfg(CONFIG_CPU_FREQ)]
+pub use freq::FreqTable;
+
+use core::{marker::PhantomData, ptr};
+
+use macros::vtable;
+
+/// Creates a null-terminated vector of pointers to [`CString`]s.
+fn to_c_str_array(names: &[CString]) -> Result<KVec<*const u8>> {
+ // Allocate a null-terminated vector of pointers.
+ let mut list = KVec::with_capacity(names.len() + 1, GFP_KERNEL)?;
+
+ for name in names.iter() {
+ list.push(name.as_ptr() as _, GFP_KERNEL)?;
+ }
+
+ list.push(ptr::null(), GFP_KERNEL)?;
+ Ok(list)
+}
+
+/// The voltage unit.
+///
+/// Represents voltage in microvolts, wrapping a [`c_ulong`] value.
+///
+/// ## Examples
+///
+/// ```
+/// use kernel::opp::MicroVolt;
+///
+/// let raw = 90500;
+/// let volt = MicroVolt(raw);
+///
+/// assert_eq!(usize::from(volt), raw);
+/// assert_eq!(volt, MicroVolt(raw));
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct MicroVolt(pub c_ulong);
+
+impl From<MicroVolt> for c_ulong {
+ #[inline]
+ fn from(volt: MicroVolt) -> Self {
+ volt.0
+ }
+}
+
+/// The power unit.
+///
+/// Represents power in microwatts, wrapping a [`c_ulong`] value.
+///
+/// ## Examples
+///
+/// ```
+/// use kernel::opp::MicroWatt;
+///
+/// let raw = 1000000;
+/// let power = MicroWatt(raw);
+///
+/// assert_eq!(usize::from(power), raw);
+/// assert_eq!(power, MicroWatt(raw));
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct MicroWatt(pub c_ulong);
+
+impl From<MicroWatt> for c_ulong {
+ #[inline]
+ fn from(power: MicroWatt) -> Self {
+ power.0
+ }
+}
+
+/// Handle for a dynamically created [`OPP`].
+///
+/// The associated [`OPP`] is automatically removed when the [`Token`] is dropped.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to create an [`OPP`] dynamically.
+///
+/// ```
+/// use kernel::clk::Hertz;
+/// use kernel::device::Device;
+/// use kernel::error::Result;
+/// use kernel::opp::{Data, MicroVolt, Token};
+/// use kernel::types::ARef;
+///
+/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
+/// let data = Data::new(freq, volt, level, false);
+///
+/// // OPP is removed once token goes out of scope.
+/// data.add_opp(dev)
+/// }
+/// ```
+pub struct Token {
+ dev: ARef<Device>,
+ freq: Hertz,
+}
+
+impl Token {
+ /// Dynamically adds an [`OPP`] and returns a [`Token`] that removes it on drop.
+ fn new(dev: &ARef<Device>, mut data: Data) -> Result<Self> {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe { bindings::dev_pm_opp_add_dynamic(dev.as_raw(), &mut data.0) })?;
+ Ok(Self {
+ dev: dev.clone(),
+ freq: data.freq(),
+ })
+ }
+}
+
+impl Drop for Token {
+ fn drop(&mut self) {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ unsafe { bindings::dev_pm_opp_remove(self.dev.as_raw(), self.freq.into()) };
+ }
+}
+
+/// OPP data.
+///
+/// Rust abstraction for the C `struct dev_pm_opp_data`, used to define operating performance
+/// points (OPPs) dynamically.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to create an [`OPP`] with [`Data`].
+///
+/// ```
+/// use kernel::clk::Hertz;
+/// use kernel::device::Device;
+/// use kernel::error::Result;
+/// use kernel::opp::{Data, MicroVolt, Token};
+/// use kernel::types::ARef;
+///
+/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
+/// let data = Data::new(freq, volt, level, false);
+///
+/// // OPP is removed once token goes out of scope.
+/// data.add_opp(dev)
+/// }
+/// ```
+#[repr(transparent)]
+pub struct Data(bindings::dev_pm_opp_data);
+
+impl Data {
+ /// Creates a new instance of [`Data`].
+ ///
+ /// This can be used to define a dynamic OPP to be added to a device.
+ pub fn new(freq: Hertz, volt: MicroVolt, level: u32, turbo: bool) -> Self {
+ Self(bindings::dev_pm_opp_data {
+ turbo,
+ freq: freq.into(),
+ u_volt: volt.into(),
+ level,
+ })
+ }
+
+ /// Adds an [`OPP`] dynamically.
+ ///
+ /// Returns a [`Token`] that ensures the OPP is automatically removed
+ /// when it goes out of scope.
+ #[inline]
+ pub fn add_opp(self, dev: &ARef<Device>) -> Result<Token> {
+ Token::new(dev, self)
+ }
+
+ /// Returns the frequency associated with this OPP data.
+ #[inline]
+ fn freq(&self) -> Hertz {
+ Hertz(self.0.freq)
+ }
+}
+
+/// [`OPP`] search options.
+///
+/// ## Examples
+///
+/// Defines how to search for an [`OPP`] in a [`Table`] relative to a frequency.
+///
+/// ```
+/// use kernel::clk::Hertz;
+/// use kernel::error::Result;
+/// use kernel::opp::{OPP, SearchType, Table};
+/// use kernel::types::ARef;
+///
+/// fn find_opp(table: &Table, freq: Hertz) -> Result<ARef<OPP>> {
+/// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?;
+///
+/// pr_info!("OPP frequency is: {:?}\n", opp.freq(None));
+/// pr_info!("OPP voltage is: {:?}\n", opp.voltage());
+/// pr_info!("OPP level is: {}\n", opp.level());
+/// pr_info!("OPP power is: {:?}\n", opp.power());
+///
+/// Ok(opp)
+/// }
+/// ```
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum SearchType {
+ /// Match the exact frequency.
+ Exact,
+ /// Find the highest frequency less than or equal to the given value.
+ Floor,
+ /// Find the lowest frequency greater than or equal to the given value.
+ Ceil,
+}
+
+/// OPP configuration callbacks.
+///
+/// Implement this trait to customize OPP clock and regulator setup for your device.
+#[vtable]
+pub trait ConfigOps {
+ /// This is typically used to scale clocks when transitioning between OPPs.
+ #[inline]
+ fn config_clks(_dev: &Device, _table: &Table, _opp: &OPP, _scaling_down: bool) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// This provides access to the old and new OPPs, allowing for safe regulator adjustments.
+ #[inline]
+ fn config_regulators(
+ _dev: &Device,
+ _opp_old: &OPP,
+ _opp_new: &OPP,
+ _data: *mut *mut bindings::regulator,
+ _count: u32,
+ ) -> Result {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+}
+
+/// OPP configuration token.
+///
+/// Returned by the OPP core when configuration is applied to a [`Device`]. The associated
+/// configuration is automatically cleared when the token is dropped.
+pub struct ConfigToken(i32);
+
+impl Drop for ConfigToken {
+ fn drop(&mut self) {
+ // SAFETY: This is the same token value returned by the C code via `dev_pm_opp_set_config`.
+ unsafe { bindings::dev_pm_opp_clear_config(self.0) };
+ }
+}
+
+/// OPP configurations.
+///
+/// Rust abstraction for the C `struct dev_pm_opp_config`.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to set OPP property-name configuration for a [`Device`].
+///
+/// ```
+/// use kernel::device::Device;
+/// use kernel::error::Result;
+/// use kernel::opp::{Config, ConfigOps, ConfigToken};
+/// use kernel::str::CString;
+/// use kernel::types::ARef;
+/// use kernel::macros::vtable;
+///
+/// #[derive(Default)]
+/// struct Driver;
+///
+/// #[vtable]
+/// impl ConfigOps for Driver {}
+///
+/// fn configure(dev: &ARef<Device>) -> Result<ConfigToken> {
+/// let name = CString::try_from_fmt(fmt!("{}", "slow"))?;
+///
+/// // The OPP configuration is cleared once the [`ConfigToken`] goes out of scope.
+/// Config::<Driver>::new()
+/// .set_prop_name(name)?
+/// .set(dev)
+/// }
+/// ```
+#[derive(Default)]
+pub struct Config<T: ConfigOps>
+where
+ T: Default,
+{
+ clk_names: Option<KVec<CString>>,
+ prop_name: Option<CString>,
+ regulator_names: Option<KVec<CString>>,
+ supported_hw: Option<KVec<u32>>,
+
+ // Tuple containing (required device, index)
+ required_dev: Option<(ARef<Device>, u32)>,
+ _data: PhantomData<T>,
+}
+
+impl<T: ConfigOps + Default> Config<T> {
+ /// Creates a new instance of [`Config`].
+ #[inline]
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Initializes clock names.
+ pub fn set_clk_names(mut self, names: KVec<CString>) -> Result<Self> {
+ if self.clk_names.is_some() {
+ return Err(EBUSY);
+ }
+
+ if names.is_empty() {
+ return Err(EINVAL);
+ }
+
+ self.clk_names = Some(names);
+ Ok(self)
+ }
+
+ /// Initializes property name.
+ pub fn set_prop_name(mut self, name: CString) -> Result<Self> {
+ if self.prop_name.is_some() {
+ return Err(EBUSY);
+ }
+
+ self.prop_name = Some(name);
+ Ok(self)
+ }
+
+ /// Initializes regulator names.
+ pub fn set_regulator_names(mut self, names: KVec<CString>) -> Result<Self> {
+ if self.regulator_names.is_some() {
+ return Err(EBUSY);
+ }
+
+ if names.is_empty() {
+ return Err(EINVAL);
+ }
+
+ self.regulator_names = Some(names);
+
+ Ok(self)
+ }
+
+ /// Initializes required devices.
+ pub fn set_required_dev(mut self, dev: ARef<Device>, index: u32) -> Result<Self> {
+ if self.required_dev.is_some() {
+ return Err(EBUSY);
+ }
+
+ self.required_dev = Some((dev, index));
+ Ok(self)
+ }
+
+ /// Initializes supported hardware.
+ pub fn set_supported_hw(mut self, hw: KVec<u32>) -> Result<Self> {
+ if self.supported_hw.is_some() {
+ return Err(EBUSY);
+ }
+
+ if hw.is_empty() {
+ return Err(EINVAL);
+ }
+
+ self.supported_hw = Some(hw);
+ Ok(self)
+ }
+
+ /// Sets the configuration with the OPP core.
+ ///
+ /// The returned [`ConfigToken`] will remove the configuration when dropped.
+ pub fn set(self, dev: &Device) -> Result<ConfigToken> {
+ let (_clk_list, clk_names) = match &self.clk_names {
+ Some(x) => {
+ let list = to_c_str_array(x)?;
+ let ptr = list.as_ptr();
+ (Some(list), ptr)
+ }
+ None => (None, ptr::null()),
+ };
+
+ let (_regulator_list, regulator_names) = match &self.regulator_names {
+ Some(x) => {
+ let list = to_c_str_array(x)?;
+ let ptr = list.as_ptr();
+ (Some(list), ptr)
+ }
+ None => (None, ptr::null()),
+ };
+
+ let prop_name = self
+ .prop_name
+ .as_ref()
+ .map_or(ptr::null(), |p| p.as_char_ptr());
+
+ let (supported_hw, supported_hw_count) = self
+ .supported_hw
+ .as_ref()
+ .map_or((ptr::null(), 0), |hw| (hw.as_ptr(), hw.len() as u32));
+
+ let (required_dev, required_dev_index) = self
+ .required_dev
+ .as_ref()
+ .map_or((ptr::null_mut(), 0), |(dev, idx)| (dev.as_raw(), *idx));
+
+ let mut config = bindings::dev_pm_opp_config {
+ clk_names,
+ config_clks: if T::HAS_CONFIG_CLKS {
+ Some(Self::config_clks)
+ } else {
+ None
+ },
+ prop_name,
+ regulator_names,
+ config_regulators: if T::HAS_CONFIG_REGULATORS {
+ Some(Self::config_regulators)
+ } else {
+ None
+ },
+ supported_hw,
+ supported_hw_count,
+
+ required_dev,
+ required_dev_index,
+ };
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The OPP core guarantees not to access fields of [`Config`] after this call
+ // and so we don't need to save a copy of them for future use.
+ let ret = unsafe { bindings::dev_pm_opp_set_config(dev.as_raw(), &mut config) };
+ if ret < 0 {
+ Err(Error::from_errno(ret))
+ } else {
+ Ok(ConfigToken(ret))
+ }
+ }
+
+ /// Config's clk callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn config_clks(
+ dev: *mut bindings::device,
+ opp_table: *mut bindings::opp_table,
+ opp: *mut bindings::dev_pm_opp,
+ _data: *mut kernel::ffi::c_void,
+ scaling_down: bool,
+ ) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: 'dev' is guaranteed by the C code to be valid.
+ let dev = unsafe { Device::get_device(dev) };
+ T::config_clks(
+ &dev,
+ // SAFETY: 'opp_table' is guaranteed by the C code to be valid.
+ &unsafe { Table::from_raw_table(opp_table, &dev) },
+ // SAFETY: 'opp' is guaranteed by the C code to be valid.
+ unsafe { OPP::from_raw_opp(opp)? },
+ scaling_down,
+ )
+ .map(|()| 0)
+ })
+ }
+
+ /// Config's regulator callback.
+ ///
+ /// SAFETY: Called from C. Inputs must be valid pointers.
+ extern "C" fn config_regulators(
+ dev: *mut bindings::device,
+ old_opp: *mut bindings::dev_pm_opp,
+ new_opp: *mut bindings::dev_pm_opp,
+ regulators: *mut *mut bindings::regulator,
+ count: kernel::ffi::c_uint,
+ ) -> kernel::ffi::c_int {
+ from_result(|| {
+ // SAFETY: 'dev' is guaranteed by the C code to be valid.
+ let dev = unsafe { Device::get_device(dev) };
+ T::config_regulators(
+ &dev,
+ // SAFETY: 'old_opp' is guaranteed by the C code to be valid.
+ unsafe { OPP::from_raw_opp(old_opp)? },
+ // SAFETY: 'new_opp' is guaranteed by the C code to be valid.
+ unsafe { OPP::from_raw_opp(new_opp)? },
+ regulators,
+ count,
+ )
+ .map(|()| 0)
+ })
+ }
+}
+
+/// A reference-counted OPP table.
+///
+/// Rust abstraction for the C `struct opp_table`.
+///
+/// # Invariants
+///
+/// The pointer stored in `Self` is non-null and valid for the lifetime of the [`Table`].
+///
+/// Instances of this type are reference-counted.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to get OPP [`Table`] for a [`Cpumask`] and set its
+/// frequency.
+///
+/// ```
+/// # #![cfg(CONFIG_OF)]
+/// use kernel::clk::Hertz;
+/// use kernel::cpumask::Cpumask;
+/// use kernel::device::Device;
+/// use kernel::error::Result;
+/// use kernel::opp::Table;
+/// use kernel::types::ARef;
+///
+/// fn get_table(dev: &ARef<Device>, mask: &mut Cpumask, freq: Hertz) -> Result<Table> {
+/// let mut opp_table = Table::from_of_cpumask(dev, mask)?;
+///
+/// if opp_table.opp_count()? == 0 {
+/// return Err(EINVAL);
+/// }
+///
+/// pr_info!("Max transition latency is: {} ns\n", opp_table.max_transition_latency_ns());
+/// pr_info!("Suspend frequency is: {:?}\n", opp_table.suspend_freq());
+///
+/// opp_table.set_rate(freq)?;
+/// Ok(opp_table)
+/// }
+/// ```
+pub struct Table {
+ ptr: *mut bindings::opp_table,
+ dev: ARef<Device>,
+ #[allow(dead_code)]
+ em: bool,
+ #[allow(dead_code)]
+ of: bool,
+ cpus: Option<CpumaskVar>,
+}
+
+// SAFETY: It is okay to send ownership of [`Table`] across thread boundaries.
+unsafe impl Send for Table {}
+
+// SAFETY: It is okay to access [`Table`] through shared references from other threads because
+// we're either accessing properties that don't change or that are properly synchronised by C code.
+unsafe impl Sync for Table {}
+
+impl Table {
+ /// Creates a new reference-counted [`Table`] from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid and non-null.
+ unsafe fn from_raw_table(ptr: *mut bindings::opp_table, dev: &ARef<Device>) -> Self {
+ // SAFETY: By the safety requirements, ptr is valid and its refcount will be incremented.
+ //
+ // INVARIANT: The reference-count is decremented when [`Table`] goes out of scope.
+ unsafe { bindings::dev_pm_opp_get_opp_table_ref(ptr) };
+
+ Self {
+ ptr,
+ dev: dev.clone(),
+ em: false,
+ of: false,
+ cpus: None,
+ }
+ }
+
+ /// Creates a new reference-counted [`Table`] instance for a [`Device`].
+ pub fn from_dev(dev: &Device) -> Result<Self> {
+ // SAFETY: The requirements are satisfied by the existence of the [`Device`] and its safety
+ // requirements.
+ //
+ // INVARIANT: The reference-count is incremented by the C code and is decremented when
+ // [`Table`] goes out of scope.
+ let ptr = from_err_ptr(unsafe { bindings::dev_pm_opp_get_opp_table(dev.as_raw()) })?;
+
+ Ok(Self {
+ ptr,
+ dev: dev.into(),
+ em: false,
+ of: false,
+ cpus: None,
+ })
+ }
+
+ /// Creates a new reference-counted [`Table`] instance for a [`Device`] based on device tree
+ /// entries.
+ #[cfg(CONFIG_OF)]
+ pub fn from_of(dev: &ARef<Device>, index: i32) -> Result<Self> {
+ // SAFETY: The requirements are satisfied by the existence of the [`Device`] and its safety
+ // requirements.
+ //
+ // INVARIANT: The reference-count is incremented by the C code and is decremented when
+ // [`Table`] goes out of scope.
+ to_result(unsafe { bindings::dev_pm_opp_of_add_table_indexed(dev.as_raw(), index) })?;
+
+ // Get the newly created [`Table`].
+ let mut table = Self::from_dev(dev)?;
+ table.of = true;
+
+ Ok(table)
+ }
+
+ /// Remove device tree based [`Table`].
+ #[cfg(CONFIG_OF)]
+ #[inline]
+ fn remove_of(&self) {
+ // SAFETY: The requirements are satisfied by the existence of the [`Device`] and its safety
+ // requirements. We took the reference in [`from_of`] earlier, so it is safe to drop it
+ // now.
+ unsafe { bindings::dev_pm_opp_of_remove_table(self.dev.as_raw()) };
+ }
+
+ /// Creates a new reference-counted [`Table`] instance for a [`Cpumask`] based on device tree
+ /// entries.
+ #[cfg(CONFIG_OF)]
+ pub fn from_of_cpumask(dev: &Device, cpumask: &mut Cpumask) -> Result<Self> {
+ // SAFETY: The cpumask is valid and the returned pointer will be owned by the [`Table`]
+ // instance.
+ //
+ // INVARIANT: The reference-count is incremented by the C code and is decremented when
+ // [`Table`] goes out of scope.
+ to_result(unsafe { bindings::dev_pm_opp_of_cpumask_add_table(cpumask.as_raw()) })?;
+
+ // Fetch the newly created table.
+ let mut table = Self::from_dev(dev)?;
+ table.cpus = Some(CpumaskVar::try_clone(cpumask)?);
+
+ Ok(table)
+ }
+
+ /// Remove device tree based [`Table`] for a [`Cpumask`].
+ #[cfg(CONFIG_OF)]
+ #[inline]
+ fn remove_of_cpumask(&self, cpumask: &Cpumask) {
+ // SAFETY: The cpumask is valid and we took the reference in [`from_of_cpumask`] earlier,
+ // so it is safe to drop it now.
+ unsafe { bindings::dev_pm_opp_of_cpumask_remove_table(cpumask.as_raw()) };
+ }
+
+ /// Returns the number of [`OPP`]s in the [`Table`].
+ pub fn opp_count(&self) -> Result<u32> {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ let ret = unsafe { bindings::dev_pm_opp_get_opp_count(self.dev.as_raw()) };
+ if ret < 0 {
+ Err(Error::from_errno(ret))
+ } else {
+ Ok(ret as u32)
+ }
+ }
+
+ /// Returns max clock latency (in nanoseconds) of the [`OPP`]s in the [`Table`].
+ #[inline]
+ pub fn max_clock_latency_ns(&self) -> usize {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ unsafe { bindings::dev_pm_opp_get_max_clock_latency(self.dev.as_raw()) }
+ }
+
+ /// Returns max volt latency (in nanoseconds) of the [`OPP`]s in the [`Table`].
+ #[inline]
+ pub fn max_volt_latency_ns(&self) -> usize {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ unsafe { bindings::dev_pm_opp_get_max_volt_latency(self.dev.as_raw()) }
+ }
+
+ /// Returns max transition latency (in nanoseconds) of the [`OPP`]s in the [`Table`].
+ #[inline]
+ pub fn max_transition_latency_ns(&self) -> usize {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ unsafe { bindings::dev_pm_opp_get_max_transition_latency(self.dev.as_raw()) }
+ }
+
+ /// Returns the suspend [`OPP`]'s frequency.
+ #[inline]
+ pub fn suspend_freq(&self) -> Hertz {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ Hertz(unsafe { bindings::dev_pm_opp_get_suspend_opp_freq(self.dev.as_raw()) })
+ }
+
+ /// Synchronizes regulators used by the [`Table`].
+ #[inline]
+ pub fn sync_regulators(&self) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe { bindings::dev_pm_opp_sync_regulators(self.dev.as_raw()) })
+ }
+
+ /// Gets sharing CPUs.
+ #[inline]
+ pub fn sharing_cpus(dev: &Device, cpumask: &mut Cpumask) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe { bindings::dev_pm_opp_get_sharing_cpus(dev.as_raw(), cpumask.as_raw()) })
+ }
+
+ /// Sets sharing CPUs.
+ pub fn set_sharing_cpus(&mut self, cpumask: &mut Cpumask) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe {
+ bindings::dev_pm_opp_set_sharing_cpus(self.dev.as_raw(), cpumask.as_raw())
+ })?;
+
+ if let Some(mask) = self.cpus.as_mut() {
+ // Update the cpumask as this will be used while removing the table.
+ cpumask.copy(mask);
+ }
+
+ Ok(())
+ }
+
+ /// Gets sharing CPUs from device tree.
+ #[cfg(CONFIG_OF)]
+ #[inline]
+ pub fn of_sharing_cpus(dev: &Device, cpumask: &mut Cpumask) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe {
+ bindings::dev_pm_opp_of_get_sharing_cpus(dev.as_raw(), cpumask.as_raw())
+ })
+ }
+
+ /// Updates the voltage value for an [`OPP`].
+ #[inline]
+ pub fn adjust_voltage(
+ &self,
+ freq: Hertz,
+ volt: MicroVolt,
+ volt_min: MicroVolt,
+ volt_max: MicroVolt,
+ ) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe {
+ bindings::dev_pm_opp_adjust_voltage(
+ self.dev.as_raw(),
+ freq.into(),
+ volt.into(),
+ volt_min.into(),
+ volt_max.into(),
+ )
+ })
+ }
+
+ /// Creates [`FreqTable`] from [`Table`].
+ #[cfg(CONFIG_CPU_FREQ)]
+ #[inline]
+ pub fn cpufreq_table(&mut self) -> Result<FreqTable> {
+ FreqTable::new(self)
+ }
+
+ /// Configures device with [`OPP`] matching the frequency value.
+ #[inline]
+ pub fn set_rate(&self, freq: Hertz) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe { bindings::dev_pm_opp_set_rate(self.dev.as_raw(), freq.into()) })
+ }
+
+ /// Configures device with [`OPP`].
+ #[inline]
+ pub fn set_opp(&self, opp: &OPP) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe { bindings::dev_pm_opp_set_opp(self.dev.as_raw(), opp.as_raw()) })
+ }
+
+ /// Finds [`OPP`] based on frequency.
+ pub fn opp_from_freq(
+ &self,
+ freq: Hertz,
+ available: Option<bool>,
+ index: Option<u32>,
+ stype: SearchType,
+ ) -> Result<ARef<OPP>> {
+ let raw_dev = self.dev.as_raw();
+ let index = index.unwrap_or(0);
+ let mut rate = freq.into();
+
+ let ptr = from_err_ptr(match stype {
+ SearchType::Exact => {
+ if let Some(available) = available {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and
+ // its safety requirements. The returned pointer will be owned by the new
+ // [`OPP`] instance.
+ unsafe {
+ bindings::dev_pm_opp_find_freq_exact_indexed(
+ raw_dev, rate, index, available,
+ )
+ }
+ } else {
+ return Err(EINVAL);
+ }
+ }
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The returned pointer will be owned by the new [`OPP`] instance.
+ SearchType::Ceil => unsafe {
+ bindings::dev_pm_opp_find_freq_ceil_indexed(raw_dev, &mut rate, index)
+ },
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The returned pointer will be owned by the new [`OPP`] instance.
+ SearchType::Floor => unsafe {
+ bindings::dev_pm_opp_find_freq_floor_indexed(raw_dev, &mut rate, index)
+ },
+ })?;
+
+ // SAFETY: The `ptr` is guaranteed by the C code to be valid.
+ unsafe { OPP::from_raw_opp_owned(ptr) }
+ }
+
+ /// Finds [`OPP`] based on level.
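+ ///
+ /// For example, to find the [`OPP`] with the lowest level at or above a target
+ /// (the target value is illustrative):
+ ///
+ /// ```ignore
+ /// let opp = table.opp_from_level(3, SearchType::Ceil)?;
+ /// ```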
+ pub fn opp_from_level(&self, mut level: u32, stype: SearchType) -> Result<ARef<OPP>> {
+ let raw_dev = self.dev.as_raw();
+
+ let ptr = from_err_ptr(match stype {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The returned pointer will be owned by the new [`OPP`] instance.
+ SearchType::Exact => unsafe { bindings::dev_pm_opp_find_level_exact(raw_dev, level) },
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The returned pointer will be owned by the new [`OPP`] instance.
+ SearchType::Ceil => unsafe {
+ bindings::dev_pm_opp_find_level_ceil(raw_dev, &mut level)
+ },
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The returned pointer will be owned by the new [`OPP`] instance.
+ SearchType::Floor => unsafe {
+ bindings::dev_pm_opp_find_level_floor(raw_dev, &mut level)
+ },
+ })?;
+
+ // SAFETY: The `ptr` is guaranteed by the C code to be valid.
+ unsafe { OPP::from_raw_opp_owned(ptr) }
+ }
+
+ /// Finds [`OPP`] based on bandwidth.
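+ ///
+ /// For example, to find the highest-bandwidth [`OPP`] that does not exceed a limit at
+ /// interconnect path index 0 (names are illustrative):
+ ///
+ /// ```ignore
+ /// let opp = table.opp_from_bw(bw_kbps, 0, SearchType::Floor)?;
+ /// ```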
+ pub fn opp_from_bw(&self, mut bw: u32, index: i32, stype: SearchType) -> Result<ARef<OPP>> {
+ let raw_dev = self.dev.as_raw();
+
+ let ptr = from_err_ptr(match stype {
+ // The OPP core doesn't support this yet.
+ SearchType::Exact => return Err(EINVAL),
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The returned pointer will be owned by the new [`OPP`] instance.
+ SearchType::Ceil => unsafe {
+ bindings::dev_pm_opp_find_bw_ceil(raw_dev, &mut bw, index)
+ },
+
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. The returned pointer will be owned by the new [`OPP`] instance.
+ SearchType::Floor => unsafe {
+ bindings::dev_pm_opp_find_bw_floor(raw_dev, &mut bw, index)
+ },
+ })?;
+
+ // SAFETY: The `ptr` is guaranteed by the C code to be valid.
+ unsafe { OPP::from_raw_opp_owned(ptr) }
+ }
+
+ /// Enables the [`OPP`].
+ #[inline]
+ pub fn enable_opp(&self, freq: Hertz) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe { bindings::dev_pm_opp_enable(self.dev.as_raw(), freq.into()) })
+ }
+
+ /// Disables the [`OPP`].
+ #[inline]
+ pub fn disable_opp(&self, freq: Hertz) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe { bindings::dev_pm_opp_disable(self.dev.as_raw(), freq.into()) })
+ }
+
+ /// Registers with the Energy model.
+ #[cfg(CONFIG_OF)]
+ pub fn of_register_em(&mut self, cpumask: &mut Cpumask) -> Result {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements.
+ to_result(unsafe {
+ bindings::dev_pm_opp_of_register_em(self.dev.as_raw(), cpumask.as_raw())
+ })?;
+
+ self.em = true;
+ Ok(())
+ }
+
+ /// Unregisters with the Energy model.
+ #[cfg(all(CONFIG_OF, CONFIG_ENERGY_MODEL))]
+ #[inline]
+ fn of_unregister_em(&self) {
+ // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+ // requirements. We registered with the EM framework earlier, so it is safe to unregister now.
+ unsafe { bindings::em_dev_unregister_perf_domain(self.dev.as_raw()) };
+ }
+}
+
+impl Drop for Table {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe
+ // to relinquish it now.
+ unsafe { bindings::dev_pm_opp_put_opp_table(self.ptr) };
+
+ #[cfg(CONFIG_OF)]
+ {
+ #[cfg(CONFIG_ENERGY_MODEL)]
+ if self.em {
+ self.of_unregister_em();
+ }
+
+ if self.of {
+ self.remove_of();
+ } else if let Some(cpumask) = self.cpus.take() {
+ self.remove_of_cpumask(&cpumask);
+ }
+ }
+ }
+}
+
+/// A reference-counted Operating performance point (OPP).
+///
+/// Rust abstraction for the C `struct dev_pm_opp`.
+///
+/// # Invariants
+///
+/// The pointer stored in `Self` is non-null and valid for the lifetime of the [`OPP`].
+///
+/// Instances of this type are reference-counted. The reference count is incremented by the
+/// `dev_pm_opp_get` function and decremented by `dev_pm_opp_put`. The Rust type `ARef<OPP>`
+/// represents a pointer that owns a reference count on the [`OPP`].
+///
+/// A reference to the [`OPP`], &[`OPP`], isn't refcounted by the Rust code.
+///
+/// ## Examples
+///
+/// The following example demonstrates how to get [`OPP`] corresponding to a frequency value and
+/// configure the device with it.
+///
+/// ```
+/// use kernel::clk::Hertz;
+/// use kernel::error::Result;
+/// use kernel::opp::{SearchType, Table};
+///
+/// fn configure_opp(table: &Table, freq: Hertz) -> Result {
+/// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?;
+///
+/// if opp.freq(None) != freq {
+/// return Err(EINVAL);
+/// }
+///
+/// table.set_opp(&opp)
+/// }
+/// ```
+#[repr(transparent)]
+pub struct OPP(Opaque<bindings::dev_pm_opp>);
+
+// SAFETY: It is okay to send the ownership of [`OPP`] across thread boundaries.
+unsafe impl Send for OPP {}
+
+// SAFETY: It is okay to access [`OPP`] through shared references from other threads because we're
+// either accessing properties that don't change or that are properly synchronised by C code.
+unsafe impl Sync for OPP {}
+
+// SAFETY: The type invariants guarantee that [`OPP`] is always refcounted.
+unsafe impl AlwaysRefCounted for OPP {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::dev_pm_opp_get(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::dev_pm_opp_put(obj.cast().as_ptr()) }
+ }
+}
+
+impl OPP {
+ /// Creates an owned reference to an [`OPP`] from a valid pointer.
+ ///
+ /// The refcount is incremented by the C code and will be decremented by `dec_ref` when the
+ /// [`ARef`] object is dropped.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and the refcount of the [`OPP`] is incremented.
+ /// The caller must also ensure that it doesn't explicitly drop the refcount of the [`OPP`], as
+ /// the returned [`ARef`] object takes over the refcount increment on the underlying object and
+ /// the same will be dropped along with it.
+ pub unsafe fn from_raw_opp_owned(ptr: *mut bindings::dev_pm_opp) -> Result<ARef<Self>> {
+ let ptr = ptr::NonNull::new(ptr).ok_or(ENODEV)?;
+
+ // SAFETY: The safety requirements guarantee the validity of the pointer.
+ //
+ // INVARIANT: The reference-count is decremented when [`OPP`] goes out of scope.
+ Ok(unsafe { ARef::from_raw(ptr.cast()) })
+ }
+
+ /// Creates a reference to an [`OPP`] from a valid pointer.
+ ///
+ /// The refcount is not updated by the Rust API unless the returned reference is converted to
+ /// an [`ARef`] object.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and remains valid for the duration of `'a`.
+ #[inline]
+ pub unsafe fn from_raw_opp<'a>(ptr: *mut bindings::dev_pm_opp) -> Result<&'a Self> {
+ // SAFETY: The caller guarantees that the pointer is not dangling and stays valid for the
+ // duration of 'a. The cast is okay because [`OPP`] is `repr(transparent)`.
+ Ok(unsafe { &*ptr.cast() })
+ }
+
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::dev_pm_opp {
+ self.0.get()
+ }
+
+ /// Returns the frequency of an [`OPP`].
+ pub fn freq(&self, index: Option<u32>) -> Hertz {
+ let index = index.unwrap_or(0);
+
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // use it.
+ Hertz(unsafe { bindings::dev_pm_opp_get_freq_indexed(self.as_raw(), index) })
+ }
+
+ /// Returns the voltage of an [`OPP`].
+ #[inline]
+ pub fn voltage(&self) -> MicroVolt {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // use it.
+ MicroVolt(unsafe { bindings::dev_pm_opp_get_voltage(self.as_raw()) })
+ }
+
+ /// Returns the level of an [`OPP`].
+ #[inline]
+ pub fn level(&self) -> u32 {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // use it.
+ unsafe { bindings::dev_pm_opp_get_level(self.as_raw()) }
+ }
+
+ /// Returns the power of an [`OPP`].
+ #[inline]
+ pub fn power(&self) -> MicroWatt {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // use it.
+ MicroWatt(unsafe { bindings::dev_pm_opp_get_power(self.as_raw()) })
+ }
+
+ /// Returns the required pstate of an [`OPP`].
+ #[inline]
+ pub fn required_pstate(&self, index: u32) -> u32 {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // use it.
+ unsafe { bindings::dev_pm_opp_get_required_pstate(self.as_raw(), index) }
+ }
+
+ /// Returns true if the [`OPP`] is turbo.
+ #[inline]
+ pub fn is_turbo(&self) -> bool {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // use it.
+ unsafe { bindings::dev_pm_opp_is_turbo(self.as_raw()) }
+ }
+}
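+
+// The following is an illustrative sketch only (not part of the OPP API itself): once an
+// [`OPP`] reference is in hand, e.g. from `Table::opp_from_freq()` as in the example at the
+// top of this type's documentation, its properties can be read directly. The function name
+// below is hypothetical.
+//
+// fn log_opp(opp: &OPP) {
+//     pr_info!("level: {}, turbo: {}\n", opp.level(), opp.is_turbo());
+// }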
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
new file mode 100644
index 000000000000..f6126aca33a6
--- /dev/null
+++ b/rust/kernel/page.rs
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+
+use crate::{
+ alloc::{AllocError, Flags},
+ bindings,
+ error::code::*,
+ error::Result,
+ uaccess::UserSliceReader,
+};
+use core::ptr::{self, NonNull};
+
+/// A bitwise shift for the page size.
+pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
+
+/// The number of bytes in a page.
+pub const PAGE_SIZE: usize = bindings::PAGE_SIZE;
+
+/// A bitmask that gives the page containing a given address.
+pub const PAGE_MASK: usize = !(PAGE_SIZE - 1);
+
+/// Round up the given number to the next multiple of [`PAGE_SIZE`].
+///
+/// It is incorrect to pass an address where the next multiple of [`PAGE_SIZE`] doesn't fit in a
+/// [`usize`].
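+///
+/// # Examples
+///
+/// A quick illustration of the rounding behaviour (these equalities hold for any power-of-two
+/// page size):
+///
+/// ```
+/// use kernel::page::{page_align, PAGE_SIZE};
+///
+/// assert_eq!(page_align(0), 0);
+/// assert_eq!(page_align(1), PAGE_SIZE);
+/// assert_eq!(page_align(PAGE_SIZE), PAGE_SIZE);
+/// assert_eq!(page_align(PAGE_SIZE + 1), 2 * PAGE_SIZE);
+/// ```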
+pub const fn page_align(addr: usize) -> usize {
+ // Parentheses around `PAGE_SIZE - 1` to avoid triggering overflow sanitizers in the wrong
+ // cases.
+ (addr + (PAGE_SIZE - 1)) & PAGE_MASK
+}
+
+/// A pointer to a page that owns the page allocation.
+///
+/// # Invariants
+///
+/// The pointer is valid, and has ownership over the page.
+pub struct Page {
+ page: NonNull<bindings::page>,
+}
+
+// SAFETY: Pages have no logic that relies on them staying on a given thread, so moving them across
+// threads is safe.
+unsafe impl Send for Page {}
+
+// SAFETY: Pages have no logic that relies on them not being accessed concurrently, so accessing
+// them concurrently is safe.
+unsafe impl Sync for Page {}
+
+impl Page {
+ /// Allocates a new page.
+ ///
+ /// # Examples
+ ///
+ /// Allocate memory for a page.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// let page = Page::alloc_page(GFP_KERNEL)?;
+ /// # Ok::<(), kernel::alloc::AllocError>(())
+ /// ```
+ ///
+ /// Allocate memory for a page and zero its contents.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// let page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
+ /// # Ok::<(), kernel::alloc::AllocError>(())
+ /// ```
+ pub fn alloc_page(flags: Flags) -> Result<Self, AllocError> {
+        // SAFETY: Depending on the value of `flags`, this call may sleep. Other than that, it
+ // is always safe to call this method.
+ let page = unsafe { bindings::alloc_pages(flags.as_raw(), 0) };
+ let page = NonNull::new(page).ok_or(AllocError)?;
+ // INVARIANT: We just successfully allocated a page, so we now have ownership of the newly
+ // allocated page. We transfer that ownership to the new `Page` object.
+ Ok(Self { page })
+ }
+
+ /// Returns a raw pointer to the page.
+ pub fn as_ptr(&self) -> *mut bindings::page {
+ self.page.as_ptr()
+ }
+
+ /// Runs a piece of code with this page mapped to an address.
+ ///
+ /// The page is unmapped when this call returns.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `PAGE_SIZE` bytes and for the duration in which the closure is called. The pointer might
+ /// only be mapped on the current thread, and when that is the case, dereferencing it on other
+ /// threads is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't
+ /// cause data races, the memory may be uninitialized, and so on.
+ ///
+    /// If multiple threads map the same page at the same time, then the mappings may be at
+    /// different addresses. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_page_mapped<T>(&self, f: impl FnOnce(*mut u8) -> T) -> T {
+ // SAFETY: `page` is valid due to the type invariants on `Page`.
+ let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) };
+
+ let res = f(mapped_addr.cast());
+
+ // This unmaps the page mapped above.
+ //
+ // SAFETY: Since this API takes the user code as a closure, it can only be used in a manner
+ // where the pages are unmapped in reverse order. This is as required by `kunmap_local`.
+ //
+ // In other words, if this call to `kunmap_local` happens when a different page should be
+ // unmapped first, then there must necessarily be a call to `kmap_local_page` other than the
+ // call just above in `with_page_mapped` that made that possible. In this case, it is the
+ // unsafe block that wraps that other call that is incorrect.
+ unsafe { bindings::kunmap_local(mapped_addr) };
+
+ res
+ }
+
+ /// Runs a piece of code with a raw pointer to a slice of this page, with bounds checking.
+ ///
+ /// If `f` is called, then it will be called with a pointer that points at `off` bytes into the
+ /// page, and the pointer will be valid for at least `len` bytes. The pointer is only valid on
+ /// this task, as this method uses a local mapping.
+ ///
+    /// If `off` and `len` refer to a region outside of this page, then this method returns
+ /// [`EINVAL`] and does not call `f`.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `len` bytes and for the duration in which the closure is called. The pointer might only be
+ /// mapped on the current thread, and when that is the case, dereferencing it on other threads
+ /// is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't cause
+ /// data races, the memory may be uninitialized, and so on.
+ ///
+    /// If multiple threads map the same page at the same time, then the mappings may be at
+    /// different addresses. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_pointer_into_page<T>(
+ &self,
+ off: usize,
+ len: usize,
+ f: impl FnOnce(*mut u8) -> Result<T>,
+ ) -> Result<T> {
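+        // Checking `off` and `len` individually first ensures that `off + len` cannot
+        // overflow `usize`, so the combined check below is sound.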
+ let bounds_ok = off <= PAGE_SIZE && len <= PAGE_SIZE && (off + len) <= PAGE_SIZE;
+
+ if bounds_ok {
+ self.with_page_mapped(move |page_addr| {
+ // SAFETY: The `off` integer is at most `PAGE_SIZE`, so this pointer offset will
+ // result in a pointer that is in bounds or one off the end of the page.
+ f(unsafe { page_addr.add(off) })
+ })
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Maps the page and reads from it into the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `dst` is valid for writing `len` bytes.
+ /// * Callers must ensure that this call does not race with a write to the same page that
+ /// overlaps with this read.
+ pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |src| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then
+ // it has performed a bounds check and guarantees that `src` is
+ // valid for `len` bytes.
+ //
+            // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and writes into it from the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `src` is valid for reading `len` bytes.
+ /// * Callers must ensure that this call does not race with a read or write to the same page
+ /// that overlaps with this write.
+ pub unsafe fn write_raw(&self, src: *const u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+            // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and zeroes the given slice.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn fill_zero_raw(&self, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+            // The caller guarantees that there is no data race.
+ unsafe { ptr::write_bytes(dst, 0u8, len) };
+ Ok(())
+ })
+ }
+
+ /// Copies data from userspace into this page.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// Like the other `UserSliceReader` methods, data races are allowed on the userspace address.
+ /// However, they are not allowed on the page you are copying into.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn copy_from_user_slice_raw(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ len: usize,
+ ) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes. Furthermore, we have
+ // exclusive access to the slice since the caller guarantees that there are no races.
+ reader.read_raw(unsafe { core::slice::from_raw_parts_mut(dst.cast(), len) })
+ })
+ }
+}
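+
+// An illustrative sketch (not part of this patch): a write/read round-trip through a freshly
+// allocated page. The function is hypothetical; the `unsafe` blocks are sound because the
+// buffers are valid for `len` bytes and nothing else accesses the page concurrently.
+//
+// fn page_roundtrip() -> Result {
+//     let page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
+//     let src = [1u8, 2, 3, 4];
+//     let mut dst = [0u8; 4];
+//     // SAFETY: `src` is valid for reading 4 bytes; no concurrent access to the page.
+//     unsafe { page.write_raw(src.as_ptr(), 0, src.len())? };
+//     // SAFETY: `dst` is valid for writing 4 bytes; no concurrent access to the page.
+//     unsafe { page.read_raw(dst.as_mut_ptr(), 0, dst.len())? };
+//     assert_eq!(src, dst);
+//     Ok(())
+// }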
+
+impl Drop for Page {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we have ownership of the page and can free it.
+ unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
+ }
+}
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
new file mode 100644
index 000000000000..38fc8d5ffbf9
--- /dev/null
+++ b/rust/kernel/pci.rs
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for the PCI bus.
+//!
+//! C header: [`include/linux/pci.h`](srctree/include/linux/pci.h)
+
+use crate::{
+ alloc::flags::*,
+ bindings, container_of, device,
+ device_id::RawDeviceId,
+ devres::Devres,
+ driver,
+ error::{to_result, Result},
+ io::Io,
+ io::IoRaw,
+ str::CStr,
+ types::{ARef, ForeignOwnable, Opaque},
+ ThisModule,
+};
+use core::{
+ marker::PhantomData,
+ ops::Deref,
+ ptr::{addr_of_mut, NonNull},
+};
+use kernel::prelude::*;
+
+/// An adapter for the registration of PCI drivers.
+pub struct Adapter<T: Driver>(T);
+
+// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// a preceding call to `register` has been successful.
+unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
+ type RegType = bindings::pci_driver;
+
+ unsafe fn register(
+ pdrv: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: It's safe to set the fields of `struct pci_driver` on initialization.
+ unsafe {
+ (*pdrv.get()).name = name.as_char_ptr();
+ (*pdrv.get()).probe = Some(Self::probe_callback);
+ (*pdrv.get()).remove = Some(Self::remove_callback);
+ (*pdrv.get()).id_table = T::ID_TABLE.as_ptr();
+ }
+
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ to_result(unsafe {
+ bindings::__pci_register_driver(pdrv.get(), module.0, name.as_char_ptr())
+ })
+ }
+
+ unsafe fn unregister(pdrv: &Opaque<Self::RegType>) {
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ unsafe { bindings::pci_unregister_driver(pdrv.get()) }
+ }
+}
+
+impl<T: Driver + 'static> Adapter<T> {
+ extern "C" fn probe_callback(
+ pdev: *mut bindings::pci_dev,
+ id: *const bindings::pci_device_id,
+ ) -> kernel::ffi::c_int {
+ // SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a
+ // `struct pci_dev`.
+ //
+ // INVARIANT: `pdev` is valid for the duration of `probe_callback()`.
+ let pdev = unsafe { &*pdev.cast::<Device<device::Core>>() };
+
+        // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct pci_device_id` and
+ // does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*id.cast::<DeviceId>() };
+ let info = T::ID_TABLE.info(id.index());
+
+ match T::probe(pdev, info) {
+ Ok(data) => {
+ // Let the `struct pci_dev` own a reference of the driver's private data.
+ // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { bindings::pci_set_drvdata(pdev.as_raw(), data.into_foreign() as _) };
+ }
+ Err(err) => return Error::to_errno(err),
+ }
+
+ 0
+ }
+
+ extern "C" fn remove_callback(pdev: *mut bindings::pci_dev) {
+ // SAFETY: The PCI bus only ever calls the remove callback with a valid pointer to a
+ // `struct pci_dev`.
+ let ptr = unsafe { bindings::pci_get_drvdata(pdev) };
+
+ // SAFETY: `remove_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
+ // `KBox<T>` pointer created through `KBox::into_foreign`.
+ let _ = unsafe { KBox::<T>::from_foreign(ptr) };
+ }
+}
+
+/// Declares a kernel module that exposes a single PCI driver.
+///
+/// # Examples
+///
+///```ignore
+/// kernel::module_pci_driver! {
+/// type: MyDriver,
+/// name: "Module name",
+/// authors: ["Author name"],
+/// description: "Description",
+/// license: "GPL v2",
+/// }
+///```
+#[macro_export]
+macro_rules! module_pci_driver {
+    ($($f:tt)*) => {
+        $crate::module_driver!(<T>, $crate::pci::Adapter<T>, { $($f)* });
+    };
+}
+
+/// Abstraction for `bindings::pci_device_id`.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::pci_device_id);
+
+impl DeviceId {
+ const PCI_ANY_ID: u32 = !0;
+
+ /// Equivalent to C's `PCI_DEVICE` macro.
+ ///
+ /// Create a new `pci::DeviceId` from a vendor and device ID number.
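+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use kernel::pci::DeviceId;
+    ///
+    /// // The vendor/device numbers below are made up for illustration.
+    /// const ID: DeviceId = DeviceId::from_id(0x1af4, 0x1001);
+    /// ```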
+ pub const fn from_id(vendor: u32, device: u32) -> Self {
+ Self(bindings::pci_device_id {
+ vendor,
+ device,
+ subvendor: DeviceId::PCI_ANY_ID,
+ subdevice: DeviceId::PCI_ANY_ID,
+ class: 0,
+ class_mask: 0,
+ driver_data: 0,
+ override_only: 0,
+ })
+ }
+
+ /// Equivalent to C's `PCI_DEVICE_CLASS` macro.
+ ///
+ /// Create a new `pci::DeviceId` from a class number and mask.
+ pub const fn from_class(class: u32, class_mask: u32) -> Self {
+ Self(bindings::pci_device_id {
+ vendor: DeviceId::PCI_ANY_ID,
+ device: DeviceId::PCI_ANY_ID,
+ subvendor: DeviceId::PCI_ANY_ID,
+ subdevice: DeviceId::PCI_ANY_ID,
+ class,
+ class_mask,
+ driver_data: 0,
+ override_only: 0,
+ })
+ }
+}
+
+// SAFETY:
+// * `DeviceId` is a `#[repr(transparent)]` wrapper of `pci_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+// * `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::pci_device_id;
+
+ const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::pci_device_id, driver_data);
+
+ fn index(&self) -> usize {
+ self.0.driver_data as _
+ }
+}
+
+/// `IdTable` type for PCI.
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// Create a PCI `IdTable` with its alias for modpost.
+#[macro_export]
+macro_rules! pci_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::pci::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("pci", $module_table_name, $table_name);
+ };
+}
+
+/// The PCI driver trait.
+///
+/// Drivers must implement this trait in order to get a PCI driver registered.
+///
+/// # Examples
+///
+///```
+/// # use kernel::{bindings, device::Core, pci};
+///
+/// struct MyDriver;
+///
+/// kernel::pci_device_table!(
+/// PCI_TABLE,
+/// MODULE_PCI_TABLE,
+/// <MyDriver as pci::Driver>::IdInfo,
+/// [
+/// (pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as _), ())
+/// ]
+/// );
+///
+/// impl pci::Driver for MyDriver {
+/// type IdInfo = ();
+/// const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
+///
+/// fn probe(
+/// _pdev: &pci::Device<Core>,
+/// _id_info: &Self::IdInfo,
+/// ) -> Result<Pin<KBox<Self>>> {
+/// Err(ENODEV)
+/// }
+/// }
+///```
+pub trait Driver: Send {
+ /// The type holding information about each device id supported by the driver.
+ ///
+ /// TODO: Use associated_type_defaults once stabilized:
+ ///
+    /// `type IdInfo: 'static = ();`
+ type IdInfo: 'static;
+
+ /// The table of device ids supported by the driver.
+ const ID_TABLE: IdTable<Self::IdInfo>;
+
+ /// PCI driver probe.
+ ///
+    /// Called when a new PCI device is added or discovered.
+ /// Implementers should attempt to initialize the device here.
+ fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+}
+
+/// The PCI device representation.
+///
+/// This structure represents the Rust abstraction for a C `struct pci_dev`. The implementation
+/// abstracts the usage of an already existing C `struct pci_dev` within Rust code that we get
+/// passed from the C side.
+///
+/// # Invariants
+///
+/// A [`Device`] instance represents a valid `struct pci_dev` created by the C portion of the
+/// kernel.
+#[repr(transparent)]
+pub struct Device<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::pci_dev>,
+ PhantomData<Ctx>,
+);
+
+/// A PCI BAR to perform I/O operations on.
+///
+/// # Invariants
+///
+/// `Bar` always holds an `IoRaw` instance that holds a valid pointer to the start of the I/O
+/// memory mapped PCI BAR and its size.
+pub struct Bar<const SIZE: usize = 0> {
+ pdev: ARef<Device>,
+ io: IoRaw<SIZE>,
+ num: i32,
+}
+
+impl<const SIZE: usize> Bar<SIZE> {
+ fn new(pdev: &Device, num: u32, name: &CStr) -> Result<Self> {
+ let len = pdev.resource_len(num)?;
+ if len == 0 {
+ return Err(ENOMEM);
+ }
+
+ // Convert to `i32`, since that's what all the C bindings use.
+ let num = i32::try_from(num)?;
+
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ // `name` is always valid.
+ let ret = unsafe { bindings::pci_request_region(pdev.as_raw(), num, name.as_char_ptr()) };
+ if ret != 0 {
+ return Err(EBUSY);
+ }
+
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ // `name` is always valid.
+ let ioptr: usize = unsafe { bindings::pci_iomap(pdev.as_raw(), num, 0) } as usize;
+ if ioptr == 0 {
+ // SAFETY:
+ // `pdev` valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ unsafe { bindings::pci_release_region(pdev.as_raw(), num) };
+ return Err(ENOMEM);
+ }
+
+ let io = match IoRaw::new(ioptr, len as usize) {
+ Ok(io) => io,
+ Err(err) => {
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `ioptr` is guaranteed to be the start of a valid I/O mapped memory region.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ unsafe { Self::do_release(pdev, ioptr, num) };
+ return Err(err);
+ }
+ };
+
+ Ok(Bar {
+ pdev: pdev.into(),
+ io,
+ num,
+ })
+ }
+
+ /// # Safety
+ ///
+ /// `ioptr` must be a valid pointer to the memory mapped PCI bar number `num`.
+ unsafe fn do_release(pdev: &Device, ioptr: usize, num: i32) {
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `ioptr` is valid by the safety requirements.
+ // `num` is valid by the safety requirements.
+ unsafe {
+ bindings::pci_iounmap(pdev.as_raw(), ioptr as _);
+ bindings::pci_release_region(pdev.as_raw(), num);
+ }
+ }
+
+ fn release(&self) {
+ // SAFETY: The safety requirements are guaranteed by the type invariant of `self.pdev`.
+ unsafe { Self::do_release(&self.pdev, self.io.addr(), self.num) };
+ }
+}
+
+impl Bar {
+ fn index_is_valid(index: u32) -> bool {
+ // A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
+ index < bindings::PCI_NUM_RESOURCES
+ }
+}
+
+impl<const SIZE: usize> Drop for Bar<SIZE> {
+ fn drop(&mut self) {
+ self.release();
+ }
+}
+
+impl<const SIZE: usize> Deref for Bar<SIZE> {
+ type Target = Io<SIZE>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant of `Self`, the MMIO range in `self.io` is properly mapped.
+ unsafe { Io::from_raw(&self.io) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> Device<Ctx> {
+ fn as_raw(&self) -> *mut bindings::pci_dev {
+ self.0.get()
+ }
+}
+
+impl Device {
+ /// Returns the PCI vendor ID.
+ pub fn vendor_id(&self) -> u16 {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ unsafe { (*self.as_raw()).vendor }
+ }
+
+ /// Returns the PCI device ID.
+ pub fn device_id(&self) -> u16 {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ unsafe { (*self.as_raw()).device }
+ }
+
+ /// Returns the size of the given PCI bar resource.
+ pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
+ if !Bar::index_is_valid(bar) {
+ return Err(EINVAL);
+ }
+
+ // SAFETY:
+ // - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`,
+ // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
+ Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) })
+ }
+}
+
+impl Device<device::Bound> {
+    /// Maps an entire PCI BAR after performing a region request on it. I/O operation bounds
+    /// checks for offsets (plus the requested type size) < `SIZE` can be performed at compile
+    /// time.
+ pub fn iomap_region_sized<const SIZE: usize>(
+ &self,
+ bar: u32,
+ name: &CStr,
+ ) -> Result<Devres<Bar<SIZE>>> {
+ let bar = Bar::<SIZE>::new(self, bar, name)?;
+ let devres = Devres::new(self.as_ref(), bar, GFP_KERNEL)?;
+
+ Ok(devres)
+ }
+
+    /// Maps an entire PCI BAR after performing a region request on it.
+ pub fn iomap_region(&self, bar: u32, name: &CStr) -> Result<Devres<Bar>> {
+ self.iomap_region_sized::<0>(bar, name)
+ }
+}
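+
+// An illustrative sketch (hypothetical driver code, not part of this patch): request and map
+// BAR 0, then read a 32-bit register through the `Io` accessors that `Bar` dereferences to.
+// The register offset is made up.
+//
+// fn map_bar0(pdev: &Device<device::Bound>) -> Result {
+//     let bar = pdev.iomap_region(0, c_str!("my_driver"))?;
+//     let io = bar.try_access().ok_or(ENXIO)?;
+//     let _reg: u32 = io.read32(0x0);
+//     Ok(())
+// }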
+
+impl Device<device::Core> {
+ /// Enable memory resources for this device.
+ pub fn enable_device_mem(&self) -> Result {
+ // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+ to_result(unsafe { bindings::pci_enable_device_mem(self.as_raw()) })
+ }
+
+ /// Enable bus-mastering for this device.
+ pub fn set_master(&self) {
+ // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+ unsafe { bindings::pci_set_master(self.as_raw()) };
+ }
+}
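+
+// Illustrative probe-time initialization (hypothetical driver code): enable memory resources
+// and bus mastering before touching the device.
+//
+// fn init(pdev: &Device<device::Core>) -> Result {
+//     pdev.enable_device_mem()?;
+//     pdev.set_master();
+//     Ok(())
+// }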
+
+// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
+// argument.
+kernel::impl_device_context_deref!(unsafe { Device });
+kernel::impl_device_context_into_aref!(Device);
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::pci_dev_get(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::pci_dev_put(obj.cast().as_ptr()) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
+ fn as_ref(&self) -> &device::Device<Ctx> {
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct pci_dev`.
+ let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::as_ref(dev) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> TryFrom<&device::Device<Ctx>> for &Device<Ctx> {
+ type Error = kernel::error::Error;
+
+ fn try_from(dev: &device::Device<Ctx>) -> Result<Self, Self::Error> {
+ // SAFETY: By the type invariant of `Device`, `dev.as_raw()` is a valid pointer to a
+ // `struct device`.
+ if !unsafe { bindings::dev_is_pci(dev.as_raw()) } {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We've just verified that the bus type of `dev` equals `bindings::pci_bus_type`,
+ // hence `dev` must be embedded in a valid `struct pci_dev` as guaranteed by the
+ // corresponding C code.
+ let pdev = unsafe { container_of!(dev.as_raw(), bindings::pci_dev, dev) };
+
+ // SAFETY: `pdev` is a valid pointer to a `struct pci_dev`.
+ Ok(unsafe { &*pdev.cast() })
+ }
+}
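+
+// Illustrative only: code holding a generic `&device::Device` can recover the PCI-specific
+// representation through the `TryFrom` impl above. The helper name is hypothetical.
+//
+// fn as_pci(dev: &device::Device) -> Result<&Device> {
+//     dev.try_into()
+// }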
+
+// SAFETY: A `Device` is always reference-counted and can be released from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` can be shared among threads because all methods of `Device`
+// (i.e. `Device<Normal>`) are thread safe.
+unsafe impl Sync for Device {}
diff --git a/rust/kernel/pid_namespace.rs b/rust/kernel/pid_namespace.rs
new file mode 100644
index 000000000000..0e93808e4639
--- /dev/null
+++ b/rust/kernel/pid_namespace.rs
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (c) 2024 Christian Brauner <brauner@kernel.org>
+
+//! Pid namespaces.
+//!
+//! C header: [`include/linux/pid_namespace.h`](srctree/include/linux/pid_namespace.h) and
+//! [`include/linux/pid.h`](srctree/include/linux/pid.h)
+
+use crate::{
+ bindings,
+ types::{AlwaysRefCounted, Opaque},
+};
+use core::ptr;
+
+/// Wraps the kernel's `struct pid_namespace`. Thread safe.
+///
+/// This structure represents the Rust abstraction for a C `struct pid_namespace`. This
+/// implementation abstracts the usage of an already existing C `struct pid_namespace` within Rust
+/// code that we get passed from the C side.
+#[repr(transparent)]
+pub struct PidNamespace {
+ inner: Opaque<bindings::pid_namespace>,
+}
+
+impl PidNamespace {
+ /// Returns a raw pointer to the inner C struct.
+ #[inline]
+ pub fn as_ptr(&self) -> *mut bindings::pid_namespace {
+ self.inner.get()
+ }
+
+ /// Creates a reference to a [`PidNamespace`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
+ /// returned [`PidNamespace`] reference.
+ pub unsafe fn from_ptr<'a>(ptr: *const bindings::pid_namespace) -> &'a Self {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `PidNamespace` type being transparent makes the cast ok.
+ unsafe { &*ptr.cast() }
+ }
+}
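+
+// An illustrative sketch (the call site is hypothetical): borrowing a [`PidNamespace`] from a
+// raw pointer handed over by C code.
+//
+// unsafe fn inspect(ptr: *const bindings::pid_namespace) {
+//     // SAFETY: the (hypothetical) caller guarantees that `ptr` is valid for the duration of
+//     // this call.
+//     let ns = unsafe { PidNamespace::from_ptr(ptr) };
+//     let _raw = ns.as_ptr();
+// }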
+
+// SAFETY: Instances of `PidNamespace` are always reference-counted.
+unsafe impl AlwaysRefCounted for PidNamespace {
+ #[inline]
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_pid_ns(self.as_ptr()) };
+ }
+
+ #[inline]
+ unsafe fn dec_ref(obj: ptr::NonNull<PidNamespace>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::put_pid_ns(obj.cast().as_ptr()) }
+ }
+}
+
+// SAFETY:
+// - `PidNamespace::dec_ref` can be called from any thread.
+// - It is okay to send ownership of `PidNamespace` across thread boundaries.
+unsafe impl Send for PidNamespace {}
+
+// SAFETY: It's OK to access `PidNamespace` through shared references from other threads because
+// we're either accessing properties that don't change or that are properly synchronised by C code.
+unsafe impl Sync for PidNamespace {}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
new file mode 100644
index 000000000000..08849d92c074
--- /dev/null
+++ b/rust/kernel/platform.rs
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for the platform bus.
+//!
+//! C header: [`include/linux/platform_device.h`](srctree/include/linux/platform_device.h)
+
+use crate::{
+ bindings, container_of, device, driver,
+ error::{to_result, Result},
+ of,
+ prelude::*,
+ str::CStr,
+ types::{ForeignOwnable, Opaque},
+ ThisModule,
+};
+
+use core::{
+ marker::PhantomData,
+ ptr::{addr_of_mut, NonNull},
+};
+
+/// An adapter for the registration of platform drivers.
+pub struct Adapter<T: Driver>(T);
+
+// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// a preceding call to `register` has been successful.
+unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
+ type RegType = bindings::platform_driver;
+
+ unsafe fn register(
+ pdrv: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ let of_table = match T::OF_ID_TABLE {
+ Some(table) => table.as_ptr(),
+ None => core::ptr::null(),
+ };
+
+ // SAFETY: It's safe to set the fields of `struct platform_driver` on initialization.
+ unsafe {
+ (*pdrv.get()).driver.name = name.as_char_ptr();
+ (*pdrv.get()).probe = Some(Self::probe_callback);
+ (*pdrv.get()).remove = Some(Self::remove_callback);
+ (*pdrv.get()).driver.of_match_table = of_table;
+ }
+
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ to_result(unsafe { bindings::__platform_driver_register(pdrv.get(), module.0) })
+ }
+
+ unsafe fn unregister(pdrv: &Opaque<Self::RegType>) {
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ unsafe { bindings::platform_driver_unregister(pdrv.get()) };
+ }
+}
+
+impl<T: Driver + 'static> Adapter<T> {
+ extern "C" fn probe_callback(pdev: *mut bindings::platform_device) -> kernel::ffi::c_int {
+ // SAFETY: The platform bus only ever calls the probe callback with a valid pointer to a
+ // `struct platform_device`.
+ //
+ // INVARIANT: `pdev` is valid for the duration of `probe_callback()`.
+ let pdev = unsafe { &*pdev.cast::<Device<device::Core>>() };
+
+ let info = <Self as driver::Adapter>::id_info(pdev.as_ref());
+ match T::probe(pdev, info) {
+ Ok(data) => {
+ // Let the `struct platform_device` own a reference of the driver's private data.
+ // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
+ // `struct platform_device`.
+ unsafe { bindings::platform_set_drvdata(pdev.as_raw(), data.into_foreign() as _) };
+ }
+ Err(err) => return Error::to_errno(err),
+ }
+
+ 0
+ }
+
+ extern "C" fn remove_callback(pdev: *mut bindings::platform_device) {
+ // SAFETY: `pdev` is a valid pointer to a `struct platform_device`.
+ let ptr = unsafe { bindings::platform_get_drvdata(pdev) };
+
+ // SAFETY: `remove_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
+ // `KBox<T>` pointer created through `KBox::into_foreign`.
+ let _ = unsafe { KBox::<T>::from_foreign(ptr) };
+ }
+}
+
+impl<T: Driver + 'static> driver::Adapter for Adapter<T> {
+ type IdInfo = T::IdInfo;
+
+ fn of_id_table() -> Option<of::IdTable<Self::IdInfo>> {
+ T::OF_ID_TABLE
+ }
+}
+
+/// Declares a kernel module that exposes a single platform driver.
+///
+/// # Examples
+///
+/// ```ignore
+/// kernel::module_platform_driver! {
+/// type: MyDriver,
+/// name: "Module name",
+/// authors: ["Author name"],
+/// description: "Description",
+/// license: "GPL v2",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_platform_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::platform::Adapter<T>, { $($f)* });
+ };
+}
+
+/// The platform driver trait.
+///
+/// Drivers must implement this trait in order to get a platform driver registered.
+///
+/// # Examples
+///
+///```
+/// # use kernel::{bindings, c_str, device::Core, of, platform};
+///
+/// struct MyDriver;
+///
+/// kernel::of_device_table!(
+/// OF_TABLE,
+/// MODULE_OF_TABLE,
+/// <MyDriver as platform::Driver>::IdInfo,
+/// [
+/// (of::DeviceId::new(c_str!("test,device")), ())
+/// ]
+/// );
+///
+/// impl platform::Driver for MyDriver {
+/// type IdInfo = ();
+/// const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+///
+/// fn probe(
+/// _pdev: &platform::Device<Core>,
+/// _id_info: Option<&Self::IdInfo>,
+/// ) -> Result<Pin<KBox<Self>>> {
+/// Err(ENODEV)
+/// }
+/// }
+///```
+pub trait Driver: Send {
+ /// The type holding driver private data about each device id supported by the driver.
+ ///
+ /// TODO: Use associated_type_defaults once stabilized:
+ ///
+    /// `type IdInfo: 'static = ();`
+ type IdInfo: 'static;
+
+ /// The table of OF device ids supported by the driver.
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>>;
+
+ /// Platform driver probe.
+ ///
+ /// Called when a new platform device is added or discovered.
+ /// Implementers should attempt to initialize the device here.
+ fn probe(dev: &Device<device::Core>, id_info: Option<&Self::IdInfo>)
+ -> Result<Pin<KBox<Self>>>;
+}
+
+/// The platform device representation.
+///
+/// This structure represents the Rust abstraction for a C `struct platform_device`. The
+/// implementation abstracts the usage of an already existing C `struct platform_device` within Rust
+/// code that we get passed from the C side.
+///
+/// # Invariants
+///
+/// A [`Device`] instance represents a valid `struct platform_device` created by the C portion of
+/// the kernel.
+#[repr(transparent)]
+pub struct Device<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::platform_device>,
+ PhantomData<Ctx>,
+);
+
+impl<Ctx: device::DeviceContext> Device<Ctx> {
+ fn as_raw(&self) -> *mut bindings::platform_device {
+ self.0.get()
+ }
+}
+
+// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
+// argument.
+kernel::impl_device_context_deref!(unsafe { Device });
+kernel::impl_device_context_into_aref!(Device);
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::get_device(self.as_ref().as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::platform_device_put(obj.cast().as_ptr()) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
+ fn as_ref(&self) -> &device::Device<Ctx> {
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct platform_device`.
+ let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::as_ref(dev) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> TryFrom<&device::Device<Ctx>> for &Device<Ctx> {
+ type Error = kernel::error::Error;
+
+ fn try_from(dev: &device::Device<Ctx>) -> Result<Self, Self::Error> {
+ // SAFETY: By the type invariant of `Device`, `dev.as_raw()` is a valid pointer to a
+ // `struct device`.
+ if !unsafe { bindings::dev_is_platform(dev.as_raw()) } {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We've just verified that the bus type of `dev` equals
+ // `bindings::platform_bus_type`, hence `dev` must be embedded in a valid
+ // `struct platform_device` as guaranteed by the corresponding C code.
+ let pdev = unsafe { container_of!(dev.as_raw(), bindings::platform_device, dev) };
+
+ // SAFETY: `pdev` is a valid pointer to a `struct platform_device`.
+ Ok(unsafe { &*pdev.cast() })
+ }
+}
+
+// SAFETY: A `Device` is always reference-counted and can be released from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` can be shared among threads because all methods of `Device`
+// (i.e. `Device<Normal>`) are thread safe.
+unsafe impl Sync for Device {}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index ae21600970b3..baa774a351ce 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -14,20 +14,23 @@
#[doc(no_inline)]
pub use core::pin::Pin;
-#[doc(no_inline)]
-pub use alloc::{boxed::Box, vec::Vec};
+pub use crate::alloc::{flags::*, Box, KBox, KVBox, KVVec, KVec, VBox, VVec, Vec};
#[doc(no_inline)]
-pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable};
+pub use macros::{export, module, vtable};
+
+pub use pin_init::{init, pin_data, pin_init, pinned_drop, InPlaceWrite, Init, PinInit, Zeroable};
-pub use super::build_assert;
+pub use super::{build_assert, build_error};
// `super::std_vendor` is hidden, which makes the macro inline for some reason.
#[doc(no_inline)]
pub use super::dbg;
+pub use super::fmt;
+pub use super::{dev_alert, dev_crit, dev_dbg, dev_emerg, dev_err, dev_info, dev_notice, dev_warn};
pub use super::{pr_alert, pr_crit, pr_debug, pr_emerg, pr_err, pr_info, pr_notice, pr_warn};
-pub use super::{init, pin_init, try_init, try_pin_init};
+pub use super::{try_init, try_pin_init};
pub use super::static_assert;
@@ -35,6 +38,6 @@ pub use super::error::{code::*, Error, Result};
pub use super::{str::CStr, ThisModule};
-pub use super::init::{InPlaceInit, Init, PinInit};
+pub use super::init::InPlaceInit;
pub use super::current;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index 9b13aca832c2..cf4714242e14 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -4,20 +4,18 @@
//!
//! C header: [`include/linux/printk.h`](srctree/include/linux/printk.h)
//!
-//! Reference: <https://www.kernel.org/doc/html/latest/core-api/printk-basics.html>
+//! Reference: <https://docs.kernel.org/core-api/printk-basics.html>
-use core::{
+use crate::{
ffi::{c_char, c_void},
- fmt,
+ prelude::*,
+ str::RawFormatter,
};
-
-use crate::str::RawFormatter;
-
-#[cfg(CONFIG_PRINTK)]
-use crate::bindings;
+use core::fmt;
// Called from `vsprintf` with format specifier `%pA`.
-#[no_mangle]
+#[expect(clippy::missing_safety_doc)]
+#[export]
unsafe extern "C" fn rust_fmt_argument(
buf: *mut c_char,
end: *mut c_char,
@@ -26,6 +24,7 @@ unsafe extern "C" fn rust_fmt_argument(
use fmt::Write;
// SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
+ // SAFETY: TODO.
let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) });
w.pos().cast()
}
@@ -35,8 +34,6 @@ unsafe extern "C" fn rust_fmt_argument(
/// Public but hidden since it should only be used from public macros.
#[doc(hidden)]
pub mod format_strings {
- use crate::bindings;
-
/// The length we copy from the `KERN_*` kernel prefixes.
const LENGTH_PREFIX: usize = 2;
@@ -107,9 +104,10 @@ pub unsafe fn call_printk(
) {
// `_printk` does not seem to fail in any path.
#[cfg(CONFIG_PRINTK)]
+ // SAFETY: TODO.
unsafe {
bindings::_printk(
- format_string.as_ptr() as _,
+ format_string.as_ptr(),
module_name.as_ptr(),
&args as *const _ as *const c_void,
);
@@ -130,7 +128,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- format_strings::CONT.as_ptr() as _,
+ format_strings::CONT.as_ptr(),
&args as *const _ as *const c_void,
);
}
@@ -142,7 +140,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
#[doc(hidden)]
#[cfg(not(testlib))]
#[macro_export]
-#[allow(clippy::crate_in_macro_def)]
+#[expect(clippy::crate_in_macro_def)]
macro_rules! print_macro (
// The non-continuation cases (most of them, e.g. `INFO`).
($format_string:path, false, $($arg:tt)+) => (
@@ -202,7 +200,7 @@ macro_rules! print_macro (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_emerg`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_emerg
+/// [`pr_emerg`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_emerg
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -226,7 +224,7 @@ macro_rules! pr_emerg (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_alert`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_alert
+/// [`pr_alert`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_alert
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -250,7 +248,7 @@ macro_rules! pr_alert (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_crit`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_crit
+/// [`pr_crit`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_crit
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -274,7 +272,7 @@ macro_rules! pr_crit (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_err`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_err
+/// [`pr_err`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_err
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -298,7 +296,7 @@ macro_rules! pr_err (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_warn`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_warn
+/// [`pr_warn`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_warn
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -322,7 +320,7 @@ macro_rules! pr_warn (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_notice`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_notice
+/// [`pr_notice`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_notice
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -346,7 +344,7 @@ macro_rules! pr_notice (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_info`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_info
+/// [`pr_info`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_info
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -372,7 +370,7 @@ macro_rules! pr_info (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_debug`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_debug
+/// [`pr_debug`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_debug
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -400,7 +398,7 @@ macro_rules! pr_debug (
/// `alloc::format!` for information about the formatting syntax.
///
/// [`pr_info!`]: crate::pr_info!
-/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
+/// [`pr_cont`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_cont
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
new file mode 100644
index 000000000000..5246b2c8a4ff
--- /dev/null
+++ b/rust/kernel/rbtree.rs
@@ -0,0 +1,1286 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Red-black trees.
+//!
+//! C header: [`include/linux/rbtree.h`](srctree/include/linux/rbtree.h)
+//!
+//! Reference: <https://docs.kernel.org/core-api/rbtree.html>
+
+use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*};
+use core::{
+ cmp::{Ord, Ordering},
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ptr::{addr_of_mut, from_mut, NonNull},
+};
+
+/// A red-black tree with owned nodes.
+///
+/// It is backed by the kernel C red-black trees.
+///
+/// # Examples
+///
+/// In the example below we do several operations on a tree. We note that insertions may fail if
+/// the system is out of memory.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// assert_eq!(tree.get(&10), Some(&100));
+/// assert_eq!(tree.get(&20), Some(&200));
+/// assert_eq!(tree.get(&30), Some(&300));
+/// }
+///
+/// // Iterate over the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&10, &100)));
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert_eq!(iter.next(), Some((&30, &300)));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Print all elements.
+/// for (key, value) in &tree {
+/// pr_info!("{} = {}\n", key, value);
+/// }
+///
+/// // Replace one of the elements.
+/// tree.try_create_and_insert(10, 1000, flags::GFP_KERNEL)?;
+///
+/// // Check that the tree reflects the replacement.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&10, &1000)));
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert_eq!(iter.next(), Some((&30, &300)));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Change the value of one of the elements.
+/// *tree.get_mut(&30).unwrap() = 3000;
+///
+/// // Check that the tree reflects the update.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&10, &1000)));
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert_eq!(iter.next(), Some((&30, &3000)));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove an element.
+/// tree.remove(&10);
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert_eq!(iter.next(), Some((&30, &3000)));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// In the example below, we first allocate a node, acquire a spinlock, then insert the node into
+/// the tree. This is useful when the insertion context does not allow sleeping, for example, when
+/// holding a spinlock.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}, sync::SpinLock};
+///
+/// fn insert_test(tree: &SpinLock<RBTree<u32, u32>>) -> Result {
+/// // Pre-allocate node. This may fail (as it allocates memory).
+/// let node = RBTreeNode::new(10, 100, flags::GFP_KERNEL)?;
+///
+/// // Insert node while holding the lock. It is guaranteed to succeed with no allocation
+/// // attempts.
+/// let mut guard = tree.lock();
+/// guard.insert(node);
+/// Ok(())
+/// }
+/// ```
+///
+/// In the example below, we remove an element and then insert a new node using a preallocated
+/// reservation.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&10, &100)));
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert_eq!(iter.next(), Some((&30, &300)));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove an element, getting back its value (if any).
+/// let existing = tree.remove(&30);
+/// assert_eq!(existing, Some(300));
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&10, &100)));
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Create a preallocated node reservation that we can use below.
+/// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+///
+/// // Insert a new node into the tree using the reservation. This is guaranteed to succeed (no
+/// // memory allocations).
+/// tree.insert(reservation.into_node(15, 150));
+///
+/// // Check that the tree reflects the new insertion.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&10, &100)));
+/// assert_eq!(iter.next(), Some((&15, &150)));
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// Non-null parent/children pointers stored in instances of the `rb_node` C struct are always
+/// valid, and pointing to a field of our internal representation of a node.
+pub struct RBTree<K, V> {
+ root: bindings::rb_root,
+ _p: PhantomData<Node<K, V>>,
+}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Send condition as would be used for a struct with K and V fields.
+unsafe impl<K: Send, V: Send> Send for RBTree<K, V> {}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Sync condition as would be used for a struct with K and V fields.
+unsafe impl<K: Sync, V: Sync> Sync for RBTree<K, V> {}
+
+impl<K, V> RBTree<K, V> {
+ /// Creates a new and empty tree.
+ pub fn new() -> Self {
+ Self {
+ // INVARIANT: There are no nodes in the tree, so the invariant holds vacuously.
+ root: bindings::rb_root::default(),
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns an iterator over the tree nodes, sorted by key.
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(&self.root) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns a mutable iterator over the tree nodes, sorted by key.
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(from_mut(&mut self.root)) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns an iterator over the keys of the nodes in the tree, in sorted order.
+ pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+ self.iter().map(|(k, _)| k)
+ }
+
+ /// Returns an iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+ self.iter().map(|(_, v)| v)
+ }
+
+ /// Returns a mutable iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+ self.iter_mut().map(|(_, v)| v)
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the smallest key.
+ pub fn cursor_front(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_first(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the largest key.
+ pub fn cursor_back(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_last(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> RBTree<K, V>
+where
+ K: Ord,
+{
+ /// Tries to insert a new value into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// Returns an error if it cannot allocate memory for the new node.
+ pub fn try_create_and_insert(
+ &mut self,
+ key: K,
+ value: V,
+ flags: Flags,
+ ) -> Result<Option<RBTreeNode<K, V>>> {
+ Ok(self.insert(RBTreeNode::new(key, value, flags)?))
+ }
+
+ /// Inserts a new node into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// This function always succeeds.
+ pub fn insert(&mut self, node: RBTreeNode<K, V>) -> Option<RBTreeNode<K, V>> {
+ match self.raw_entry(&node.node.key) {
+ RawEntry::Occupied(entry) => Some(entry.replace(node)),
+ RawEntry::Vacant(entry) => {
+ entry.insert(node);
+ None
+ }
+ }
+ }
+
+ fn raw_entry(&mut self, key: &K) -> RawEntry<'_, K, V> {
+ let raw_self: *mut RBTree<K, V> = self;
+ // The returned `RawEntry` is used to call either `rb_link_node` or `rb_replace_node`.
+ // The parameters of `bindings::rb_link_node` are as follows:
+ // - `node`: A pointer to an uninitialized node being inserted.
+ // - `parent`: A pointer to an existing node in the tree. One of its child pointers must be
+ // null, and `node` will become a child of `parent` by replacing that child pointer
+ // with a pointer to `node`.
+ // - `rb_link`: A pointer to either the left-child or right-child field of `parent`. This
+ // specifies which child of `parent` should hold `node` after this call. The
+ // value of `*rb_link` must be null before the call to `rb_link_node`. If the
+ // red/black tree is empty, then it’s also possible for `parent` to be null. In
+ // this case, `rb_link` is a pointer to the `root` field of the red/black tree.
+ //
+ // We will traverse the tree looking for a node that has a null pointer as its child,
+ // representing an empty subtree where we can insert our new node. We need to make sure
+ // that we preserve the ordering of the nodes in the tree. In each iteration of the loop
+ // we store `parent` and `child_field_of_parent`, and the new `node` will go somewhere
+ // in the subtree of `parent` that `child_field_of_parent` points at. Once
+ // we find an empty subtree, we can insert the new node using `rb_link_node`.
+ let mut parent = core::ptr::null_mut();
+ let mut child_field_of_parent: &mut *mut bindings::rb_node =
+ // SAFETY: `raw_self` is a valid pointer to the `RBTree` (created from `self` above).
+ unsafe { &mut (*raw_self).root.rb_node };
+ while !(*child_field_of_parent).is_null() {
+ let curr = *child_field_of_parent;
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let node = unsafe { container_of!(curr, Node<K, V>, links) };
+
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ match key.cmp(unsafe { &(*node).key }) {
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => child_field_of_parent = unsafe { &mut (*curr).rb_left },
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => child_field_of_parent = unsafe { &mut (*curr).rb_right },
+ Ordering::Equal => {
+ return RawEntry::Occupied(OccupiedEntry {
+ rbtree: self,
+ node_links: curr,
+ })
+ }
+ }
+ parent = curr;
+ }
+
+ RawEntry::Vacant(RawVacantEntry {
+ rbtree: raw_self,
+ parent,
+ child_field_of_parent,
+ _phantom: PhantomData,
+ })
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
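+ ///
+ /// # Examples
+ ///
+ /// A brief sketch of the entry API; the reservation is allocated up front, so the
+ /// vacant-path insertion itself cannot fail:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::{Entry, RBTree, RBTreeNodeReservation}};
+ ///
+ /// let mut tree: RBTree<u32, u32> = RBTree::new();
+ /// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+ ///
+ /// match tree.entry(1) {
+ ///     // The key already exists: update the value in place.
+ ///     Entry::Occupied(mut entry) => *entry.get_mut() += 1,
+ ///     // The key is vacant: consume the reservation to insert.
+ ///     Entry::Vacant(entry) => {
+ ///         entry.insert(10, reservation);
+ ///     }
+ /// }
+ /// assert_eq!(tree.get(&1), Some(&10));
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```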
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ match self.raw_entry(&key) {
+ RawEntry::Occupied(entry) => Entry::Occupied(entry),
+ RawEntry::Vacant(entry) => Entry::Vacant(VacantEntry { raw: entry, key }),
+ }
+ }
+
+ /// Returns an [`OccupiedEntry`] for the node with the given key, if one exists.
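+ ///
+ /// # Examples
+ ///
+ /// A brief sketch of mutating a value in place through the returned entry:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::RBTree};
+ ///
+ /// let mut tree = RBTree::new();
+ /// tree.try_create_and_insert(1, 10, flags::GFP_KERNEL)?;
+ ///
+ /// if let Some(mut entry) = tree.find_mut(&1) {
+ ///     *entry.get_mut() += 1;
+ /// }
+ /// assert_eq!(tree.get(&1), Some(&11));
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```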
+ pub fn find_mut(&mut self, key: &K) -> Option<OccupiedEntry<'_, K, V>> {
+ match self.raw_entry(key) {
+ RawEntry::Occupied(entry) => Some(entry),
+ RawEntry::Vacant(_entry) => None,
+ }
+ }
+
+ /// Returns a reference to the value corresponding to the key.
+ pub fn get(&self, key: &K) -> Option<&V> {
+ let mut node = self.root.rb_node;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) };
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ node = match key.cmp(unsafe { &(*this).key }) {
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => unsafe { (*node).rb_left },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => unsafe { (*node).rb_right },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Equal => return Some(unsafe { &(*this).value }),
+ }
+ }
+ None
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
+ self.find_mut(key).map(|node| node.into_mut())
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the node that was removed if one exists, or [`None`] otherwise.
+ pub fn remove_node(&mut self, key: &K) -> Option<RBTreeNode<K, V>> {
+ self.find_mut(key).map(OccupiedEntry::remove_node)
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the value that was removed if one exists, or [`None`] otherwise.
+ pub fn remove(&mut self, key: &K) -> Option<V> {
+ self.find_mut(key).map(OccupiedEntry::remove)
+ }
+
+ /// Returns a cursor over the tree nodes based on the given key.
+ ///
+ /// If the given key exists, the cursor starts there.
+ /// Otherwise it starts with the first larger key in sort order.
+ /// If there is no larger key, it returns [`None`].
+ pub fn cursor_lower_bound(&mut self, key: &K) -> Option<Cursor<'_, K, V>>
+ where
+ K: Ord,
+ {
+ let mut node = self.root.rb_node;
+ let mut best_match: Option<NonNull<Node<K, V>>> = None;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ let this_key = unsafe { &(*this).key };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let left_child = unsafe { (*node).rb_left };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let right_child = unsafe { (*node).rb_right };
+ match key.cmp(this_key) {
+ Ordering::Equal => {
+ best_match = NonNull::new(this);
+ break;
+ }
+ Ordering::Greater => {
+ node = right_child;
+ }
+ Ordering::Less => {
+ let is_better_match = match best_match {
+ None => true,
+ Some(best) => {
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let best_key = unsafe { &(*best.as_ptr()).key };
+ best_key > this_key
+ }
+ };
+ if is_better_match {
+ best_match = NonNull::new(this);
+ }
+ node = left_child;
+ }
+ };
+ }
+
+ let best = best_match?;
+
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let links = unsafe { addr_of_mut!((*best.as_ptr()).links) };
+
+ NonNull::new(links).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> Default for RBTree<K, V> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<K, V> Drop for RBTree<K, V> {
+ fn drop(&mut self) {
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ let mut next = unsafe { bindings::rb_first_postorder(&self.root) };
+
+ // INVARIANT: The loop invariant is that all tree nodes from `next` in postorder are valid.
+ while !next.is_null() {
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let this = unsafe { container_of!(next, Node<K, V>, links) };
+
+ // Find out what the next node is before disposing of the current one.
+ // SAFETY: `next` and all nodes in postorder are still valid.
+ next = unsafe { bindings::rb_next_postorder(next) };
+
+ // INVARIANT: This is the destructor, so we break the type invariant during clean-up,
+ // but it is not observable. The loop invariant is still maintained.
+
+ // SAFETY: `this` is valid per the loop invariant.
+ unsafe { drop(KBox::from_raw(this.cast_mut())) };
+ }
+ }
+}
+
+/// A bidirectional cursor over the tree nodes, sorted by key.
+///
+/// # Examples
+///
+/// In the following example, we obtain a cursor to the first element in the tree.
+/// The cursor allows us to iterate bidirectionally over key/value pairs in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Move the cursor, updating it to the 2nd element.
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Peek at the next element without moving the cursor.
+/// let next = cursor.peek_next().unwrap();
+/// assert_eq!(next, (&30, &300));
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Moving past the last element causes the cursor to return [`None`].
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+/// let cursor = cursor.move_next();
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// A cursor can also be obtained at the last element in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// let mut cursor = tree.cursor_back().unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Obtaining a cursor returns [`None`] if the tree is empty.
+///
+/// ```
+/// use kernel::rbtree::RBTree;
+///
+/// let mut tree: RBTree<u16, u16> = RBTree::new();
+/// assert!(tree.cursor_front().is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// [`RBTree::cursor_lower_bound`] can be used to start at an arbitrary node in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert five elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(40, 400, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(50, 500, flags::GFP_KERNEL)?;
+///
+/// // If the provided key exists, a cursor to that key is returned.
+/// let cursor = tree.cursor_lower_bound(&20).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // If the provided key doesn't exist, a cursor to the first larger element in sort order is returned.
+/// let cursor = tree.cursor_lower_bound(&25).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // If there is no larger key, [`None`] is returned.
+/// let cursor = tree.cursor_lower_bound(&55);
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// The cursor allows mutation of values in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Retrieve a cursor.
+/// let mut cursor = tree.cursor_front().unwrap();
+///
+/// // Get a mutable reference to the current value.
+/// let (k, v) = cursor.current_mut();
+/// *v = 1000;
+///
+/// // The updated value is reflected in the tree.
+/// let updated = tree.get(&10).unwrap();
+/// assert_eq!(updated, &1000);
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// It also allows node removal. The following examples demonstrate the behavior of removing the current node.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Remove the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+/// cursor = cursor.remove_current().0.unwrap();
+///
+/// // If a node exists after the current element, it is returned.
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Get a cursor to the last element, and remove it.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Since there is no next node, the previous node is returned.
+/// cursor = cursor.remove_current().0.unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Removing the last element in the tree returns [`None`].
+/// assert!(cursor.remove_current().0.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Nodes adjacent to the current node can also be removed.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_prev` from the first element returns [`None`].
+/// assert!(cursor.remove_prev().is_none());
+///
+/// // Get a cursor to the last element.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Calling `remove_prev` removes and returns the middle element.
+/// assert_eq!(cursor.remove_prev().unwrap().to_key_value(), (20, 200));
+///
+/// // Calling `remove_next` from the last element returns [`None`].
+/// assert!(cursor.remove_next().is_none());
+///
+/// // Move to the first element.
+/// cursor = cursor.move_prev().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_next` removes and returns the last element.
+/// assert_eq!(cursor.remove_next().unwrap().to_key_value(), (30, 300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+/// - `current` points to a node that is in the same [`RBTree`] as `tree`.
+pub struct Cursor<'a, K, V> {
+ tree: &'a mut RBTree<K, V>,
+ current: NonNull<bindings::rb_node>,
+}
+
+// SAFETY: The [`Cursor`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
+// The cursor only gives out immutable references to the keys, but since it has exclusive access to those same
+// keys, `Send` is sufficient. Requiring `Sync` would also be sound, but it would be needlessly restrictive.
+unsafe impl<'a, K: Send, V: Send> Send for Cursor<'a, K, V> {}
+
+// SAFETY: The [`Cursor`] gives out immutable references to K and mutable references to V,
+// so it has the same thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Cursor<'a, K, V> {}
+
+impl<'a, K, V> Cursor<'a, K, V> {
+ /// Returns the current node's key/value pair.
+ pub fn current(&self) -> (&K, &V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+ // - We have an immutable reference by the function signature.
+ unsafe { Self::to_key_value(self.current) }
+ }
+
+ /// Returns the current node's key/value pair, with a mutable reference to the value.
+ pub fn current_mut(&mut self) -> (&K, &mut V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+ // - We have a mutable reference by the function signature.
+ unsafe { Self::to_key_value_mut(self.current) }
+ }
+
+ /// Remove the current node from the tree.
+ ///
+ /// Returns a tuple where the first element is a cursor to the next node, if it exists,
+ /// else the previous node, else [`None`] (if the tree becomes empty). The second element
+ /// is the removed node.
+ pub fn remove_current(self) -> (Option<Self>, RBTreeNode<K, V>) {
+ let prev = self.get_neighbor_raw(Direction::Prev);
+ let next = self.get_neighbor_raw(Direction::Next);
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(self.current.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { KBox::from_raw(this) };
+ let node = RBTreeNode { node };
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(&mut (*this).links, addr_of_mut!(self.tree.root)) };
+
+ let current = match (prev, next) {
+ (_, Some(next)) => next,
+ (Some(prev), None) => prev,
+ (None, None) => {
+ return (None, node);
+ }
+ };
+
+ (
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ Some(Self {
+ current,
+ tree: self.tree,
+ }),
+ node,
+ )
+ }
+
+ /// Remove the previous node, returning it if it exists.
+ pub fn remove_prev(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Prev)
+ }
+
+ /// Remove the next node, returning it if it exists.
+ pub fn remove_next(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Next)
+ }
+
+ fn remove_neighbor(&mut self, direction: Direction) -> Option<RBTreeNode<K, V>> {
+ if let Some(neighbor) = self.get_neighbor_raw(direction) {
+ let neighbor = neighbor.as_ptr();
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(neighbor, addr_of_mut!(self.tree.root)) };
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(neighbor, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { KBox::from_raw(this) };
+ return Some(RBTreeNode { node });
+ }
+ None
+ }
+
+ /// Move the cursor to the previous node, returning [`None`] if it doesn't exist.
+ pub fn move_prev(self) -> Option<Self> {
+ self.mv(Direction::Prev)
+ }
+
+ /// Move the cursor to the next node, returning [`None`] if it doesn't exist.
+ pub fn move_next(self) -> Option<Self> {
+ self.mv(Direction::Next)
+ }
+
+ fn mv(self, direction: Direction) -> Option<Self> {
+ // INVARIANT:
+ // - `neighbor` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ self.get_neighbor_raw(direction).map(|neighbor| Self {
+ tree: self.tree,
+ current: neighbor,
+ })
+ }
+
+ /// Access the previous node without moving the cursor.
+ pub fn peek_prev(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Prev)
+ }
+
+ /// Access the next node without moving the cursor.
+ pub fn peek_next(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Next)
+ }
+
+ fn peek(&self, direction: Direction) -> Option<(&K, &V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have an immutable reference to `self`.
+ unsafe { Self::to_key_value(neighbor) }
+ })
+ }
+
+ /// Access the previous node mutably without moving the cursor.
+ pub fn peek_prev_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Prev)
+ }
+
+ /// Access the next node mutably without moving the cursor.
+ pub fn peek_next_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Next)
+ }
+
+ fn peek_mut(&mut self, direction: Direction) -> Option<(&K, &mut V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have a mutable reference to `self`.
+ unsafe { Self::to_key_value_mut(neighbor) }
+ })
+ }
+
+ fn get_neighbor_raw(&self, direction: Direction) -> Option<NonNull<bindings::rb_node>> {
+ // SAFETY: `self.current` is valid by the type invariants.
+ let neighbor = unsafe {
+ match direction {
+ Direction::Prev => bindings::rb_prev(self.current.as_ptr()),
+ Direction::Next => bindings::rb_next(self.current.as_ptr()),
+ }
+ };
+
+ NonNull::new(neighbor)
+ }
+
+ /// # Safety
+ ///
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has immutable access to `node` for the duration of `'b`.
+ unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees immutable access to `node`.
+ (k, unsafe { &*v })
+ }
+
+ /// # Safety
+ ///
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has mutable access to `node` for the duration of `'b`.
+ unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b mut V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees mutable access to `node`.
+ (k, unsafe { &mut *v })
+ }
+
+ /// # Safety
+ ///
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has immutable access to the key for the duration of `'b`.
+ unsafe fn to_key_value_raw<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, *mut V) {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let k = unsafe { &(*this).key };
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let v = unsafe { addr_of_mut!((*this).value) };
+ (k, v)
+ }
+}
+
+/// Direction for [`Cursor`] operations.
+enum Direction {
+ /// the node immediately before, in sort order
+ Prev,
+ /// the node immediately after, in sort order
+ Next,
+}
+
+impl<'a, K, V> IntoIterator for &'a RBTree<K, V> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+/// An iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter`].
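+///
+/// # Examples
+///
+/// A short sketch of iteration, which yields key/value pairs in ascending key order:
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// let mut tree = RBTree::new();
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+///
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&10, &100)));
+/// assert_eq!(iter.next(), Some((&20, &200)));
+/// assert_eq!(iter.next(), None);
+///
+/// # Ok::<(), Error>(())
+/// ```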
+pub struct Iter<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // SAFETY: Due to `self._tree`, `k` and `v` are valid for the lifetime of `'a`.
+ self.iter_raw.next().map(|(k, v)| unsafe { (&*k, &*v) })
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut RBTree<K, V> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// A mutable iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter_mut`].
+pub struct IterMut<'a, K, V> {
+ _tree: PhantomData<&'a mut RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
+// The iterator only gives out immutable references to the keys, but since it has exclusive access to those same
+// keys, `Send` is sufficient. Requiring `Sync` would also be sound, but it would be needlessly restrictive.
+unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
+
+// SAFETY: The [`IterMut`] gives out immutable references to K and mutable references to V, so it has the same
+// thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter_raw.next().map(|(k, v)|
+ // SAFETY: Due to `&mut self`, we have exclusive access to `k` and `v` for the lifetime of `'a`.
+ unsafe { (&*k, &mut *v) })
+ }
+}
+
+/// A raw iterator over the nodes of a [`RBTree`].
+///
+/// # Invariants
+/// - `self.next` is a valid pointer.
+/// - `self.next` points to a node stored inside of a valid `RBTree`.
+struct IterRaw<K, V> {
+ next: *mut bindings::rb_node,
+ _phantom: PhantomData<fn() -> (K, V)>,
+}
+
+impl<K, V> Iterator for IterRaw<K, V> {
+ type Item = (*mut K, *mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ // SAFETY: By the type invariant of `IterRaw`, `self.next` is a valid node in an `RBTree`,
+ // and by the type invariant of `RBTree`, all nodes point to the links field of `Node<K, V>` objects.
+ let cur = unsafe { container_of!(self.next, Node<K, V>, links) }.cast_mut();
+
+ // SAFETY: `self.next` is a valid tree node by the type invariants.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning above, it is safe to dereference the node.
+ Some(unsafe { (addr_of_mut!((*cur).key), addr_of_mut!((*cur).value)) })
+ }
+}
+
+/// A memory reservation for a red-black tree node.
+///
+/// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+/// can be obtained by directly allocating it ([`RBTreeNodeReservation::new`]).
+pub struct RBTreeNodeReservation<K, V> {
+ node: KBox<MaybeUninit<Node<K, V>>>,
+}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Allocates memory for a node to be eventually initialised and inserted into the tree via a
+ /// call to [`RBTree::insert`].
+ pub fn new(flags: Flags) -> Result<RBTreeNodeReservation<K, V>> {
+ Ok(RBTreeNodeReservation {
+ node: KBox::new_uninit(flags)?,
+ })
+ }
+}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation. Those can always
+// be moved across threads.
+unsafe impl<K, V> Send for RBTreeNodeReservation<K, V> {}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation.
+unsafe impl<K, V> Sync for RBTreeNodeReservation<K, V> {}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Initialises a node reservation.
+ ///
+ /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+ pub fn into_node(self, key: K, value: V) -> RBTreeNode<K, V> {
+ let node = KBox::write(
+ self.node,
+ Node {
+ key,
+ value,
+ links: bindings::rb_node::default(),
+ },
+ );
+ RBTreeNode { node }
+ }
+}
+
+/// A red-black tree node.
+///
+/// The node is fully initialised (with key and value) and can be inserted into a tree without any
+/// extra allocations or failure paths.
+pub struct RBTreeNode<K, V> {
+ node: KBox<Node<K, V>>,
+}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Allocates and initialises a node that can be inserted into the tree via
+ /// [`RBTree::insert`].
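+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of allocating a node up front so that the later insertion cannot fail:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}};
+ ///
+ /// // Allocation (and thus failure) happens here...
+ /// let node = RBTreeNode::new(1, 10, flags::GFP_KERNEL)?;
+ ///
+ /// // ...so this insertion is infallible.
+ /// let mut tree = RBTree::new();
+ /// assert!(tree.insert(node).is_none());
+ /// assert_eq!(tree.get(&1), Some(&10));
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```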
+ pub fn new(key: K, value: V, flags: Flags) -> Result<RBTreeNode<K, V>> {
+ Ok(RBTreeNodeReservation::new(flags)?.into_node(key, value))
+ }
+
+ /// Get the key and value from inside the node.
+ pub fn to_key_value(self) -> (K, V) {
+ let node = KBox::into_inner(self.node);
+
+ (node.key, node.value)
+ }
+}
+
+// SAFETY: If K and V can be sent across threads, then it's also okay to send [`RBTreeNode`] across
+// threads.
+unsafe impl<K: Send, V: Send> Send for RBTreeNode<K, V> {}
+
+// SAFETY: If K and V can be accessed without synchronization, then it's also okay to access
+// [`RBTreeNode`] without synchronization.
+unsafe impl<K: Sync, V: Sync> Sync for RBTreeNode<K, V> {}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Drop the key and value, but keep the allocation.
+ ///
+ /// It then becomes a reservation that can be re-initialised into a different node (i.e., with
+ /// a different key and/or value).
+ ///
+ /// The existing key and value are dropped in-place as part of this operation, that is, memory
+ /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).
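+ ///
+ /// # Examples
+ ///
+ /// A short sketch of reusing the allocation for a different key/value pair:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::RBTreeNode};
+ ///
+ /// let node = RBTreeNode::new(1, 10, flags::GFP_KERNEL)?;
+ ///
+ /// // Drop the old key/value, keeping the allocation...
+ /// let reservation = node.into_reservation();
+ ///
+ /// // ...and reuse it for a different pair without allocating again.
+ /// let node = reservation.into_node(2, 20);
+ /// assert_eq!(node.to_key_value(), (2, 20));
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```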
+ pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {
+ RBTreeNodeReservation {
+ node: KBox::drop_contents(self.node),
+ }
+ }
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This enum is constructed from the [`entry`] method on [`RBTree`].
+///
+/// [`entry`]: fn@RBTree::entry
+pub enum Entry<'a, K, V> {
+ /// This [`RBTree`] does not have a node with this key.
+ Vacant(VacantEntry<'a, K, V>),
+ /// This [`RBTree`] already has a node with this key.
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// Like [`Entry`], except that it doesn't have ownership of the key.
+enum RawEntry<'a, K, V> {
+ Vacant(RawVacantEntry<'a, K, V>),
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// A view into a vacant entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+pub struct VacantEntry<'a, K, V> {
+ key: K,
+ raw: RawVacantEntry<'a, K, V>,
+}
+
+/// Like [`VacantEntry`], but doesn't hold on to the key.
+///
+/// # Invariants
+/// - `parent` may be null if the new node becomes the root.
+/// - `child_field_of_parent` is a valid pointer to the left-child or right-child of `parent`. If `parent` is
+/// null, it is a pointer to the root of the [`RBTree`].
+struct RawVacantEntry<'a, K, V> {
+ rbtree: *mut RBTree<K, V>,
+ /// The node that will become the parent of the new node if we insert one.
+ parent: *mut bindings::rb_node,
+ /// This points to the left-child or right-child field of `parent`, or `root` if `parent` is
+ /// null.
+ child_field_of_parent: *mut *mut bindings::rb_node,
+ _phantom: PhantomData<&'a mut RBTree<K, V>>,
+}
+
+impl<'a, K, V> RawVacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ ///
+ /// The `node` must have a key such that inserting it here does not break the ordering of this
+ /// [`RBTree`].
+ fn insert(self, node: RBTreeNode<K, V>) -> &'a mut V {
+ let node = KBox::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `KBox::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // INVARIANT: We are linking in a new node, which is valid. It remains valid because we
+ // "forgot" it with `KBox::into_raw`.
+ // SAFETY: The type invariants of `RawVacantEntry` are exactly the safety requirements of `rb_link_node`.
+ unsafe { bindings::rb_link_node(node_links, self.parent, self.child_field_of_parent) };
+
+ // SAFETY: All pointers are valid. `node` has just been inserted into the tree.
+ unsafe { bindings::rb_insert_color(node_links, addr_of_mut!((*self.rbtree).root)) };
+
+ // SAFETY: The node is valid until we remove it from the tree.
+ unsafe { &mut (*node).value }
+ }
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ pub fn insert(self, value: V, reservation: RBTreeNodeReservation<K, V>) -> &'a mut V {
+ self.raw.insert(reservation.into_node(self.key, value))
+ }
+}
+
+/// A view into an occupied entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+///
+/// # Invariants
+/// - `node_links` is a valid, non-null pointer to a tree node in `self.rbtree`
+pub struct OccupiedEntry<'a, K, V> {
+ rbtree: &'a mut RBTree<K, V>,
+ /// The node that this entry corresponds to.
+ node_links: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the value in the entry.
+ pub fn get(&self) -> &V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have shared access to the underlying tree, and can thus give out a shared reference.
+ unsafe { &(*container_of!(self.node_links, Node<K, V>, links)).value }
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ pub fn get_mut(&mut self) -> &mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have exclusive access to the underlying tree, and can thus give out a mutable reference.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Converts the entry into a mutable reference to its value.
+ ///
+ /// If you still need the `OccupiedEntry` afterwards, use [`Self::get_mut`] instead.
+ pub fn into_mut(self) -> &'a mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - This consumes the `&'a mut RBTree<K, V>`, therefore it can give out a mutable reference that lives for `'a`.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Remove this entry from the [`RBTree`].
+ pub fn remove_node(self) -> RBTreeNode<K, V> {
+ // SAFETY: The node is a node in the tree, so it is valid.
+ unsafe { bindings::rb_erase(self.node_links, &mut self.rbtree.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it, however, it was
+ // removed from the tree. So the invariants still hold.
+ RBTreeNode {
+ // SAFETY: The node was a node in the tree, but we removed it, so we can convert it
+ // back into a box.
+ node: unsafe {
+ KBox::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut())
+ },
+ }
+ }
+
+ /// Takes the value of the entry out of the map, and returns it.
+ pub fn remove(self) -> V {
+ let rb_node = self.remove_node();
+ let node = KBox::into_inner(rb_node.node);
+
+ node.value
+ }
+
+ /// Swap the current node for the provided node.
+ ///
+ /// The key of both nodes must be equal.
+ fn replace(self, node: RBTreeNode<K, V>) -> RBTreeNode<K, V> {
+ let node = KBox::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `KBox::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let new_node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // SAFETY: This updates the pointers so that `new_node_links` is in the tree where
+ // `self.node_links` used to be.
+ unsafe {
+ bindings::rb_replace_node(self.node_links, new_node_links, &mut self.rbtree.root)
+ };
+
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - Now that we removed this entry from the tree, we can convert the node to a box.
+ let old_node =
+ unsafe { KBox::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) };
+
+ RBTreeNode { node: old_node }
+ }
+}
+
+struct Node<K, V> {
+ links: bindings::rb_node,
+ key: K,
+ value: V,
+}
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
new file mode 100644
index 000000000000..db4aa46bb121
--- /dev/null
+++ b/rust/kernel/revocable.rs
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Revocable objects.
+//!
+//! The [`Revocable`] type wraps other types and allows access to them to be revoked. The existence
+//! of a [`RevocableGuard`] ensures that objects remain valid.
+
+use crate::{bindings, prelude::*, sync::rcu, types::Opaque};
+use core::{
+ marker::PhantomData,
+ ops::Deref,
+ ptr::drop_in_place,
+ sync::atomic::{AtomicBool, Ordering},
+};
+
+/// An object that can become inaccessible at runtime.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+///
+/// Same example as above, but explicitly using the RCU read-side lock.
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+/// use kernel::sync::rcu;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = rcu::read_lock();
+/// let e = v.try_access_with_guard(&guard)?;
+/// Some(e.a + e.b)
+/// }
+///
+/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+#[pin_data(PinnedDrop)]
+pub struct Revocable<T> {
+ is_available: AtomicBool,
+ #[pin]
+ data: Opaque<T>,
+}
+
+// SAFETY: `Revocable` is `Send` if the wrapped object is also `Send`. This is because while the
+// functionality exposed by `Revocable` can be accessed from any thread/CPU, it is possible that
+// this isn't supported by the wrapped object.
+unsafe impl<T: Send> Send for Revocable<T> {}
+
+// SAFETY: `Revocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require `Send`
+// from the wrapped object as well because of `Revocable::revoke`, which can trigger the `Drop`
+// implementation of the wrapped object from an arbitrary thread.
+unsafe impl<T: Sync + Send> Sync for Revocable<T> {}
+
+impl<T> Revocable<T> {
+ /// Creates a new revocable instance of the given data.
+ pub fn new(data: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ is_available: AtomicBool::new(true),
+ data <- Opaque::pin_init(data),
+ })
+ }
+
+ /// Tries to access the revocable wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. In such cases, callers are not allowed to sleep
+ /// because another CPU may be waiting to complete the revocation of this object.
+ pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
+ let guard = rcu::read_lock();
+ if self.is_available.load(Ordering::Relaxed) {
+ // Since `self.is_available` is true, data is initialised and has to remain valid
+ // because the RCU read side lock prevents it from being dropped.
+ Some(RevocableGuard::new(self.data.get(), guard))
+ } else {
+ None
+ }
+ }
+
+ /// Tries to access the revocable wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a shared reference to the object otherwise; the object is guaranteed to
+ /// remain accessible while the rcu read side guard is alive. In such cases, callers are not
+ /// allowed to sleep because another CPU may be waiting to complete the revocation of this
+ /// object.
+ pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> {
+ if self.is_available.load(Ordering::Relaxed) {
+ // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+ // valid because the RCU read side lock prevents it from being dropped.
+ Some(unsafe { &*self.data.get() })
+ } else {
+ None
+ }
+ }
+
+ /// Tries to access the wrapped object and run a closure on it while the guard is held.
+ ///
+ /// This is a convenience method to run short non-sleepable code blocks while ensuring the
+ /// guard is dropped afterwards. [`Self::try_access`] carries the risk that the caller will
+ /// forget to explicitly drop that returned guard before calling sleepable code; this method
+ /// adds an extra layer of safety to ensure that cannot happen.
+ ///
+ /// Returns [`None`] if the object has been revoked and is therefore no longer accessible, or
+ /// the result of the closure wrapped in [`Some`]. If the closure returns a [`Result`] then the
+ /// return type becomes `Option<Result<>>`, which can be inconvenient. Users are encouraged to
+ /// define their own macro that turns the [`Option`] into a proper error code and flattens the
+ /// inner result into it if it makes sense within their subsystem.
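+ ///
+ /// # Examples
+ ///
+ /// A short sketch of the closure-based access:
+ ///
+ /// ```
+ /// # use kernel::revocable::Revocable;
+ /// let v = KBox::pin_init(Revocable::new(42u32), GFP_KERNEL).unwrap();
+ /// assert_eq!(v.try_access_with(|x| *x + 1), Some(43));
+ /// v.revoke();
+ /// assert_eq!(v.try_access_with(|x| *x + 1), None);
+ /// ```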
+ pub fn try_access_with<R, F: FnOnce(&T) -> R>(&self, f: F) -> Option<R> {
+ self.try_access().map(|t| f(&*t))
+ }
+
+ /// Directly access the revocable wrapped object.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure this [`Revocable`] instance hasn't been revoked and won't be revoked
+ /// as long as the returned `&T` lives.
+ pub unsafe fn access(&self) -> &T {
+ // SAFETY: By the safety requirement of this function it is guaranteed that
+ // `self.data.get()` is a valid pointer to an instance of `T`.
+ unsafe { &*self.data.get() }
+ }
+
+ /// # Safety
+ ///
+ /// Callers must ensure that there are no more concurrent users of the revocable object.
+ unsafe fn revoke_internal<const SYNC: bool>(&self) {
+ if self.is_available.swap(false, Ordering::Relaxed) {
+ if SYNC {
+ // SAFETY: Just an FFI call, there are no further requirements.
+ unsafe { bindings::synchronize_rcu() };
+ }
+
+ // SAFETY: We know `self.data` is valid because only one CPU can succeed the
+ // `swap` above that takes `is_available` from `true` to `false`.
+ unsafe { drop_in_place(self.data.get()) };
+ }
+ }
+
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Access to the object is revoked immediately for new callers of [`Revocable::try_access`];
+ /// this variant expects that there are no concurrent users of the object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there are no more concurrent users of the revocable object.
+ pub unsafe fn revoke_nosync(&self) {
+ // SAFETY: By the safety requirement of this function, the caller ensures that nobody is
+ // accessing the data anymore and hence we don't have to wait for the grace period to
+ // finish.
+ unsafe { self.revoke_internal::<false>() }
+ }
+
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Access to the object is revoked immediately for new callers of [`Revocable::try_access`].
+ ///
+ /// If there are concurrent users of the object (i.e., ones that called
+ /// [`Revocable::try_access`] beforehand and still haven't dropped the returned guard), this
+ /// function waits for the concurrent access to complete before dropping the wrapped object.
+ pub fn revoke(&self) {
+ // SAFETY: By passing `true` we ask `revoke_internal` to wait for the grace period to
+ // finish.
+ unsafe { self.revoke_internal::<true>() }
+ }
+}
+
+#[pinned_drop]
+impl<T> PinnedDrop for Revocable<T> {
+ fn drop(self: Pin<&mut Self>) {
+ // Drop only if the data hasn't been revoked yet (in which case it has already been
+ // dropped).
+ // SAFETY: We are not moving out of `p`, only dropping in place
+ let p = unsafe { self.get_unchecked_mut() };
+ if *p.is_available.get_mut() {
+ // SAFETY: We know `self.data` is valid because no other CPU has changed
+ // `is_available` to `false` yet, and no other CPU can do it anymore because this CPU
+ // holds the only reference (mutable) to `self` now.
+ unsafe { drop_in_place(p.data.get()) };
+ }
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+///
+/// CPUs may not sleep while holding on to [`RevocableGuard`] because it's in atomic context
+/// holding the RCU read-side lock.
+///
+/// # Invariants
+///
+/// The RCU read-side lock is held while the guard is alive.
+pub struct RevocableGuard<'a, T> {
+ data_ref: *const T,
+ _rcu_guard: rcu::Guard,
+ _p: PhantomData<&'a ()>,
+}
+
+impl<T> RevocableGuard<'_, T> {
+ fn new(data_ref: *const T, rcu_guard: rcu::Guard) -> Self {
+ Self {
+ data_ref,
+ _rcu_guard: rcu_guard,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T> Deref for RevocableGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariants, we hold the rcu read-side lock, so the object is
+ // guaranteed to remain valid.
+ unsafe { &*self.data_ref }
+ }
+}
diff --git a/rust/kernel/security.rs b/rust/kernel/security.rs
new file mode 100644
index 000000000000..0c63e9e7e564
--- /dev/null
+++ b/rust/kernel/security.rs
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Linux Security Modules (LSM).
+//!
+//! C header: [`include/linux/security.h`](srctree/include/linux/security.h).
+
+use crate::{
+ bindings,
+ error::{to_result, Result},
+};
+
+/// A security context string.
+///
+/// # Invariants
+///
+/// The `ctx` field corresponds to a valid security context as returned by a successful call to
+/// `security_secid_to_secctx` that has not yet been released by `security_release_secctx`.
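+///
+/// # Examples
+///
+/// A minimal sketch with a hypothetical helper; `secid` is assumed to be a valid security id
+/// obtained elsewhere:
+///
+/// ```
+/// # use kernel::security::SecurityCtx;
+/// fn secctx_len(secid: u32) -> Result<usize> {
+///     let ctx = SecurityCtx::from_secid(secid)?;
+///     Ok(ctx.as_bytes().len())
+/// }
+/// # let _ = secctx_len;
+/// ```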
+pub struct SecurityCtx {
+ ctx: bindings::lsm_context,
+}
+
+impl SecurityCtx {
+ /// Get the security context given its id.
+ #[inline]
+ pub fn from_secid(secid: u32) -> Result<Self> {
+ // SAFETY: `struct lsm_context` can be initialized to all zeros.
+ let mut ctx: bindings::lsm_context = unsafe { core::mem::zeroed() };
+
+ // SAFETY: Just a C FFI call. The pointer is valid for writes.
+ to_result(unsafe { bindings::security_secid_to_secctx(secid, &mut ctx) })?;
+
+ // INVARIANT: If the above call did not fail, then we have a valid security context.
+ Ok(Self { ctx })
+ }
+
+ /// Returns whether the security context is empty.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.ctx.len == 0
+ }
+
+ /// Returns the length of this security context.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.ctx.len as usize
+ }
+
+ /// Returns the bytes for this security context.
+ #[inline]
+ pub fn as_bytes(&self) -> &[u8] {
+ let ptr = self.ctx.context;
+ if ptr.is_null() {
+ debug_assert_eq!(self.len(), 0);
+ // We can't pass a null pointer to `slice::from_raw_parts` even if the length is zero.
+ return &[];
+ }
+
+ // SAFETY: The call to `security_secid_to_secctx` guarantees that the pointer is valid for
+ // `self.len()` bytes. Furthermore, if the length is zero, then we have ensured that the
+ // pointer is not null.
+ unsafe { core::slice::from_raw_parts(ptr.cast(), self.len()) }
+ }
+}
+
+impl Drop for SecurityCtx {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: By the invariant of `Self`, this releases an lsm context that came from a
+ // successful call to `security_secid_to_secctx` and has not yet been released.
+ unsafe { bindings::security_release_secctx(&mut self.ctx) };
+ }
+}
diff --git a/rust/kernel/seq_file.rs b/rust/kernel/seq_file.rs
new file mode 100644
index 000000000000..7a9403eb6e5b
--- /dev/null
+++ b/rust/kernel/seq_file.rs
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Seq file bindings.
+//!
+//! C header: [`include/linux/seq_file.h`](srctree/include/linux/seq_file.h)
+
+use crate::{bindings, c_str, types::NotThreadSafe, types::Opaque};
+
+/// A utility for generating the contents of a seq file.
+#[repr(transparent)]
+pub struct SeqFile {
+ inner: Opaque<bindings::seq_file>,
+ _not_send: NotThreadSafe,
+}
+
+impl SeqFile {
+ /// Creates a new [`SeqFile`] from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that for the duration of `'a` the following is satisfied:
+ /// * The pointer points at a valid `struct seq_file`.
+ /// * The `struct seq_file` is not accessed from any other thread.
+ pub unsafe fn from_raw<'a>(ptr: *mut bindings::seq_file) -> &'a SeqFile {
+ // SAFETY: The caller ensures that the reference is valid for 'a. There's no way to trigger
+ // a data race by using the `&SeqFile` since this is the only thread accessing the seq_file.
+ //
+ // CAST: The layout of `struct seq_file` and `SeqFile` is compatible.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Used by the [`seq_print`] macro.
+ #[inline]
+ pub fn call_printf(&self, args: core::fmt::Arguments<'_>) {
+ // SAFETY: Passing a void pointer to `Arguments` is valid for `%pA`.
+ unsafe {
+ bindings::seq_printf(
+ self.inner.get(),
+ c_str!("%pA").as_char_ptr(),
+ &args as *const _ as *const crate::ffi::c_void,
+ );
+ }
+ }
+}
+
+/// Write to a [`SeqFile`] with the ordinary Rust formatting syntax.
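+///
+/// # Examples
+///
+/// A minimal sketch assuming a hypothetical `show` callback that receives a valid
+/// [`SeqFile`] reference; the formatting syntax is the same as `format_args!`:
+///
+/// ```
+/// # use kernel::seq_file::SeqFile;
+/// # use kernel::seq_print;
+/// fn show(m: &SeqFile, value: u32) {
+///     seq_print!(m, "value: {}\n", value);
+/// }
+/// # let _ = show;
+/// ```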
+#[macro_export]
+macro_rules! seq_print {
+ ($m:expr, $($arg:tt)+) => (
+ $m.call_printf(format_args!($($arg)+))
+ );
+}
+pub use seq_print;
diff --git a/rust/kernel/sizes.rs b/rust/kernel/sizes.rs
new file mode 100644
index 000000000000..834c343e4170
--- /dev/null
+++ b/rust/kernel/sizes.rs
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Commonly used sizes.
+//!
+//! C headers: [`include/linux/sizes.h`](srctree/include/linux/sizes.h).
+
+/// 0x00000400
+pub const SZ_1K: usize = bindings::SZ_1K as usize;
+/// 0x00000800
+pub const SZ_2K: usize = bindings::SZ_2K as usize;
+/// 0x00001000
+pub const SZ_4K: usize = bindings::SZ_4K as usize;
+/// 0x00002000
+pub const SZ_8K: usize = bindings::SZ_8K as usize;
+/// 0x00004000
+pub const SZ_16K: usize = bindings::SZ_16K as usize;
+/// 0x00008000
+pub const SZ_32K: usize = bindings::SZ_32K as usize;
+/// 0x00010000
+pub const SZ_64K: usize = bindings::SZ_64K as usize;
+/// 0x00020000
+pub const SZ_128K: usize = bindings::SZ_128K as usize;
+/// 0x00040000
+pub const SZ_256K: usize = bindings::SZ_256K as usize;
+/// 0x00080000
+pub const SZ_512K: usize = bindings::SZ_512K as usize;
diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
index 388d6a5147a2..279bd353687a 100644
--- a/rust/kernel/std_vendor.rs
+++ b/rust/kernel/std_vendor.rs
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+//! Rust standard library vendored code.
+//!
//! The contents of this file come from the Rust standard library, hosted in
//! the <https://github.com/rust-lang/rust> repository, licensed under
//! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
@@ -14,9 +16,9 @@
///
/// ```rust
/// let a = 2;
-/// # #[allow(clippy::dbg_macro)]
+/// # #[expect(clippy::disallowed_macros)]
/// let b = dbg!(a * 2) + 1;
-/// // ^-- prints: [src/main.rs:2] a * 2 = 4
+/// // ^-- prints: [src/main.rs:3:9] a * 2 = 4
/// assert_eq!(b, 5);
/// ```
///
@@ -52,7 +54,7 @@
/// With a method call:
///
/// ```rust
-/// # #[allow(clippy::dbg_macro)]
+/// # #[expect(clippy::disallowed_macros)]
/// fn foo(n: usize) {
/// if dbg!(n.checked_sub(4)).is_some() {
/// // ...
@@ -65,14 +67,13 @@
/// This prints to the kernel log:
///
/// ```text,ignore
-/// [src/main.rs:4] n.checked_sub(4) = None
+/// [src/main.rs:3:8] n.checked_sub(4) = None
/// ```
///
/// Naive factorial implementation:
///
/// ```rust
-/// # #[allow(clippy::dbg_macro)]
-/// # {
+/// # #![expect(clippy::disallowed_macros)]
/// fn factorial(n: u32) -> u32 {
/// if dbg!(n <= 1) {
/// dbg!(1)
@@ -82,21 +83,20 @@
/// }
///
/// dbg!(factorial(4));
-/// # }
/// ```
///
/// This prints to the kernel log:
///
/// ```text,ignore
-/// [src/main.rs:3] n <= 1 = false
-/// [src/main.rs:3] n <= 1 = false
-/// [src/main.rs:3] n <= 1 = false
-/// [src/main.rs:3] n <= 1 = true
-/// [src/main.rs:4] 1 = 1
-/// [src/main.rs:5] n * factorial(n - 1) = 2
-/// [src/main.rs:5] n * factorial(n - 1) = 6
-/// [src/main.rs:5] n * factorial(n - 1) = 24
-/// [src/main.rs:11] factorial(4) = 24
+/// [src/main.rs:3:8] n <= 1 = false
+/// [src/main.rs:3:8] n <= 1 = false
+/// [src/main.rs:3:8] n <= 1 = false
+/// [src/main.rs:3:8] n <= 1 = true
+/// [src/main.rs:4:9] 1 = 1
+/// [src/main.rs:5:9] n * factorial(n - 1) = 2
+/// [src/main.rs:5:9] n * factorial(n - 1) = 6
+/// [src/main.rs:5:9] n * factorial(n - 1) = 24
+/// [src/main.rs:11:1] factorial(4) = 24
/// ```
///
/// The `dbg!(..)` macro moves the input:
@@ -118,7 +118,7 @@
/// a tuple (and return it, too):
///
/// ```
-/// # #[allow(clippy::dbg_macro)]
+/// # #![expect(clippy::disallowed_macros)]
/// assert_eq!(dbg!(1usize, 2u32), (1, 2));
/// ```
///
@@ -127,16 +127,14 @@
/// invocations. You can use a 1-tuple directly if you need one:
///
/// ```
-/// # #[allow(clippy::dbg_macro)]
-/// # {
+/// # #![expect(clippy::disallowed_macros)]
/// assert_eq!(1, dbg!(1u32,)); // trailing comma ignored
/// assert_eq!((1,), dbg!((1u32,))); // 1-tuple
-/// # }
/// ```
///
/// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html
/// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html
-/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
+/// [`printk`]: https://docs.kernel.org/core-api/printk-basics.html
/// [`pr_info`]: crate::pr_info!
/// [`pr_debug`]: crate::pr_debug!
#[macro_export]
@@ -146,15 +144,16 @@ macro_rules! dbg {
// `$val` expression could be a block (`{ .. }`), in which case the `pr_info!`
// will be malformed.
() => {
- $crate::pr_info!("[{}:{}]\n", ::core::file!(), ::core::line!())
+ $crate::pr_info!("[{}:{}:{}]\n", ::core::file!(), ::core::line!(), ::core::column!())
};
($val:expr $(,)?) => {
// Use of `match` here is intentional because it affects the lifetimes
// of temporaries - https://stackoverflow.com/a/48732525/1063961
match $val {
tmp => {
- $crate::pr_info!("[{}:{}] {} = {:#?}\n",
- ::core::file!(), ::core::line!(), ::core::stringify!($val), &tmp);
+ $crate::pr_info!("[{}:{}:{}] {} = {:#?}\n",
+ ::core::file!(), ::core::line!(), ::core::column!(),
+ ::core::stringify!($val), &tmp);
tmp
}
}
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index 925ced8fdc61..fb61ce81ea28 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -2,15 +2,11 @@
//! String representations.
-use alloc::alloc::AllocError;
-use alloc::vec::Vec;
+use crate::alloc::{flags::*, AllocError, KVec};
use core::fmt::{self, Write};
-use core::ops::{self, Deref, Index};
+use core::ops::{self, Deref, DerefMut, Index};
-use crate::{
- bindings,
- error::{code::*, Error},
-};
+use crate::error::{code::*, Error};
/// Byte string without UTF-8 validity guarantee.
#[repr(transparent)]
@@ -35,6 +31,23 @@ impl BStr {
// SAFETY: `BStr` is transparent to `[u8]`.
unsafe { &*(bytes as *const [u8] as *const BStr) }
}
+
+ /// Strip a prefix from `self`. Delegates to [`slice::strip_prefix`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::b_str;
+ /// assert_eq!(Some(b_str!("bar")), b_str!("foobar").strip_prefix(b_str!("foo")));
+ /// assert_eq!(None, b_str!("foobar").strip_prefix(b_str!("bar")));
+ /// assert_eq!(Some(b_str!("foobar")), b_str!("foobar").strip_prefix(b_str!("")));
+ /// assert_eq!(Some(b_str!("")), b_str!("foobar").strip_prefix(b_str!("foobar")));
+ /// ```
+ pub fn strip_prefix(&self, pattern: impl AsRef<Self>) -> Option<&BStr> {
+ self.deref()
+ .strip_prefix(pattern.as_ref().deref())
+ .map(Self::from_bytes)
+ }
}
impl fmt::Display for BStr {
@@ -43,12 +56,13 @@ impl fmt::Display for BStr {
/// ```
/// # use kernel::{fmt, b_str, str::{BStr, CString}};
/// let ascii = b_str!("Hello, BStr!");
- /// let s = CString::try_from_fmt(fmt!("{}", ascii)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{}", ascii))?;
/// assert_eq!(s.as_bytes(), "Hello, BStr!".as_bytes());
///
/// let non_ascii = b_str!("🦀");
- /// let s = CString::try_from_fmt(fmt!("{}", non_ascii)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{}", non_ascii))?;
/// assert_eq!(s.as_bytes(), "\\xf0\\x9f\\xa6\\x80".as_bytes());
+ /// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for &b in &self.0 {
@@ -59,7 +73,7 @@ impl fmt::Display for BStr {
b'\r' => f.write_str("\\r")?,
// Printable characters.
0x20..=0x7e => f.write_char(b as char)?,
- _ => write!(f, "\\x{:02x}", b)?,
+ _ => write!(f, "\\x{b:02x}")?,
}
}
Ok(())
@@ -74,12 +88,13 @@ impl fmt::Debug for BStr {
/// # use kernel::{fmt, b_str, str::{BStr, CString}};
/// // Embedded double quotes are escaped.
/// let ascii = b_str!("Hello, \"BStr\"!");
- /// let s = CString::try_from_fmt(fmt!("{:?}", ascii)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{:?}", ascii))?;
/// assert_eq!(s.as_bytes(), "\"Hello, \\\"BStr\\\"!\"".as_bytes());
///
/// let non_ascii = b_str!("😺");
- /// let s = CString::try_from_fmt(fmt!("{:?}", non_ascii)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{:?}", non_ascii))?;
/// assert_eq!(s.as_bytes(), "\"\\xf0\\x9f\\x98\\xba\"".as_bytes());
+ /// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_char('"')?;
@@ -94,7 +109,7 @@ impl fmt::Debug for BStr {
b'\\' => f.write_str("\\\\")?,
// Printable characters.
0x20..=0x7e => f.write_char(b as char)?,
- _ => write!(f, "\\x{:02x}", b)?,
+ _ => write!(f, "\\x{b:02x}")?,
}
}
f.write_char('"')
@@ -110,6 +125,35 @@ impl Deref for BStr {
}
}
+impl PartialEq for BStr {
+ fn eq(&self, other: &Self) -> bool {
+ self.deref().eq(other.deref())
+ }
+}
+
+impl<Idx> Index<Idx> for BStr
+where
+ [u8]: Index<Idx, Output = [u8]>,
+{
+ type Output = Self;
+
+ fn index(&self, index: Idx) -> &Self::Output {
+ BStr::from_bytes(&self.0[index])
+ }
+}
+
+impl AsRef<BStr> for [u8] {
+ fn as_ref(&self) -> &BStr {
+ BStr::from_bytes(self)
+ }
+}
+
+impl AsRef<BStr> for BStr {
+ fn as_ref(&self) -> &BStr {
+ self
+ }
+}
+
/// Creates a new [`BStr`] from a string literal.
///
/// `b_str!` converts the supplied string literal to byte string, so non-ASCII
@@ -165,10 +209,10 @@ impl CStr {
/// Returns the length of this string with `NUL`.
#[inline]
pub const fn len_with_nul(&self) -> usize {
- // SAFETY: This is one of the invariant of `CStr`.
- // We add a `unreachable_unchecked` here to hint the optimizer that
- // the value returned from this function is non-zero.
if self.0.is_empty() {
+ // SAFETY: This is one of the invariants of `CStr`.
+ // We add a `unreachable_unchecked` here to hint the optimizer that
+ // the value returned from this function is non-zero.
unsafe { core::hint::unreachable_unchecked() };
}
self.0.len()
@@ -188,12 +232,12 @@ impl CStr {
/// last at least `'a`. When `CStr` is alive, the memory pointed by `ptr`
/// must not be mutated.
#[inline]
- pub unsafe fn from_char_ptr<'a>(ptr: *const core::ffi::c_char) -> &'a Self {
+ pub unsafe fn from_char_ptr<'a>(ptr: *const crate::ffi::c_char) -> &'a Self {
// SAFETY: The safety precondition guarantees `ptr` is a valid pointer
// to a `NUL`-terminated C string.
let len = unsafe { bindings::strlen(ptr) } + 1;
// SAFETY: Lifetime guaranteed by the safety precondition.
- let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len as _) };
+ let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len) };
// SAFETY: As `len` is returned by `strlen`, `bytes` does not contain interior `NUL`.
// As we have added 1 to `len`, the last byte is known to be `NUL`.
unsafe { Self::from_bytes_with_nul_unchecked(bytes) }
@@ -236,10 +280,23 @@ impl CStr {
unsafe { core::mem::transmute(bytes) }
}
+ /// Creates a mutable [`CStr`] from a `[u8]` without performing any
+ /// additional checks.
+ ///
+ /// # Safety
+ ///
+ /// `bytes` *must* end with a `NUL` byte, and should only have a single
+ /// `NUL` byte (or the string will be truncated).
+ #[inline]
+ pub unsafe fn from_bytes_with_nul_unchecked_mut(bytes: &mut [u8]) -> &mut CStr {
+ // SAFETY: Properties of `bytes` guaranteed by the safety precondition.
+ unsafe { &mut *(bytes as *mut [u8] as *mut CStr) }
+ }
+
/// Returns a C pointer to the string.
#[inline]
- pub const fn as_char_ptr(&self) -> *const core::ffi::c_char {
- self.0.as_ptr() as _
+ pub const fn as_char_ptr(&self) -> *const crate::ffi::c_char {
+ self.0.as_ptr()
}
/// Convert the string to a byte slice without the trailing `NUL` byte.
@@ -264,8 +321,9 @@ impl CStr {
///
/// ```
/// # use kernel::str::CStr;
- /// let cstr = CStr::from_bytes_with_nul(b"foo\0").unwrap();
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0")?;
/// assert_eq!(cstr.to_str(), Ok("foo"));
+ /// # Ok::<(), kernel::error::Error>(())
/// ```
#[inline]
pub fn to_str(&self) -> Result<&str, core::str::Utf8Error> {
@@ -291,6 +349,7 @@ impl CStr {
/// ```
#[inline]
pub unsafe fn as_str_unchecked(&self) -> &str {
+ // SAFETY: The safety requirements of this function guarantee that `self` holds valid UTF-8.
unsafe { core::str::from_utf8_unchecked(self.as_bytes()) }
}
@@ -299,6 +358,70 @@ impl CStr {
pub fn to_cstring(&self) -> Result<CString, AllocError> {
CString::try_from(self)
}
+
+ /// Converts this [`CStr`] to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase()`].
+ ///
+ /// [`to_ascii_lowercase()`]: #method.to_ascii_lowercase
+ pub fn make_ascii_lowercase(&mut self) {
+ // INVARIANT: This doesn't introduce or remove NUL bytes in the C
+ // string.
+ self.0.make_ascii_lowercase();
+ }
+
+ /// Converts this [`CStr`] to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase()`].
+ ///
+ /// [`to_ascii_uppercase()`]: #method.to_ascii_uppercase
+ pub fn make_ascii_uppercase(&mut self) {
+ // INVARIANT: This doesn't introduce or remove NUL bytes in the C
+ // string.
+ self.0.make_ascii_uppercase();
+ }
+
+ /// Returns a copy of this [`CStr`] as a [`CString`], with each character mapped to its
+ /// ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// [`make_ascii_lowercase`]: #method.make_ascii_lowercase
+ pub fn to_ascii_lowercase(&self) -> Result<CString, AllocError> {
+ let mut s = self.to_cstring()?;
+
+ s.make_ascii_lowercase();
+
+ Ok(s)
+ }
+
+ /// Returns a copy of this [`CStr`] as a [`CString`], with each character mapped to its
+ /// ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// [`make_ascii_uppercase`]: #method.make_ascii_uppercase
+ pub fn to_ascii_uppercase(&self) -> Result<CString, AllocError> {
+ let mut s = self.to_cstring()?;
+
+ s.make_ascii_uppercase();
+
+ Ok(s)
+ }
}
impl fmt::Display for CStr {
@@ -310,12 +433,13 @@ impl fmt::Display for CStr {
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
- /// let s = CString::try_from_fmt(fmt!("{}", penguin)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{}", penguin))?;
/// assert_eq!(s.as_bytes_with_nul(), "\\xf0\\x9f\\x90\\xa7\0".as_bytes());
///
/// let ascii = c_str!("so \"cool\"");
- /// let s = CString::try_from_fmt(fmt!("{}", ascii)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{}", ascii))?;
/// assert_eq!(s.as_bytes_with_nul(), "so \"cool\"\0".as_bytes());
+ /// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for &c in self.as_bytes() {
@@ -323,7 +447,7 @@ impl fmt::Display for CStr {
// Printable character.
f.write_char(c as char)?;
} else {
- write!(f, "\\x{:02x}", c)?;
+ write!(f, "\\x{c:02x}")?;
}
}
Ok(())
@@ -339,13 +463,14 @@ impl fmt::Debug for CStr {
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
- /// let s = CString::try_from_fmt(fmt!("{:?}", penguin)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{:?}", penguin))?;
/// assert_eq!(s.as_bytes_with_nul(), "\"\\xf0\\x9f\\x90\\xa7\"\0".as_bytes());
///
/// // Embedded double quotes are escaped.
/// let ascii = c_str!("so \"cool\"");
- /// let s = CString::try_from_fmt(fmt!("{:?}", ascii)).unwrap();
+ /// let s = CString::try_from_fmt(fmt!("{:?}", ascii))?;
/// assert_eq!(s.as_bytes_with_nul(), "\"so \\\"cool\\\"\"\0".as_bytes());
+ /// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("\"")?;
@@ -354,7 +479,7 @@ impl fmt::Debug for CStr {
// Printable characters.
b'\"' => f.write_str("\\\"")?,
0x20..=0x7e => f.write_char(c as char)?,
- _ => write!(f, "\\x{:02x}", c)?,
+ _ => write!(f, "\\x{c:02x}")?,
}
}
f.write_str("\"")
@@ -448,11 +573,33 @@ macro_rules! c_str {
}
#[cfg(test)]
+#[expect(clippy::items_after_test_module)]
mod tests {
use super::*;
- use alloc::format;
- const ALL_ASCII_CHARS: &'static str =
+ struct String(CString);
+
+ impl String {
+ fn from_fmt(args: fmt::Arguments<'_>) -> Self {
+ String(CString::try_from_fmt(args).unwrap())
+ }
+ }
+
+ impl Deref for String {
+ type Target = str;
+
+ fn deref(&self) -> &str {
+ self.0.to_str().unwrap()
+ }
+ }
+
+ macro_rules! format {
+ ($($f:tt)*) => ({
+ &*String::from_fmt(kernel::fmt!($($f)*))
+ })
+ }
+
+ const ALL_ASCII_CHARS: &str =
"\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\
\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f \
!\"#$%&'()*+,-./0123456789:;<=>?@\
@@ -486,6 +633,7 @@ mod tests {
fn test_cstr_as_str_unchecked() {
let good_bytes = b"\xf0\x9f\x90\xA7\0";
let checked_cstr = CStr::from_bytes_with_nul(good_bytes).unwrap();
+ // SAFETY: The contents come from a string literal which contains valid UTF-8.
let unchecked_str = unsafe { checked_cstr.as_str_unchecked() };
assert_eq!(unchecked_str, "🐧");
}
@@ -493,13 +641,13 @@ mod tests {
#[test]
fn test_cstr_display() {
let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
- assert_eq!(format!("{}", hello_world), "hello, world!");
+ assert_eq!(format!("{hello_world}"), "hello, world!");
let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
- assert_eq!(format!("{}", non_printables), "\\x01\\x09\\x0a");
+ assert_eq!(format!("{non_printables}"), "\\x01\\x09\\x0a");
let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
- assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
+ assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu");
let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
- assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
+ assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80");
}
#[test]
@@ -510,47 +658,47 @@ mod tests {
bytes[i as usize] = i.wrapping_add(1);
}
let cstr = CStr::from_bytes_with_nul(&bytes).unwrap();
- assert_eq!(format!("{}", cstr), ALL_ASCII_CHARS);
+ assert_eq!(format!("{cstr}"), ALL_ASCII_CHARS);
}
#[test]
fn test_cstr_debug() {
let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
- assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
+ assert_eq!(format!("{hello_world:?}"), "\"hello, world!\"");
let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
- assert_eq!(format!("{:?}", non_printables), "\"\\x01\\x09\\x0a\"");
+ assert_eq!(format!("{non_printables:?}"), "\"\\x01\\x09\\x0a\"");
let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
- assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
+ assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\"");
let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
- assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
+ assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\"");
}
#[test]
fn test_bstr_display() {
let hello_world = BStr::from_bytes(b"hello, world!");
- assert_eq!(format!("{}", hello_world), "hello, world!");
+ assert_eq!(format!("{hello_world}"), "hello, world!");
let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
- assert_eq!(format!("{}", escapes), "_\\t_\\n_\\r_\\_'_\"_");
+ assert_eq!(format!("{escapes}"), "_\\t_\\n_\\r_\\_'_\"_");
let others = BStr::from_bytes(b"\x01");
- assert_eq!(format!("{}", others), "\\x01");
+ assert_eq!(format!("{others}"), "\\x01");
let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
- assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
+ assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu");
let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
- assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
+ assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80");
}
#[test]
fn test_bstr_debug() {
let hello_world = BStr::from_bytes(b"hello, world!");
- assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
+ assert_eq!(format!("{hello_world:?}"), "\"hello, world!\"");
let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
- assert_eq!(format!("{:?}", escapes), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\"");
+ assert_eq!(format!("{escapes:?}"), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\"");
let others = BStr::from_bytes(b"\x01");
- assert_eq!(format!("{:?}", others), "\"\\x01\"");
+ assert_eq!(format!("{others:?}"), "\"\\x01\"");
let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
- assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
+ assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\"");
let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
- assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
+ assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\"");
}
}
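An aside on the `BStr` additions above (illustrative usage in the style of the crate's own doctests, not part of the patch): the new `PartialEq`, `Index`, and `AsRef` impls make `BStr` behave much like `[u8]` for comparison and slicing.

```rust
use kernel::{b_str, str::BStr};

// `Index` delegates to `[u8]` and re-wraps the subslice as `&BStr`, so
// slices keep `BStr`'s escaping `Display`/`Debug` behaviour.
let s: &BStr = b_str!("hello, world");
assert_eq!(&s[0..5], b_str!("hello"));

// `AsRef<BStr> for [u8]` lets plain byte slices flow into APIs taking
// `impl AsRef<BStr>`; the type annotation selects the new impl.
let bytes: &[u8] = b"hello, world";
let as_bstr: &BStr = bytes.as_ref();
assert_eq!(as_bstr, s);
```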
@@ -704,19 +852,20 @@ impl fmt::Write for Formatter {
/// ```
/// use kernel::{str::CString, fmt};
///
-/// let s = CString::try_from_fmt(fmt!("{}{}{}", "abc", 10, 20)).unwrap();
+/// let s = CString::try_from_fmt(fmt!("{}{}{}", "abc", 10, 20))?;
/// assert_eq!(s.as_bytes_with_nul(), "abc1020\0".as_bytes());
///
/// let tmp = "testing";
-/// let s = CString::try_from_fmt(fmt!("{tmp}{}", 123)).unwrap();
+/// let s = CString::try_from_fmt(fmt!("{tmp}{}", 123))?;
/// assert_eq!(s.as_bytes_with_nul(), "testing123\0".as_bytes());
///
/// // This fails because it has an embedded `NUL` byte.
/// let s = CString::try_from_fmt(fmt!("a\0b{}", 123));
/// assert_eq!(s.is_ok(), false);
+/// # Ok::<(), kernel::error::Error>(())
/// ```
pub struct CString {
- buf: Vec<u8>,
+ buf: KVec<u8>,
}
impl CString {
@@ -729,7 +878,7 @@ impl CString {
let size = f.bytes_written();
// Allocate a vector with the required number of bytes, and write to it.
- let mut buf = Vec::try_with_capacity(size)?;
+ let mut buf = KVec::with_capacity(size, GFP_KERNEL)?;
// SAFETY: The buffer stored in `buf` is at least of size `size` and is valid for writes.
let mut f = unsafe { Formatter::from_buffer(buf.as_mut_ptr(), size) };
f.write_fmt(args)?;
@@ -743,7 +892,7 @@ impl CString {
// SAFETY: The buffer is valid for read because `f.bytes_written()` is bounded by `size`
// (which the minimum buffer size) and is non-zero (we wrote at least the `NUL` terminator)
// so `f.bytes_written() - 1` doesn't underflow.
- let ptr = unsafe { bindings::memchr(buf.as_ptr().cast(), 0, (f.bytes_written() - 1) as _) };
+ let ptr = unsafe { bindings::memchr(buf.as_ptr().cast(), 0, f.bytes_written() - 1) };
if !ptr.is_null() {
return Err(EINVAL);
}
@@ -764,14 +913,21 @@ impl Deref for CString {
}
}
+impl DerefMut for CString {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: A `CString` is always NUL-terminated and contains no other
+ // NUL bytes.
+ unsafe { CStr::from_bytes_with_nul_unchecked_mut(self.buf.as_mut_slice()) }
+ }
+}
+
impl<'a> TryFrom<&'a CStr> for CString {
type Error = AllocError;
fn try_from(cstr: &'a CStr) -> Result<CString, AllocError> {
- let mut buf = Vec::new();
+ let mut buf = KVec::new();
- buf.try_extend_from_slice(cstr.as_bytes_with_nul())
- .map_err(|_| AllocError)?;
+ buf.extend_from_slice(cstr.as_bytes_with_nul(), GFP_KERNEL)?;
// INVARIANT: The `CStr` and `CString` types have the same invariants for
// the string data, and we copied it over without changes.
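Before the next file, a sketch of how the new `DerefMut` impl composes with the ASCII-case helpers added to `CStr` (illustrative only; `demo` is a hypothetical function, and the `?` conversions assume `kernel::error::Error: From<AllocError>`):

```rust
use kernel::{fmt, str::CString};

// Hypothetical round trip through the new in-place and allocating helpers.
fn demo() -> Result<(), kernel::error::Error> {
    let mut s = CString::try_from_fmt(fmt!("MiXeD Case"))?;

    // `DerefMut` yields `&mut CStr`, so the in-place helper applies directly.
    s.make_ascii_lowercase();
    assert_eq!(s.as_bytes(), "mixed case".as_bytes());

    // The allocating variants return a fresh `CString` and may fail with `AllocError`.
    let upper = s.to_ascii_uppercase()?;
    assert_eq!(upper.as_bytes(), "MIXED CASE".as_bytes());
    Ok(())
}
```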
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index c983f63fd56e..36a719015583 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -5,35 +5,85 @@
//! This module contains the kernel APIs related to synchronisation that have been ported or
//! wrapped for usage by Rust code in the kernel.
+use crate::prelude::*;
use crate::types::Opaque;
+use pin_init;
mod arc;
mod condvar;
pub mod lock;
mod locked_by;
+pub mod poll;
+pub mod rcu;
pub use arc::{Arc, ArcBorrow, UniqueArc};
pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
-pub use lock::mutex::{new_mutex, Mutex};
-pub use lock::spinlock::{new_spinlock, SpinLock};
+pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
+pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
+pub use lock::spinlock::{new_spinlock, SpinLock, SpinLockGuard};
pub use locked_by::LockedBy;
/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
#[repr(transparent)]
-pub struct LockClassKey(Opaque<bindings::lock_class_key>);
+#[pin_data(PinnedDrop)]
+pub struct LockClassKey {
+ #[pin]
+ inner: Opaque<bindings::lock_class_key>,
+}
// SAFETY: `bindings::lock_class_key` is designed to be used concurrently from multiple threads and
// provides its own synchronization.
unsafe impl Sync for LockClassKey {}
impl LockClassKey {
- /// Creates a new lock class key.
- pub const fn new() -> Self {
- Self(Opaque::uninit())
+ /// Initializes a dynamically allocated lock class key. In the common case of using a
+ /// statically allocated lock class key, the [`static_lock_class!`] macro should be used instead.
+ ///
+ /// # Examples
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::alloc::KBox;
+ /// # use kernel::types::ForeignOwnable;
+ /// # use kernel::sync::{LockClassKey, SpinLock};
+ /// # use pin_init::stack_pin_init;
+ ///
+ /// let key = KBox::pin_init(LockClassKey::new_dynamic(), GFP_KERNEL)?;
+ /// let key_ptr = key.into_foreign();
+ ///
+ /// {
+ /// stack_pin_init!(let num: SpinLock<u32> = SpinLock::new(
+ /// 0,
+ /// c_str!("my_spinlock"),
+ /// // SAFETY: `key_ptr` is returned by the above `into_foreign()`, whose
+ /// // `from_foreign()` has not yet been called.
+ /// unsafe { <Pin<KBox<LockClassKey>> as ForeignOwnable>::borrow(key_ptr) }
+ /// ));
+ /// }
+ ///
+ /// // SAFETY: We dropped `num`, the only use of the key, so the result of the previous
+ /// // `borrow` has also been dropped. Thus, it's safe to use from_foreign.
+ /// unsafe { drop(<Pin<KBox<LockClassKey>> as ForeignOwnable>::from_foreign(key_ptr)) };
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn new_dynamic() -> impl PinInit<Self> {
+ pin_init!(Self {
+ // SAFETY: `lockdep_register_key` expects an uninitialized block of memory.
+ inner <- Opaque::ffi_init(|slot| unsafe { bindings::lockdep_register_key(slot) })
+ })
}
pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
- self.0.get()
+ self.inner.get()
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for LockClassKey {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: self.as_ptr was registered with lockdep and self is pinned, so the address
+ // hasn't changed. Thus, it's safe to pass to unregister.
+ unsafe { bindings::lockdep_unregister_key(self.as_ptr()) }
}
}
@@ -42,8 +92,11 @@ impl LockClassKey {
#[macro_export]
macro_rules! static_lock_class {
() => {{
- static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
- &CLASS
+ static CLASS: $crate::sync::LockClassKey =
+ // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated
+ // `lock_class_key`.
+ unsafe { ::core::mem::MaybeUninit::uninit().assume_init() };
+ $crate::prelude::Pin::static_ref(&CLASS)
}};
}
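For contrast with the dynamic-key example above, a sketch of the common static path, where the `new_mutex!` initialiser supplies the class via `static_lock_class!` so callers never name `LockClassKey` directly (illustrative, mirroring the crate's own doctests):

```rust
use kernel::{new_mutex, prelude::*, sync::Mutex};

fn demo() -> Result<()> {
    // `new_mutex!` passes `static_lock_class!()` to `Mutex::new` internally.
    let lock = KBox::pin_init(new_mutex!(0u32, "demo_lock"), GFP_KERNEL)?;
    *lock.lock() += 1;
    assert_eq!(*lock.lock(), 1);
    Ok(())
}
```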
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 7d4c4bf58388..8484c814609a 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -12,27 +12,27 @@
//! 2. It does not support weak references, which allows it to be half the size.
//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned.
+//! 5. The object in [`Arc`] is pinned implicitly.
//!
//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
use crate::{
+ alloc::{AllocError, Flags, KBox},
bindings,
- error::{self, Error},
- init::{self, InPlaceInit, Init, PinInit},
+ init::InPlaceInit,
try_init,
types::{ForeignOwnable, Opaque},
};
-use alloc::boxed::Box;
use core::{
- alloc::{AllocError, Layout},
+ alloc::Layout,
fmt,
- marker::{PhantomData, Unsize},
+ marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
pin::Pin,
ptr::NonNull,
};
-use macros::pin_data;
+use pin_init::{self, pin_data, InPlaceWrite, Init, PinInit};
mod std_vendor;
@@ -57,7 +57,7 @@ mod std_vendor;
/// }
///
/// // Create a refcounted instance of `Example`.
-/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
///
/// // Get a new pointer to `obj` and increment the refcount.
/// let cloned = obj.clone();
@@ -96,7 +96,7 @@ mod std_vendor;
/// }
/// }
///
-/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
/// obj.use_reference();
/// obj.take_over();
/// # Ok::<(), Error>(())
@@ -119,14 +119,24 @@ mod std_vendor;
/// impl MyTrait for Example {}
///
/// // `obj` has type `Arc<Example>`.
-/// let obj: Arc<Example> = Arc::try_new(Example)?;
+/// let obj: Arc<Example> = Arc::new(Example, GFP_KERNEL)?;
///
/// // `coerced` has type `Arc<dyn MyTrait>`.
/// let coerced: Arc<dyn MyTrait> = obj;
/// # Ok::<(), Error>(())
/// ```
+#[repr(transparent)]
+#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))]
pub struct Arc<T: ?Sized> {
ptr: NonNull<ArcInner<T>>,
+ // NB: this informs dropck that objects of type `ArcInner<T>` may be used in `<Arc<T> as
+ // Drop>::drop`. Note that dropck already assumes that objects of type `T` may be used in
+ // `<Arc<T> as Drop>::drop` and the distinction between `T` and `ArcInner<T>` is not presently
+ // meaningful with respect to dropck - but this may change in the future so this is left here
+ // out of an abundance of caution.
+ //
+ // See https://doc.rust-lang.org/nomicon/phantom-data.html#generic-parameters-and-drop-checking
+ // for more detail on the semantics of dropck in the presence of `PhantomData`.
_p: PhantomData<ArcInner<T>>,
}
@@ -137,15 +147,47 @@ struct ArcInner<T: ?Sized> {
data: T,
}
-// This is to allow [`Arc`] (and variants) to be used as the type of `self`.
-impl<T: ?Sized> core::ops::Receiver for Arc<T> {}
+impl<T: ?Sized> ArcInner<T> {
+ /// Converts a pointer to the contents of an [`Arc`] into a pointer to the [`ArcInner`].
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`Arc::into_raw`], and the `Arc` must
+ /// not yet have been destroyed.
+ unsafe fn container_of(ptr: *const T) -> NonNull<ArcInner<T>> {
+ let refcount_layout = Layout::new::<bindings::refcount_t>();
+ // SAFETY: The caller guarantees that the pointer is valid.
+ let val_layout = Layout::for_value(unsafe { &*ptr });
+ // SAFETY: We're computing the layout of a real struct that existed when compiling this
+ // binary, so its layout is not so large that it can trigger arithmetic overflow.
+ let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 };
+
+ // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and
+ // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field.
+ //
+ // This is documented at:
+ // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>.
+ let ptr = ptr as *const ArcInner<T>;
+
+ // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the
+ // pointer, since it originates from a previous call to `Arc::into_raw` on an `Arc` that is
+ // still valid.
+ let ptr = unsafe { ptr.byte_sub(val_offset) };
+
+ // SAFETY: The pointer can't be null since you can't have an `ArcInner<T>` value at the null
+ // address.
+ unsafe { NonNull::new_unchecked(ptr.cast_mut()) }
+ }
+}
// This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the
// dynamically-sized type (DST) `U`.
-impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {}
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {}
// This is to allow `Arc<U>` to be dispatched on when `Arc<T>` can be coerced into `Arc<U>`.
-impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Arc<T> {}
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Arc<T> {}
// SAFETY: It is safe to send `Arc<T>` to another thread when the underlying `T` is `Sync` because
// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
@@ -160,9 +202,29 @@ unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
// the reference count reaches zero and `T` is dropped.
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+impl<T> InPlaceInit<T> for Arc<T> {
+ type PinnedSelf = Self;
+
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_pin_init(init, flags).map(|u| u.into())
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_init(init, flags).map(|u| u.into())
+ }
+}
+
impl<T> Arc<T> {
/// Constructs a new reference counted instance of `T`.
- pub fn try_new(contents: T) -> Result<Self, AllocError> {
+ pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
let value = ArcInner {
// SAFETY: There are no safety requirements for this FFI call.
@@ -170,33 +232,12 @@ impl<T> Arc<T> {
data: contents,
};
- let inner = Box::try_new(value)?;
+ let inner = KBox::new(value, flags)?;
+ let inner = KBox::leak(inner).into();
// SAFETY: We just created `inner` with a reference count of 1, which is owned by the new
// `Arc` object.
- Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
- }
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// If `T: !Unpin` it will not be able to move afterwards.
- #[inline]
- pub fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::pin_init(init).map(|u| u.into())
- }
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned.
- #[inline]
- pub fn init<E>(init: impl Init<T, E>) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::init(init).map(|u| u.into())
+ Ok(unsafe { Self::from_inner(inner) })
}
}
@@ -225,6 +266,15 @@ impl<T: ?Sized> Arc<T> {
unsafe { core::ptr::addr_of!((*ptr).data) }
}
+ /// Returns a raw pointer to the data in this [`Arc`].
+ pub fn as_ptr(this: &Self) -> *const T {
+ let ptr = this.ptr.as_ptr();
+
+ // SAFETY: As `ptr` points to a valid allocation of type `ArcInner`,
+ // field projection to `data` is within bounds of the allocation.
+ unsafe { core::ptr::addr_of!((*ptr).data) }
+ }
+
/// Recreates an [`Arc`] instance previously deconstructed via [`Arc::into_raw`].
///
/// # Safety
@@ -232,27 +282,13 @@ impl<T: ?Sized> Arc<T> {
/// `ptr` must have been returned by a previous call to [`Arc::into_raw`]. Additionally, it
/// must not be called more than once for each previous call to [`Arc::into_raw`].
pub unsafe fn from_raw(ptr: *const T) -> Self {
- let refcount_layout = Layout::new::<bindings::refcount_t>();
- // SAFETY: The caller guarantees that the pointer is valid.
- let val_layout = Layout::for_value(unsafe { &*ptr });
- // SAFETY: We're computing the layout of a real struct that existed when compiling this
- // binary, so its layout is not so large that it can trigger arithmetic overflow.
- let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 };
-
- // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and
- // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field.
- //
- // This is documented at:
- // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>.
- let ptr = ptr as *const ArcInner<T>;
-
- // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the
- // pointer, since it originates from a previous call to `Arc::into_raw` and is still valid.
- let ptr = unsafe { ptr.byte_sub(val_offset) };
+ // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an
+ // `Arc` that is still valid.
+ let ptr = unsafe { ArcInner::container_of(ptr) };
// SAFETY: By the safety requirements we know that `ptr` came from `Arc::into_raw`, so the
// reference count held then will be owned by the new `Arc` object.
- unsafe { Self::from_inner(NonNull::new_unchecked(ptr.cast_mut())) }
+ unsafe { Self::from_inner(ptr) }
}
/// Returns an [`ArcBorrow`] from the given [`Arc`].
@@ -271,30 +307,103 @@ impl<T: ?Sized> Arc<T> {
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
core::ptr::eq(this.ptr.as_ptr(), other.ptr.as_ptr())
}
+
+ /// Converts this [`Arc`] into a [`UniqueArc`], or destroys it if it is not unique.
+ ///
+ /// When this destroys the `Arc`, it does so while properly avoiding races. This means that
+ /// this method will never call the destructor of the value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::{Arc, UniqueArc};
+ ///
+ /// let arc = Arc::new(42, GFP_KERNEL)?;
+ /// let unique_arc = arc.into_unique_or_drop();
+ ///
+ /// // The above conversion should succeed since refcount of `arc` is 1.
+ /// assert!(unique_arc.is_some());
+ ///
+ /// assert_eq!(*(unique_arc.unwrap()), 42);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// ```
+ /// use kernel::sync::{Arc, UniqueArc};
+ ///
+ /// let arc = Arc::new(42, GFP_KERNEL)?;
+ /// let another = arc.clone();
+ ///
+ /// let unique_arc = arc.into_unique_or_drop();
+ ///
+ /// // The above conversion should fail since refcount of `arc` is >1.
+ /// assert!(unique_arc.is_none());
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn into_unique_or_drop(self) -> Option<Pin<UniqueArc<T>>> {
+ // We will manually manage the refcount in this method, so we disable the destructor.
+ let me = ManuallyDrop::new(self);
+ // SAFETY: We own a refcount, so the pointer is still valid.
+ let refcount = unsafe { me.ptr.as_ref() }.refcount.get();
+
+ // If the decremented refcount is non-zero, other `Arc`s still exist, so we have merely
+ // given up our reference and return without further touching the value. If it reaches
+ // zero, ours was the only reference, and we can create a `UniqueArc`.
+ //
+ // SAFETY: We own a refcount, so the pointer is not dangling.
+ let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+ if is_zero {
+ // SAFETY: We have exclusive access to the arc, so we can perform unsynchronized
+ // accesses to the refcount.
+ unsafe { core::ptr::write(refcount, bindings::REFCOUNT_INIT(1)) };
+
+ // INVARIANT: We own the only refcount to this arc, so we may create a `UniqueArc`. We
+ // must pin the `UniqueArc` because the value was previously in an `Arc`, and `Arc`s
+ // pin their values.
+ Some(Pin::from(UniqueArc {
+ inner: ManuallyDrop::into_inner(me),
+ }))
+ } else {
+ None
+ }
+ }
}
impl<T: 'static> ForeignOwnable for Arc<T> {
type Borrowed<'a> = ArcBorrow<'a, T>;
+ type BorrowedMut<'a> = Self::Borrowed<'a>;
- fn into_foreign(self) -> *const core::ffi::c_void {
- ManuallyDrop::new(self).ptr.as_ptr() as _
+ fn into_foreign(self) -> *mut crate::ffi::c_void {
+ ManuallyDrop::new(self).ptr.as_ptr().cast()
}
- unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> {
+ unsafe fn from_foreign(ptr: *mut crate::ffi::c_void) -> Self {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
+
// SAFETY: By the safety requirement of this function, we know that `ptr` came from
- // a previous call to `Arc::into_foreign`.
- let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap();
+ // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
+ // holds a reference count increment that is transferrable to us.
+ unsafe { Self::from_inner(inner) }
+ }
+
+ unsafe fn borrow<'a>(ptr: *mut crate::ffi::c_void) -> ArcBorrow<'a, T> {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
// SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive
// for the lifetime of the returned value.
unsafe { ArcBorrow::new(inner) }
}
- unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
- // SAFETY: By the safety requirement of this function, we know that `ptr` came from
- // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
- // holds a reference count increment that is transferrable to us.
- unsafe { Self::from_inner(NonNull::new(ptr as _).unwrap()) }
+ unsafe fn borrow_mut<'a>(ptr: *mut crate::ffi::c_void) -> ArcBorrow<'a, T> {
+ // SAFETY: The safety requirements for `borrow_mut` are a superset of the safety
+ // requirements for `borrow`.
+ unsafe { Self::borrow(ptr) }
}
}
@@ -316,10 +425,14 @@ impl<T: ?Sized> AsRef<T> for Arc<T> {
impl<T: ?Sized> Clone for Arc<T> {
fn clone(&self) -> Self {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to dereference it.
+ let refcount = unsafe { self.ptr.as_ref() }.refcount.get();
+
// INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero.
// SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
// safe to increment the refcount.
- unsafe { bindings::refcount_inc(self.ptr.as_ref().refcount.get()) };
+ unsafe { bindings::refcount_inc(refcount) };
// SAFETY: We just incremented the refcount. This increment is now owned by the new `Arc`.
unsafe { Self::from_inner(self.ptr) }
@@ -341,8 +454,8 @@ impl<T: ?Sized> Drop for Arc<T> {
if is_zero {
// The count reached zero, we must free the memory.
//
- // SAFETY: The pointer was initialised from the result of `Box::leak`.
- unsafe { drop(Box::from_raw(self.ptr.as_ptr())) };
+ // SAFETY: The pointer was initialised from the result of `KBox::leak`.
+ unsafe { drop(KBox::from_raw(self.ptr.as_ptr())) };
}
}
}
@@ -387,7 +500,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// e.into()
/// }
///
-/// let obj = Arc::try_new(Example)?;
+/// let obj = Arc::new(Example, GFP_KERNEL)?;
/// let cloned = do_something(obj.as_arc_borrow());
///
/// // Assert that both `obj` and `cloned` point to the same underlying object.
@@ -411,21 +524,21 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// }
/// }
///
-/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
/// obj.as_arc_borrow().use_reference();
/// # Ok::<(), Error>(())
/// ```
+#[repr(transparent)]
+#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))]
pub struct ArcBorrow<'a, T: ?Sized + 'a> {
inner: NonNull<ArcInner<T>>,
_p: PhantomData<&'a ()>,
}
-// This is to allow [`ArcBorrow`] (and variants) to be used as the type of `self`.
-impl<T: ?Sized> core::ops::Receiver for ArcBorrow<'_, T> {}
-
// This is to allow `ArcBorrow<U>` to be dispatched on when `ArcBorrow<T>` can be coerced into
// `ArcBorrow<U>`.
-impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>>
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>>
for ArcBorrow<'_, T>
{
}
@@ -453,6 +566,27 @@ impl<T: ?Sized> ArcBorrow<'_, T> {
_p: PhantomData,
}
}
+
+ /// Creates an [`ArcBorrow`] to an [`Arc`] that has previously been deconstructed with
+ /// [`Arc::into_raw`] or [`Arc::as_ptr`].
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must originate from a call to [`Arc::into_raw`] or [`Arc::as_ptr`].
+ /// * For the duration of the lifetime annotated on this `ArcBorrow`, the reference count must
+ /// not hit zero.
+ /// * For the duration of the lifetime annotated on this `ArcBorrow`, there must not be a
+ /// [`UniqueArc`] reference to this value.
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an
+ // `Arc` that is still valid.
+ let ptr = unsafe { ArcInner::container_of(ptr) };
+
+ // SAFETY: The caller promises that the value remains valid since the reference count must
+ // not hit zero, and no mutable reference will be created since that would involve a
+ // `UniqueArc`.
+ unsafe { Self::new(ptr) }
+ }
}
impl<T: ?Sized> From<ArcBorrow<'_, T>> for Arc<T> {
@@ -499,7 +633,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// }
///
/// fn test() -> Result<Arc<Example>> {
-/// let mut x = UniqueArc::try_new(Example { a: 10, b: 20 })?;
+/// let mut x = UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
/// x.a += 1;
/// x.b += 1;
/// Ok(x.into())
@@ -522,7 +656,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// }
///
/// fn test() -> Result<Arc<Example>> {
-/// let x = UniqueArc::try_new_uninit()?;
+/// let x = UniqueArc::new_uninit(GFP_KERNEL)?;
/// Ok(x.write(Example { a: 10, b: 20 }).into())
/// }
///
@@ -542,7 +676,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// }
///
/// fn test() -> Result<Arc<Example>> {
-/// let mut pinned = Pin::from(UniqueArc::try_new(Example { a: 10, b: 20 })?);
+/// let mut pinned = Pin::from(UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?);
/// // We can modify `pinned` because it is `Unpin`.
/// pinned.as_mut().a += 1;
/// Ok(pinned.into())
@@ -554,27 +688,72 @@ pub struct UniqueArc<T: ?Sized> {
inner: Arc<T>,
}
+impl<T> InPlaceInit<T> for UniqueArc<T> {
+ type PinnedSelf = Pin<Self>;
+
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::new_uninit(flags)?.write_pin_init(init)
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::new_uninit(flags)?.write_init(init)
+ }
+}
+
+impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> {
+ type Initialized = UniqueArc<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
+ }
+}
+
impl<T> UniqueArc<T> {
/// Tries to allocate a new [`UniqueArc`] instance.
- pub fn try_new(value: T) -> Result<Self, AllocError> {
+ pub fn new(value: T, flags: Flags) -> Result<Self, AllocError> {
Ok(Self {
// INVARIANT: The newly-created object has a refcount of 1.
- inner: Arc::try_new(value)?,
+ inner: Arc::new(value, flags)?,
})
}
/// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
- pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
+ pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
- let inner = Box::try_init::<AllocError>(try_init!(ArcInner {
- // SAFETY: There are no safety requirements for this FFI call.
- refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
- data <- init::uninit::<T, AllocError>(),
- }? AllocError))?;
+ let inner = KBox::try_init::<AllocError>(
+ try_init!(ArcInner {
+ // SAFETY: There are no safety requirements for this FFI call.
+ refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+ data <- pin_init::uninit::<T, AllocError>(),
+ }? AllocError),
+ flags,
+ )?;
Ok(UniqueArc {
// INVARIANT: The newly-created object has a refcount of 1.
- // SAFETY: The pointer from the `Box` is valid.
- inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
+ // SAFETY: The pointer from the `KBox` is valid.
+ inner: unsafe { Arc::from_inner(KBox::leak(inner).into()) },
})
}
}
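Before moving on, a compact before/after sketch of the allocation-flags migration performed throughout this file (illustrative; `Example` is a stand-in type):

```rust
use kernel::{prelude::*, sync::Arc};

struct Example {
    a: u32,
}

fn demo() -> Result<Arc<Example>> {
    // Previously: `Arc::try_new(Example { a: 10 })?`. Callers now pass
    // allocation flags explicitly at every allocating call site.
    let obj = Arc::new(Example { a: 10 }, GFP_KERNEL)?;

    // Cloning still only bumps the refcount; both handles share one allocation.
    let cloned = obj.clone();
    assert!(Arc::ptr_eq(&obj, &cloned));
    Ok(obj)
}
```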
diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs
index a66a0c2831b3..11b3f4ecca5f 100644
--- a/rust/kernel/sync/arc/std_vendor.rs
+++ b/rust/kernel/sync/arc/std_vendor.rs
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+//! Rust standard library vendored code.
+//!
//! The contents of this file come from the Rust standard library, hosted in
//! the <https://github.com/rust-lang/rust> repository, licensed under
//! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index 0c3671caffeb..caebf03f553b 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -7,18 +7,16 @@
use super::{lock::Backend, lock::Guard, LockClassKey};
use crate::{
- bindings,
- init::PinInit,
- pin_init,
+ ffi::{c_int, c_long},
str::CStr,
- task::{MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE},
+ task::{
+ MAX_SCHEDULE_TIMEOUT, TASK_FREEZABLE, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE,
+ },
time::Jiffies,
types::Opaque,
};
-use core::ffi::{c_int, c_long};
-use core::marker::PhantomPinned;
-use core::ptr;
-use macros::pin_data;
+use core::{marker::PhantomPinned, pin::Pin, ptr};
+use pin_init::{pin_data, pin_init, PinInit};
/// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
#[macro_export]
@@ -38,7 +36,7 @@ pub use new_condvar;
/// spuriously.
///
/// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such
-/// instances is with the [`pin_init`](crate::pin_init) and [`new_condvar`] macros.
+/// instances is with the [`pin_init`](crate::pin_init!) and [`new_condvar`] macros.
///
/// # Examples
///
@@ -71,11 +69,11 @@ pub use new_condvar;
/// }
///
/// /// Allocates a new boxed `Example`.
-/// fn new_example() -> Result<Pin<Box<Example>>> {
-/// Box::pin_init(pin_init!(Example {
+/// fn new_example() -> Result<Pin<KBox<Example>>> {
+/// KBox::pin_init(pin_init!(Example {
/// value <- new_mutex!(0),
/// value_changed <- new_condvar!(),
-/// }))
+/// }), GFP_KERNEL)
/// }
/// ```
///
@@ -94,7 +92,6 @@ pub struct CondVar {
}
// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
-#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for CondVar {}
// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
@@ -103,7 +100,7 @@ unsafe impl Sync for CondVar {}
impl CondVar {
/// Constructs a new condvar initialiser.
- pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
pin_init!(Self {
_pin: PhantomPinned,
// SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
@@ -161,6 +158,25 @@ impl CondVar {
crate::current!().signal_pending()
}
+ /// Releases the lock and waits for a notification in interruptible and freezable mode.
+ ///
+ /// The process is allowed to be frozen during this sleep. No lock should be held when calling
+ /// this function, and there is a lockdep assertion for this. Freezing a task that holds a lock
+ /// can trivially deadlock vs another task that needs that lock to complete before it too can
+ /// hit freezable.
+ #[must_use = "wait_interruptible_freezable returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait_interruptible_freezable<T: ?Sized, B: Backend>(
+ &self,
+ guard: &mut Guard<'_, T, B>,
+ ) -> bool {
+ self.wait_internal(
+ TASK_INTERRUPTIBLE | TASK_FREEZABLE,
+ guard,
+ MAX_SCHEDULE_TIMEOUT,
+ );
+ crate::current!().signal_pending()
+ }
+
/// Releases the lock and waits for a notification in interruptible mode.
///
/// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
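A sketch of the canonical wait loop for the new freezable variant (illustrative; assumes `value` and `cv` were initialised with `new_mutex!` and `new_condvar!` as in the example above):

```rust
use kernel::prelude::*;
use kernel::sync::{CondVar, Mutex};

// Sleep until the value becomes non-zero, re-checking the predicate after
// every wakeup; a pending signal aborts the (freezable) wait.
fn wait_until_nonzero(value: &Mutex<u32>, cv: &CondVar) -> Result<u32> {
    let mut guard = value.lock();
    while *guard == 0 {
        if cv.wait_interruptible_freezable(&mut guard) {
            return Err(EINTR);
        }
    }
    Ok(*guard)
}
```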
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 5b5c8efe427a..e82fa5be289c 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -6,13 +6,19 @@
//! spinlocks, raw spinlocks) to be provided with minimal effort.
use super::LockClassKey;
-use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
-use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
-use macros::pin_data;
+use crate::{
+ str::CStr,
+ types::{NotThreadSafe, Opaque, ScopeGuard},
+};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+use pin_init::{pin_data, pin_init, PinInit};
pub mod mutex;
pub mod spinlock;
+pub(super) mod global;
+pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
+
/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
@@ -46,7 +52,7 @@ pub unsafe trait Backend {
/// remain valid for read indefinitely.
unsafe fn init(
ptr: *mut Self::State,
- name: *const core::ffi::c_char,
+ name: *const crate::ffi::c_char,
key: *mut bindings::lock_class_key,
);
@@ -58,6 +64,13 @@ pub unsafe trait Backend {
#[must_use]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;
+ /// Tries to acquire the lock.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;
+
/// Releases the lock, giving up its ownership.
///
/// # Safety
@@ -75,12 +88,20 @@ pub unsafe trait Backend {
// SAFETY: The safety requirements ensure that the lock is initialised.
*guard_state = unsafe { Self::lock(ptr) };
}
+
+ /// Asserts that the lock is held using lockdep.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn assert_is_held(ptr: *mut Self::State);
}
/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
+#[repr(C)]
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
/// The kernel lock object.
@@ -106,7 +127,7 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
impl<T, B: Backend> Lock<T, B> {
/// Constructs a new lock initialiser.
- pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pub fn new(t: T, name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
pin_init!(Self {
data: UnsafeCell::new(t),
_pin: PhantomPinned,
@@ -119,6 +140,28 @@ impl<T, B: Backend> Lock<T, B> {
}
}
+impl<B: Backend> Lock<(), B> {
+ /// Constructs a [`Lock`] from a raw pointer.
+ ///
+ /// This can be useful for interacting with a lock which was initialised outside of Rust.
+ ///
+ /// # Safety
+ ///
+ /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
+ /// the whole lifetime of `'a`.
+ ///
+ /// [`State`]: Backend::State
+ pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
+ // SAFETY:
+ // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
+ // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
+ // the struct
+ // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
+ // `B::State`.
+ unsafe { &*ptr.cast() }
+ }
+}
+
impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Acquires the lock and gives the caller access to the data protected by it.
pub fn lock(&self) -> Guard<'_, T, B> {
@@ -128,6 +171,15 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
// SAFETY: The lock was just acquired.
unsafe { Guard::new(self, state) }
}
+
+ /// Tries to acquire the lock.
+ ///
+ /// Returns a guard that can be used to access the data protected by the lock if successful.
+ pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
+ // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
+ // that `init` was called.
+ unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
+ }
}
/// A lock guard.
@@ -139,20 +191,50 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
pub struct Guard<'a, T: ?Sized, B: Backend> {
pub(crate) lock: &'a Lock<T, B>,
pub(crate) state: B::GuardState,
- _not_send: PhantomData<*mut ()>,
+ _not_send: NotThreadSafe,
}
// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
-impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
+impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
+ /// Returns the lock that this guard originates from.
+ ///
+ /// # Examples
+ ///
+ /// The following example shows how to use [`Guard::lock_ref()`] to assert that a guard
+ /// originates from a given lock.
+ ///
+ /// ```
+ /// # use kernel::{new_spinlock, sync::lock::{Backend, Guard, Lock}};
+ /// # use pin_init::stack_pin_init;
+ ///
+ /// fn assert_held<T, B: Backend>(guard: &Guard<'_, T, B>, lock: &Lock<T, B>) {
+ /// // Address-equal means the same lock.
+ /// assert!(core::ptr::eq(guard.lock_ref(), lock));
+ /// }
+ ///
+ /// // Creates a new lock on the stack.
+ /// stack_pin_init!{
+ /// let l = new_spinlock!(42)
+ /// }
+ ///
+ /// let g = l.lock();
+ ///
+ /// // `g` originates from `l`.
+ /// assert_held(&g, &l);
+ /// ```
+ pub fn lock_ref(&self) -> &'a Lock<T, B> {
+ self.lock
+ }
+
pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
- // SAFETY: The lock was just unlocked above and is being relocked now.
- let _relock =
- ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
+ let _relock = ScopeGuard::new(||
+ // SAFETY: The lock was just unlocked above and is being relocked now.
+ unsafe { B::relock(self.lock.state.get(), &mut self.state) });
cb()
}
@@ -187,11 +269,14 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
/// # Safety
///
/// The caller must ensure that it owns the lock.
- pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+ unsafe { B::assert_is_held(lock.state.get()) };
+
Self {
lock,
state,
- _not_send: PhantomData,
+ _not_send: NotThreadSafe,
}
}
}
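A sketch of the non-blocking path that the new `Backend::try_lock` enables at the `Lock` level (illustrative helper, generic over any backend):

```rust
use kernel::sync::lock::{Backend, Lock};

// Returns `false` instead of sleeping when the lock is contended.
fn try_bump<B: Backend>(lock: &Lock<u32, B>) -> bool {
    match lock.try_lock() {
        Some(mut guard) => {
            *guard += 1;
            true
        }
        None => false,
    }
}
```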
diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
new file mode 100644
index 000000000000..d65f94b5caf2
--- /dev/null
+++ b/rust/kernel/sync/lock/global.rs
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Support for defining statics containing locks.
+
+use crate::{
+ str::CStr,
+ sync::lock::{Backend, Guard, Lock},
+ sync::{LockClassKey, LockedBy},
+ types::Opaque,
+};
+use core::{
+ cell::UnsafeCell,
+ marker::{PhantomData, PhantomPinned},
+ pin::Pin,
+};
+
+/// Trait implemented for marker types for global locks.
+///
+/// See [`global_lock!`] for examples.
+pub trait GlobalLockBackend {
+ /// The name for this global lock.
+ const NAME: &'static CStr;
+ /// Item type stored in this global lock.
+ type Item: 'static;
+ /// The backend used for this global lock.
+ type Backend: Backend + 'static;
+ /// The class for this global lock.
+ fn get_lock_class() -> Pin<&'static LockClassKey>;
+}
+
+/// Type used for global locks.
+///
+/// See [`global_lock!`] for examples.
+pub struct GlobalLock<B: GlobalLockBackend> {
+ inner: Lock<B::Item, B::Backend>,
+}
+
+impl<B: GlobalLockBackend> GlobalLock<B> {
+ /// Creates a global lock.
+ ///
+ /// # Safety
+ ///
+ /// * Before any other method on this lock is called, [`Self::init`] must be called.
+ /// * The type `B` must not be used with any other lock.
+ pub const unsafe fn new(data: B::Item) -> Self {
+ Self {
+ inner: Lock {
+ state: Opaque::uninit(),
+ data: UnsafeCell::new(data),
+ _pin: PhantomPinned,
+ },
+ }
+ }
+
+ /// Initializes a global lock.
+ ///
+ /// # Safety
+ ///
+ /// Must not be called more than once on a given lock.
+ pub unsafe fn init(&'static self) {
+ // SAFETY: The pointer to `state` is valid for the duration of this call, and both `name`
+ // and `key` are valid indefinitely. The `state` is pinned since we have a `'static`
+ // reference to `self`.
+ //
+ // We have exclusive access to the `state` since the caller of `new` promised to call
+ // `init` before using any other methods. As `init` can only be called once, all other
+ // uses of this lock must happen after this call.
+ unsafe {
+ B::Backend::init(
+ self.inner.state.get(),
+ B::NAME.as_char_ptr(),
+ B::get_lock_class().as_ptr(),
+ )
+ }
+ }
+
+ /// Lock this global lock.
+ pub fn lock(&'static self) -> GlobalGuard<B> {
+ GlobalGuard {
+ inner: self.inner.lock(),
+ }
+ }
+
+ /// Try to lock this global lock.
+ pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> {
+ Some(GlobalGuard {
+ inner: self.inner.try_lock()?,
+ })
+ }
+}
+
+/// A guard for a [`GlobalLock`].
+///
+/// See [`global_lock!`] for examples.
+pub struct GlobalGuard<B: GlobalLockBackend> {
+ inner: Guard<'static, B::Item, B::Backend>,
+}
+
+impl<B: GlobalLockBackend> core::ops::Deref for GlobalGuard<B> {
+ type Target = B::Item;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+/// A version of [`LockedBy`] for a [`GlobalLock`].
+///
+/// See [`global_lock!`] for examples.
+pub struct GlobalLockedBy<T: ?Sized, B: GlobalLockBackend> {
+ _backend: PhantomData<B>,
+ value: UnsafeCell<T>,
+}
+
+// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`.
+unsafe impl<T, B> Send for GlobalLockedBy<T, B>
+where
+ T: ?Sized,
+ B: GlobalLockBackend,
+ LockedBy<T, B::Item>: Send,
+{
+}
+
+// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`.
+unsafe impl<T, B> Sync for GlobalLockedBy<T, B>
+where
+ T: ?Sized,
+ B: GlobalLockBackend,
+ LockedBy<T, B::Item>: Sync,
+{
+}
+
+impl<T, B: GlobalLockBackend> GlobalLockedBy<T, B> {
+ /// Create a new [`GlobalLockedBy`].
+ ///
+ /// The provided value will be protected by the global lock indicated by `B`.
+ pub fn new(val: T) -> Self {
+ Self {
+ value: UnsafeCell::new(val),
+ _backend: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized, B: GlobalLockBackend> GlobalLockedBy<T, B> {
+ /// Access the value immutably.
+ ///
+ /// The caller must prove shared access to the lock.
+ pub fn as_ref<'a>(&'a self, _guard: &'a GlobalGuard<B>) -> &'a T {
+ // SAFETY: The lock is globally unique, so there can only be one guard.
+ unsafe { &*self.value.get() }
+ }
+
+ /// Access the value mutably.
+ ///
+ /// The caller must prove exclusive access to the lock.
+ pub fn as_mut<'a>(&'a self, _guard: &'a mut GlobalGuard<B>) -> &'a mut T {
+ // SAFETY: The lock is globally unique, so there can only be one guard.
+ unsafe { &mut *self.value.get() }
+ }
+
+ /// Access the value mutably directly.
+ ///
+ /// The caller has exclusive access to this `GlobalLockedBy`, so they do not need to hold the
+ /// lock.
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+}
+
+/// Defines a global lock.
+///
+/// The global lock must be initialized before first use. Usually this is done by calling
+/// [`GlobalLock::init`] in the module initializer.
+///
+/// # Examples
+///
+/// A global counter:
+///
+/// ```
+/// # mod ex {
+/// # use kernel::prelude::*;
+/// kernel::sync::global_lock! {
+/// // SAFETY: Initialized in module initializer before first use.
+/// unsafe(uninit) static MY_COUNTER: Mutex<u32> = 0;
+/// }
+///
+/// fn increment_counter() -> u32 {
+/// let mut guard = MY_COUNTER.lock();
+/// *guard += 1;
+/// *guard
+/// }
+///
+/// impl kernel::Module for MyModule {
+/// fn init(_module: &'static ThisModule) -> Result<Self> {
+/// // SAFETY: Called exactly once.
+/// unsafe { MY_COUNTER.init() };
+///
+/// Ok(MyModule {})
+/// }
+/// }
+/// # struct MyModule {}
+/// # }
+/// ```
+///
+/// A global mutex used to protect all instances of a given struct:
+///
+/// ```
+/// # mod ex {
+/// # use kernel::prelude::*;
+/// use kernel::sync::{GlobalGuard, GlobalLockedBy};
+///
+/// kernel::sync::global_lock! {
+/// // SAFETY: Initialized in module initializer before first use.
+/// unsafe(uninit) static MY_MUTEX: Mutex<()> = ();
+/// }
+///
+/// /// All instances of this struct are protected by `MY_MUTEX`.
+/// struct MyStruct {
+/// my_counter: GlobalLockedBy<u32, MY_MUTEX>,
+/// }
+///
+/// impl MyStruct {
+/// /// Increment the counter in this instance.
+/// ///
+/// /// The caller must hold the `MY_MUTEX` mutex.
+/// fn increment(&self, guard: &mut GlobalGuard<MY_MUTEX>) -> u32 {
+/// let my_counter = self.my_counter.as_mut(guard);
+/// *my_counter += 1;
+/// *my_counter
+/// }
+/// }
+///
+/// impl kernel::Module for MyModule {
+/// fn init(_module: &'static ThisModule) -> Result<Self> {
+/// // SAFETY: Called exactly once.
+/// unsafe { MY_MUTEX.init() };
+///
+/// Ok(MyModule {})
+/// }
+/// }
+/// # struct MyModule {}
+/// # }
+/// ```
+#[macro_export]
+macro_rules! global_lock {
+ {
+ $(#[$meta:meta])* $pub:vis
+ unsafe(uninit) static $name:ident: $kind:ident<$valuety:ty> = $value:expr;
+ } => {
+ #[doc = ::core::concat!(
+ "Backend type used by [`",
+ ::core::stringify!($name),
+ "`](static@",
+ ::core::stringify!($name),
+ ")."
+ )]
+ #[allow(non_camel_case_types, unreachable_pub)]
+ $pub enum $name {}
+
+ impl $crate::sync::lock::GlobalLockBackend for $name {
+ const NAME: &'static $crate::str::CStr = $crate::c_str!(::core::stringify!($name));
+ type Item = $valuety;
+ type Backend = $crate::global_lock_inner!(backend $kind);
+
+ fn get_lock_class() -> Pin<&'static $crate::sync::LockClassKey> {
+ $crate::static_lock_class!()
+ }
+ }
+
+ $(#[$meta])*
+ $pub static $name: $crate::sync::lock::GlobalLock<$name> = {
+ // Defined here to be outside the unsafe scope.
+ let init: $valuety = $value;
+
+ // SAFETY:
+ // * The user of this macro promises to initialize the macro before use.
+ // * We are only generating one static with this backend type.
+ unsafe { $crate::sync::lock::GlobalLock::new(init) }
+ };
+ };
+}
+pub use global_lock;
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! global_lock_inner {
+ (backend Mutex) => {
+ $crate::sync::lock::mutex::MutexBackend
+ };
+ (backend SpinLock) => {
+ $crate::sync::lock::spinlock::SpinLockBackend
+ };
+}
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index ef4c4634d294..581cee7ab842 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -4,8 +4,6 @@
//!
//! This module allows Rust code to use the kernel's `struct mutex`.
-use crate::bindings;
-
/// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class.
///
/// It uses the name if one is given, otherwise it generates one based on the file name and line
@@ -28,7 +26,7 @@ pub use new_mutex;
/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts.
///
/// Instances of [`Mutex`] need a lock class and to be pinned. The recommended way to create such
-/// instances is with the [`pin_init`](crate::pin_init) and [`new_mutex`] macros.
+/// instances is with the [`pin_init`](pin_init::pin_init) and [`new_mutex`] macros.
///
/// # Examples
///
@@ -60,7 +58,7 @@ pub use new_mutex;
/// }
///
/// // Allocate a boxed `Example`.
-/// let e = Box::pin_init(Example::new())?;
+/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
@@ -88,6 +86,14 @@ pub use new_mutex;
/// [`struct mutex`]: srctree/include/linux/mutex.h
pub type Mutex<T> = super::Lock<T, MutexBackend>;
+/// A [`Guard`] acquired from locking a [`Mutex`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`Mutex`]. It will unlock
+/// the [`Mutex`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
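+///
+/// # Examples
+///
+/// A minimal sketch (the `Inner` type below is hypothetical) showing how the
+/// alias can name the guard in a function signature:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::sync::{new_mutex, MutexGuard};
+///
+/// struct Inner {
+///     count: u32,
+/// }
+///
+/// /// Increments the counter; the guard proves that the lock is held.
+/// fn bump(mut guard: MutexGuard<'_, Inner>) -> u32 {
+///     guard.count += 1;
+///     guard.count
+/// }
+///
+/// let m = KBox::pin_init(new_mutex!(Inner { count: 0 }), GFP_KERNEL)?;
+/// assert_eq!(bump(m.lock()), 1);
+/// # Ok::<(), Error>(())
+/// ```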
+pub type MutexGuard<'a, T> = super::Guard<'a, T, MutexBackend>;
+
/// A kernel `struct mutex` lock backend.
pub struct MutexBackend;
@@ -98,7 +104,7 @@ unsafe impl super::Backend for MutexBackend {
unsafe fn init(
ptr: *mut Self::State,
- name: *const core::ffi::c_char,
+ name: *const crate::ffi::c_char,
key: *mut bindings::lock_class_key,
) {
// SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
@@ -117,4 +123,20 @@ unsafe impl super::Backend for MutexBackend {
// caller is the owner of the mutex.
unsafe { bindings::mutex_unlock(ptr) };
}
+
+ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ let result = unsafe { bindings::mutex_trylock(ptr) };
+
+ if result != 0 {
+ Some(())
+ } else {
+ None
+ }
+ }
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe { bindings::mutex_assert_is_held(ptr) }
+ }
}
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 0b22c635634f..d7be38ccbdc7 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -4,8 +4,6 @@
//!
//! This module allows Rust code to use the kernel's `spinlock_t`.
-use crate::bindings;
-
/// Creates a [`SpinLock`] initialiser with the given name and a newly-created lock class.
///
/// It uses the name if one is given, otherwise it generates one based on the file name and line
@@ -26,7 +24,7 @@ pub use new_spinlock;
/// unlocked, at which point another CPU will be allowed to make progress.
///
/// Instances of [`SpinLock`] need a lock class and to be pinned. The recommended way to create such
-/// instances is with the [`pin_init`](crate::pin_init) and [`new_spinlock`] macros.
+/// instances is with the [`pin_init`](pin_init::pin_init) and [`new_spinlock`] macros.
///
/// # Examples
///
@@ -58,7 +56,7 @@ pub use new_spinlock;
/// }
///
/// // Allocate a boxed `Example`.
-/// let e = Box::pin_init(Example::new())?;
+/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
@@ -89,6 +87,14 @@ pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
/// A kernel `spinlock_t` lock backend.
pub struct SpinLockBackend;
+/// A [`Guard`] acquired from locking a [`SpinLock`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`SpinLock`]. It will unlock
+/// the [`SpinLock`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
+pub type SpinLockGuard<'a, T> = super::Guard<'a, T, SpinLockBackend>;
+
// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the
// default implementation that always calls the same locking method.
unsafe impl super::Backend for SpinLockBackend {
@@ -97,7 +103,7 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe fn init(
ptr: *mut Self::State,
- name: *const core::ffi::c_char,
+ name: *const crate::ffi::c_char,
key: *mut bindings::lock_class_key,
) {
// SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
@@ -116,4 +122,20 @@ unsafe impl super::Backend for SpinLockBackend {
// caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
+
+ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ let result = unsafe { bindings::spin_trylock(ptr) };
+
+ if result != 0 {
+ Some(())
+ } else {
+ None
+ }
+ }
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe { bindings::spin_assert_is_held(ptr) }
+ }
}
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
index babc731bd5f6..61f100a45b35 100644
--- a/rust/kernel/sync/locked_by.rs
+++ b/rust/kernel/sync/locked_by.rs
@@ -43,7 +43,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr};
/// struct InnerDirectory {
/// /// The sum of the bytes used by all files.
/// bytes_used: u64,
-/// _files: Vec<File>,
+/// _files: KVec<File>,
/// }
///
/// struct Directory {
@@ -55,7 +55,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr};
/// fn print_bytes_used(dir: &Directory, file: &File) {
/// let guard = dir.inner.lock();
/// let inner_file = file.inner.access(&guard);
-/// pr_info!("{} {}", guard.bytes_used, inner_file.bytes_used);
+/// pr_info!("{} {}\n", guard.bytes_used, inner_file.bytes_used);
/// }
///
/// /// Increments `bytes_used` for both the directory and file.
@@ -83,8 +83,12 @@ pub struct LockedBy<T: ?Sized, U: ?Sized> {
// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, U: ?Sized> Send for LockedBy<T, U> {}
-// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the
-// data it protects is `Send`.
+// SAFETY: If `T` is not `Sync`, then parallel shared access to this `LockedBy` allows you to use
+// `access_mut` to hand out `&mut T` on one thread at a time. The requirement that `T: Send` is
+// sufficient to allow that.
+//
+// If `T` is `Sync`, then the `access` method also becomes available, which allows you to obtain
+// several `&T` from several threads at once. However, this is okay as `T` is `Sync`.
unsafe impl<T: ?Sized + Send, U: ?Sized> Sync for LockedBy<T, U> {}
impl<T, U> LockedBy<T, U> {
@@ -118,7 +122,10 @@ impl<T: ?Sized, U> LockedBy<T, U> {
///
/// Panics if `owner` is different from the data protected by the lock used in
/// [`new`](LockedBy::new).
- pub fn access<'a>(&'a self, owner: &'a U) -> &'a T {
+ pub fn access<'a>(&'a self, owner: &'a U) -> &'a T
+ where
+ T: Sync,
+ {
build_assert!(
size_of::<U>() > 0,
"`U` cannot be a ZST because `owner` wouldn't be unique"
@@ -127,7 +134,10 @@ impl<T: ?Sized, U> LockedBy<T, U> {
panic!("mismatched owners");
}
- // SAFETY: `owner` is evidence that the owner is locked.
+ // SAFETY: `owner` is evidence that there are only shared references to the owner for the
+ // duration of 'a, so it's not possible to use `Self::access_mut` to obtain a mutable
+ // reference to the inner value that aliases with this shared reference. The type is `Sync`
+ // so there are no other requirements.
unsafe { &*self.data.get() }
}
diff --git a/rust/kernel/sync/poll.rs b/rust/kernel/sync/poll.rs
new file mode 100644
index 000000000000..d7e6e59e124b
--- /dev/null
+++ b/rust/kernel/sync/poll.rs
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Utilities for working with `struct poll_table`.
+
+use crate::{
+ bindings,
+ fs::File,
+ prelude::*,
+ sync::{CondVar, LockClassKey},
+ types::Opaque,
+};
+use core::ops::Deref;
+
+/// Creates a [`PollCondVar`] initialiser with the given name and a newly-created lock class.
+#[macro_export]
+macro_rules! new_poll_condvar {
+ ($($name:literal)?) => {
+ $crate::sync::poll::PollCondVar::new(
+ $crate::optional_name!($($name)?), $crate::static_lock_class!()
+ )
+ };
+}
+
+/// Wraps the kernel's `struct poll_table`.
+///
+/// # Invariants
+///
+/// This struct contains a valid `struct poll_table`.
+///
+/// For a `struct poll_table` to be valid, its `_qproc` function must follow the safety
+/// requirements of `_qproc` functions:
+///
+/// * The `_qproc` function is given permission to enqueue a waiter to the provided `poll_table`
+/// during the call. Once the waiter is removed and an rcu grace period has passed, it must no
+/// longer access the `wait_queue_head`.
+#[repr(transparent)]
+pub struct PollTable(Opaque<bindings::poll_table>);
+
+impl PollTable {
+ /// Creates a reference to a [`PollTable`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that for the duration of `'a`, the pointer will point at a valid poll
+ /// table (as defined in the type invariants).
+ ///
+ /// The caller must also ensure that the `poll_table` is only accessed via the returned
+ /// reference for the duration of `'a`.
+ pub unsafe fn from_ptr<'a>(ptr: *mut bindings::poll_table) -> &'a mut PollTable {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `PollTable` type being transparent makes the cast ok.
+ unsafe { &mut *ptr.cast() }
+ }
+
+ fn get_qproc(&self) -> bindings::poll_queue_proc {
+ let ptr = self.0.get();
+ // SAFETY: The `ptr` is valid because it originates from a reference, and the `_qproc`
+ // field is not modified concurrently with this call since we have an immutable reference.
+ unsafe { (*ptr)._qproc }
+ }
+
+ /// Register this [`PollTable`] with the provided [`PollCondVar`], so that it can be notified
+ /// using the condition variable.
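+ ///
+ /// A sketch of a typical `poll` implementation; the device type, its
+ /// `read_ready` field and the readiness helper are hypothetical:
+ ///
+ /// ```ignore
+ /// fn poll(device: &MyDevice, file: &File, table: &mut PollTable) -> u32 {
+ ///     // Enqueue a waiter so the condvar can wake up the poller...
+ ///     table.register_wait(file, &device.read_ready);
+ ///     // ...then report the current readiness mask.
+ ///     device.readiness_mask()
+ /// }
+ /// ```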
+ pub fn register_wait(&mut self, file: &File, cv: &PollCondVar) {
+ if let Some(qproc) = self.get_qproc() {
+ // SAFETY: The pointers to `file` and `self` need to be valid for the duration of this
+ // call to `qproc`, which they are because they are references.
+ //
+ // The `cv.wait_queue_head` pointer must be valid until an rcu grace period after the
+ // waiter is removed. The `PollCondVar` is pinned, so before `cv.wait_queue_head` can
+ // be destroyed, the destructor must run. That destructor first removes all waiters,
+ // and then waits for an rcu grace period. Therefore, `cv.wait_queue_head` is valid for
+ // long enough.
+ unsafe { qproc(file.as_ptr() as _, cv.wait_queue_head.get(), self.0.get()) };
+ }
+ }
+}
+
+/// A wrapper around [`CondVar`] that makes it usable with [`PollTable`].
+///
+/// [`CondVar`]: crate::sync::CondVar
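+///
+/// # Examples
+///
+/// A sketch of embedding a [`PollCondVar`] in pinned driver state (the
+/// `DeviceState` type is hypothetical):
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::sync::poll::PollCondVar;
+///
+/// #[pin_data]
+/// struct DeviceState {
+///     #[pin]
+///     read_ready: PollCondVar,
+/// }
+///
+/// impl DeviceState {
+///     fn new() -> impl PinInit<Self> {
+///         pin_init!(Self {
+///             read_ready <- kernel::new_poll_condvar!("DeviceState::read_ready"),
+///         })
+///     }
+/// }
+///
+/// let _state = KBox::pin_init(DeviceState::new(), GFP_KERNEL)?;
+/// # Ok::<(), Error>(())
+/// ```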
+#[pin_data(PinnedDrop)]
+pub struct PollCondVar {
+ #[pin]
+ inner: CondVar,
+}
+
+impl PollCondVar {
+ /// Constructs a new condvar initialiser.
+ pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner <- CondVar::new(name, key),
+ })
+ }
+}
+
+// Make the `CondVar` methods callable on `PollCondVar`.
+impl Deref for PollCondVar {
+ type Target = CondVar;
+
+ fn deref(&self) -> &CondVar {
+ &self.inner
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for PollCondVar {
+ fn drop(self: Pin<&mut Self>) {
+ // Clear anything registered using `register_wait`.
+ //
+ // SAFETY: The pointer points at a valid `wait_queue_head`.
+ unsafe { bindings::__wake_up_pollfree(self.inner.wait_queue_head.get()) };
+
+ // Wait for epoll items to be properly removed.
+ //
+ // SAFETY: Just an FFI call.
+ unsafe { bindings::synchronize_rcu() };
+ }
+}
diff --git a/rust/kernel/sync/rcu.rs b/rust/kernel/sync/rcu.rs
new file mode 100644
index 000000000000..a32bef6e490b
--- /dev/null
+++ b/rust/kernel/sync/rcu.rs
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! RCU support.
+//!
+//! C header: [`include/linux/rcupdate.h`](srctree/include/linux/rcupdate.h)
+
+use crate::{bindings, types::NotThreadSafe};
+
+/// Evidence that the RCU read side lock is held on the current thread/CPU.
+///
+/// The type is explicitly not `Send` because this property is per-thread/CPU.
+///
+/// # Invariants
+///
+/// The RCU read side lock is actually held while instances of this guard exist.
+pub struct Guard(NotThreadSafe);
+
+impl Guard {
+ /// Acquires the RCU read side lock and returns a guard.
+ #[inline]
+ pub fn new() -> Self {
+ // SAFETY: An FFI call with no additional requirements.
+ unsafe { bindings::rcu_read_lock() };
+ // INVARIANT: The RCU read side lock was just acquired above.
+ Self(NotThreadSafe)
+ }
+
+ /// Explicitly releases the RCU read side lock.
+ #[inline]
+ pub fn unlock(self) {}
+}
+
+impl Default for Guard {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl Drop for Guard {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, the RCU read side is locked, so it is ok to unlock it.
+ unsafe { bindings::rcu_read_unlock() };
+ }
+}
+
+/// Acquires the RCU read side lock.
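+///
+/// # Examples
+///
+/// A short sketch of a read-side critical section:
+///
+/// ```
+/// // The RCU read side lock is held from here...
+/// let guard = kernel::sync::rcu::read_lock();
+/// // ... access RCU-protected data ...
+/// // ...until the guard is dropped or explicitly unlocked.
+/// guard.unlock();
+/// ```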
+#[inline]
+pub fn read_lock() -> Guard {
+ Guard::new()
+}
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
index ca6e7e31d71c..927413d85484 100644
--- a/rust/kernel/task.rs
+++ b/rust/kernel/task.rs
@@ -4,10 +4,15 @@
//!
//! C header: [`include/linux/sched.h`](srctree/include/linux/sched.h).
-use crate::{bindings, types::Opaque};
-use core::{
+use crate::{
+ bindings,
ffi::{c_int, c_long, c_uint},
- marker::PhantomData,
+ mm::MmWithUser,
+ pid_namespace::PidNamespace,
+ types::{ARef, NotThreadSafe, Opaque},
+};
+use core::{
+ cmp::{Eq, PartialEq},
ops::Deref,
ptr,
};
@@ -19,6 +24,8 @@ pub const MAX_SCHEDULE_TIMEOUT: c_long = c_long::MAX;
pub const TASK_INTERRUPTIBLE: c_int = bindings::TASK_INTERRUPTIBLE as c_int;
/// Bitmask for tasks that are sleeping in an uninterruptible state.
pub const TASK_UNINTERRUPTIBLE: c_int = bindings::TASK_UNINTERRUPTIBLE as c_int;
+/// Bitmask for tasks that are sleeping in a freezable state.
+pub const TASK_FREEZABLE: c_int = bindings::TASK_FREEZABLE as c_int;
/// Convenience constant for waking up tasks regardless of whether they are in interruptible or
/// uninterruptible sleep.
pub const TASK_NORMAL: c_uint = bindings::TASK_NORMAL as c_uint;
@@ -27,8 +34,16 @@ pub const TASK_NORMAL: c_uint = bindings::TASK_NORMAL as c_uint;
#[macro_export]
macro_rules! current {
() => {
- // SAFETY: Deref + addr-of below create a temporary `TaskRef` that cannot outlive the
- // caller.
+ // SAFETY: This expression creates a temporary value that is dropped at the end of the
+ // caller's scope. The following mechanisms ensure that the resulting `&CurrentTask` cannot
+ // leave current task context:
+ //
+ // * To return to userspace, the caller must leave the current scope.
+ // * Operations such as `begin_new_exec()` are necessarily unsafe and the caller of
+ // `begin_new_exec()` is responsible for safety.
+ // * Rust abstractions for things such as a `kthread_use_mm()` scope must require the
+ // closure to be `Send`, so the `NotThreadSafe` field of `CurrentTask` ensures that the
+ // `&CurrentTask` cannot cross the scope in either direction.
unsafe { &*$crate::task::Task::current() }
};
}
@@ -71,7 +86,7 @@ macro_rules! current {
/// impl State {
/// fn new() -> Self {
/// Self {
-/// creator: current!().into(),
+/// creator: ARef::from(&**current!()),
/// index: 0,
/// }
/// }
@@ -91,10 +106,63 @@ unsafe impl Send for Task {}
// synchronised by C code (e.g., `signal_pending`).
unsafe impl Sync for Task {}
+/// Represents the [`Task`] in the `current` global.
+///
+/// This type exists to provide more efficient operations that are only valid on the current task.
+/// For example, to retrieve the pid-namespace of a task, you must use rcu protection unless it is
+/// the current task.
+///
+/// # Invariants
+///
+/// Each value of this type must only be accessed from the task context it was created within.
+///
+/// Of course, every thread is in a different task context, but for the purposes of this invariant,
+/// these operations also permanently leave the task context:
+///
+/// * Returning to userspace from system call context.
+/// * Calling `release_task()`.
+/// * Calling `begin_new_exec()` in a binary format loader.
+///
+/// Other operations temporarily create a new sub-context:
+///
+/// * Calling `kthread_use_mm()` creates a new context, and `kthread_unuse_mm()` returns to the
+/// old context.
+///
+/// This means that a `CurrentTask` obtained before a `kthread_use_mm()` call may be used again
+/// once `kthread_unuse_mm()` is called, but it must not be used between these two calls.
+/// Conversely, a `CurrentTask` obtained between a `kthread_use_mm()`/`kthread_unuse_mm()` pair
+/// must not be used after `kthread_unuse_mm()`.
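+///
+/// # Examples
+///
+/// A short sketch of reading the current task's pid via the
+/// [`current!`](crate::current) macro:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// let pid = kernel::current!().pid();
+/// pr_info!("current pid: {pid}\n");
+/// ```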
+#[repr(transparent)]
+pub struct CurrentTask(Task, NotThreadSafe);
+
+// Make all `Task` methods available on `CurrentTask`.
+impl Deref for CurrentTask {
+ type Target = Task;
+ #[inline]
+ fn deref(&self) -> &Task {
+ &self.0
+ }
+}
+
/// The type of process identifiers (PIDs).
-type Pid = bindings::pid_t;
+pub type Pid = bindings::pid_t;
+
+/// The type of user identifiers (UIDs).
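+///
+/// # Examples
+///
+/// A short sketch comparing UIDs; equality is implemented with the kernel's
+/// `uid_eq()` helper:
+///
+/// ```
+/// use kernel::task::Kuid;
+///
+/// let euid = Kuid::current_euid();
+/// assert!(euid == kernel::current!().euid());
+/// ```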
+#[derive(Copy, Clone)]
+pub struct Kuid {
+ kuid: bindings::kuid_t,
+}
impl Task {
+ /// Returns a raw pointer to the current task.
+ ///
+ /// It is up to the user to use the pointer correctly.
+ #[inline]
+ pub fn current_raw() -> *mut bindings::task_struct {
+ // SAFETY: Getting the current pointer is always safe.
+ unsafe { bindings::get_current() }
+ }
+
/// Returns a task reference for the currently executing task/thread.
///
/// The recommended way to get the current task/thread is to use the
@@ -102,38 +170,43 @@ impl Task {
///
/// # Safety
///
- /// Callers must ensure that the returned object doesn't outlive the current task/thread.
- pub unsafe fn current() -> impl Deref<Target = Task> {
- struct TaskRef<'a> {
- task: &'a Task,
- _not_send: PhantomData<*mut ()>,
+ /// Callers must ensure that the returned object is only used to access a [`CurrentTask`]
+ /// within the task context that was active when this function was called. For more details,
+ /// see the invariants section for [`CurrentTask`].
+ pub unsafe fn current() -> impl Deref<Target = CurrentTask> {
+ struct TaskRef {
+ task: *const CurrentTask,
}
- impl Deref for TaskRef<'_> {
- type Target = Task;
+ impl Deref for TaskRef {
+ type Target = CurrentTask;
fn deref(&self) -> &Self::Target {
- self.task
+ // SAFETY: The returned reference borrows from this `TaskRef`, so it cannot outlive
+ // the `TaskRef`, which the caller of `Task::current()` has promised will not
+ // outlive the task/thread for which `self.task` is the `current` pointer. Thus, it
+ // is okay to return a `CurrentTask` reference here.
+ unsafe { &*self.task }
}
}
- // SAFETY: Just an FFI call with no additional safety requirements.
- let ptr = unsafe { bindings::get_current() };
-
TaskRef {
- // SAFETY: If the current thread is still running, the current task is valid. Given
- // that `TaskRef` is not `Send`, we know it cannot be transferred to another thread
- // (where it could potentially outlive the caller).
- task: unsafe { &*ptr.cast() },
- _not_send: PhantomData,
+ // CAST: The layout of `struct task_struct` and `CurrentTask` is identical.
+ task: Task::current_raw().cast(),
}
}
+ /// Returns a raw pointer to the task.
+ #[inline]
+ pub fn as_ptr(&self) -> *mut bindings::task_struct {
+ self.0.get()
+ }
+
/// Returns the group leader of the given task.
pub fn group_leader(&self) -> &Task {
- // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always
- // have a valid `group_leader`.
- let ptr = unsafe { *ptr::addr_of!((*self.0.get()).group_leader) };
+ // SAFETY: The group leader of a task never changes after initialization, so reading this
+ // field is not a data race.
+ let ptr = unsafe { *ptr::addr_of!((*self.as_ptr()).group_leader) };
// SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
// and given that a task has a reference to its group leader, we know it must be valid for
@@ -143,23 +216,126 @@ impl Task {
/// Returns the PID of the given task.
pub fn pid(&self) -> Pid {
- // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always
- // have a valid pid.
- unsafe { *ptr::addr_of!((*self.0.get()).pid) }
+ // SAFETY: The pid of a task never changes after initialization, so reading this field is
+ // not a data race.
+ unsafe { *ptr::addr_of!((*self.as_ptr()).pid) }
+ }
+
+ /// Returns the UID of the given task.
+ pub fn uid(&self) -> Kuid {
+ // SAFETY: It's always safe to call `task_uid` on a valid task.
+ Kuid::from_raw(unsafe { bindings::task_uid(self.as_ptr()) })
+ }
+
+ /// Returns the effective UID of the given task.
+ pub fn euid(&self) -> Kuid {
+ // SAFETY: It's always safe to call `task_euid` on a valid task.
+ Kuid::from_raw(unsafe { bindings::task_euid(self.as_ptr()) })
}
/// Determines whether the given task has pending signals.
pub fn signal_pending(&self) -> bool {
+ // SAFETY: It's always safe to call `signal_pending` on a valid task.
+ unsafe { bindings::signal_pending(self.as_ptr()) != 0 }
+ }
+
+ /// Returns the task's pid namespace with an elevated reference count.
+ pub fn get_pid_ns(&self) -> Option<ARef<PidNamespace>> {
// SAFETY: By the type invariant, we know that `self.0` is valid.
- unsafe { bindings::signal_pending(self.0.get()) != 0 }
+ let ptr = unsafe { bindings::task_get_pid_ns(self.as_ptr()) };
+ if ptr.is_null() {
+ None
+ } else {
+ // SAFETY: `ptr` is valid by the safety requirements of this function. And we own a
+ // reference count via `task_get_pid_ns()`.
+ // CAST: `Self` is a `repr(transparent)` wrapper around `bindings::pid_namespace`.
+ Some(unsafe { ARef::from_raw(ptr::NonNull::new_unchecked(ptr.cast::<PidNamespace>())) })
+ }
+ }
+
+ /// Returns the given task's thread group ID (TGID) in the provided pid namespace.
+ #[doc(alias = "task_tgid_nr_ns")]
+ pub fn tgid_nr_ns(&self, pidns: Option<&PidNamespace>) -> Pid {
+ let pidns = match pidns {
+ Some(pidns) => pidns.as_ptr(),
+ None => core::ptr::null_mut(),
+ };
+ // SAFETY: By the type invariant, we know that `self.0` is valid. We received a valid
+ // PidNamespace that we can use as a pointer or we received an empty PidNamespace and
+ // thus pass a null pointer. The underlying C function is safe to be used with NULL
+ // pointers.
+ unsafe { bindings::task_tgid_nr_ns(self.as_ptr(), pidns) }
}
/// Wakes up the task.
pub fn wake_up(&self) {
- // SAFETY: By the type invariant, we know that `self.0.get()` is non-null and valid.
- // And `wake_up_process` is safe to be called for any valid task, even if the task is
+ // SAFETY: It's always safe to call `wake_up_process` on a valid task, even if the task is
// running.
- unsafe { bindings::wake_up_process(self.0.get()) };
+ unsafe { bindings::wake_up_process(self.as_ptr()) };
+ }
+}
+
+impl CurrentTask {
+ /// Access the address space of the current task.
+ ///
+ /// This function does not touch the refcount of the mm.
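+ ///
+ /// # Examples
+ ///
+ /// A sketch that takes a refcount on the current address space, if there is
+ /// one (kernel threads usually have none):
+ ///
+ /// ```
+ /// use kernel::mm::MmWithUser;
+ /// use kernel::types::ARef;
+ ///
+ /// let _mm: Option<ARef<MmWithUser>> = kernel::current!().mm().map(ARef::from);
+ /// ```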
+ #[inline]
+ pub fn mm(&self) -> Option<&MmWithUser> {
+ // SAFETY: The `mm` field of `current` is not modified from other threads, so reading it is
+ // not a data race.
+ let mm = unsafe { (*self.as_ptr()).mm };
+
+ if mm.is_null() {
+ return None;
+ }
+
+ // SAFETY: If `current->mm` is non-null, then it references a valid mm with a non-zero
+ // value of `mm_users`. Furthermore, the returned `&MmWithUser` borrows from this
+ // `CurrentTask`, so it cannot escape the scope in which the current pointer was obtained.
+ //
+ // This is safe even if `kthread_use_mm()`/`kthread_unuse_mm()` are used. There are two
+ // relevant cases:
+ // * If the `&CurrentTask` was created before `kthread_use_mm()`, then it cannot be
+ // accessed during the `kthread_use_mm()`/`kthread_unuse_mm()` scope due to the
+ // `NotThreadSafe` field of `CurrentTask`.
+ // * If the `&CurrentTask` was created within a `kthread_use_mm()`/`kthread_unuse_mm()`
+ // scope, then the `&CurrentTask` cannot escape that scope, so the returned `&MmWithUser`
+ // also cannot escape that scope.
+ // In either case, it's not possible to read `current->mm` and keep using it after the
+ // scope is ended with `kthread_unuse_mm()`.
+ Some(unsafe { MmWithUser::from_raw(mm) })
+ }
+
+ /// Access the pid namespace of the current task.
+ ///
+ /// This function does not touch the refcount of the namespace or use RCU protection.
+ ///
+ /// To access the pid namespace of another task, see [`Task::get_pid_ns`].
+ #[doc(alias = "task_active_pid_ns")]
+ #[inline]
+ pub fn active_pid_ns(&self) -> Option<&PidNamespace> {
+ // SAFETY: It is safe to call `task_active_pid_ns` without RCU protection when calling it
+ // on the current task.
+ let active_ns = unsafe { bindings::task_active_pid_ns(self.as_ptr()) };
+
+ if active_ns.is_null() {
+ return None;
+ }
+
+ // The lifetime of `PidNamespace` is bound to `Task` and `struct pid`.
+ //
+ // The `PidNamespace` of a `Task` doesn't ever change once the `Task` is alive.
+ //
+ // From system call context retrieving the `PidNamespace` for the current task is always
+ // safe and requires neither RCU locking nor a reference count to be held. Retrieving the
+ // `PidNamespace` after `release_task()` for current will return `NULL` but no codepath
+ // like that is exposed to Rust.
+ //
+ // SAFETY: If `current`'s pid ns is non-null, then it references a valid pid ns.
+ // Furthermore, the returned `&PidNamespace` borrows from this `CurrentTask`, so it cannot
+ // escape the scope in which the current pointer was obtained, e.g. it cannot live past a
+ // `release_task()` call.
+ Some(unsafe { PidNamespace::from_ptr(active_ns) })
}
}
@@ -167,7 +343,7 @@ impl Task {
unsafe impl crate::types::AlwaysRefCounted for Task {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference means that the refcount is nonzero.
- unsafe { bindings::get_task_struct(self.0.get()) };
+ unsafe { bindings::get_task_struct(self.as_ptr()) };
}
unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
@@ -175,3 +351,43 @@ unsafe impl crate::types::AlwaysRefCounted for Task {
unsafe { bindings::put_task_struct(obj.cast().as_ptr()) }
}
}
+
+impl Kuid {
+ /// Get the current euid.
+ #[inline]
+ pub fn current_euid() -> Kuid {
+ // SAFETY: Just an FFI call.
+ Self::from_raw(unsafe { bindings::current_euid() })
+ }
+
+ /// Create a `Kuid` given the raw C type.
+ #[inline]
+ pub fn from_raw(kuid: bindings::kuid_t) -> Self {
+ Self { kuid }
+ }
+
+ /// Turn this kuid into the raw C type.
+ #[inline]
+ pub fn into_raw(self) -> bindings::kuid_t {
+ self.kuid
+ }
+
+ /// Converts this kernel UID into a userspace UID.
+ ///
+ /// Uses the namespace of the current task.
+ #[inline]
+ pub fn into_uid_in_current_ns(self) -> bindings::uid_t {
+ // SAFETY: Just an FFI call.
+ unsafe { bindings::from_kuid(bindings::current_user_ns(), self.kuid) }
+ }
+}
+
+impl PartialEq for Kuid {
+ #[inline]
+ fn eq(&self, other: &Kuid) -> bool {
+ // SAFETY: Just an FFI call.
+ unsafe { bindings::uid_eq(self.kuid, other.kuid) }
+ }
+}
+
+impl Eq for Kuid {}
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
index 25a896eed468..f509cb0eb71e 100644
--- a/rust/kernel/time.rs
+++ b/rust/kernel/time.rs
@@ -4,12 +4,20 @@
//!
//! This module contains the kernel APIs related to time and timers that
//! have been ported or wrapped for usage by Rust code in the kernel.
+//!
+//! C header: [`include/linux/jiffies.h`](srctree/include/linux/jiffies.h).
+//! C header: [`include/linux/ktime.h`](srctree/include/linux/ktime.h).
+
+pub mod hrtimer;
+
+/// The number of nanoseconds per millisecond.
+pub const NSEC_PER_MSEC: i64 = bindings::NSEC_PER_MSEC as i64;
/// The time unit of Linux kernel. One jiffy equals (1/HZ) second.
-pub type Jiffies = core::ffi::c_ulong;
+pub type Jiffies = crate::ffi::c_ulong;
/// The millisecond time unit.
-pub type Msecs = core::ffi::c_uint;
+pub type Msecs = crate::ffi::c_uint;
/// Converts milliseconds to jiffies.
#[inline]
@@ -18,3 +26,126 @@ pub fn msecs_to_jiffies(msecs: Msecs) -> Jiffies {
// matter what the argument is.
unsafe { bindings::__msecs_to_jiffies(msecs) }
}
+
+/// A Rust wrapper around a `ktime_t`.
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub struct Ktime {
+ inner: bindings::ktime_t,
+}
+
+impl Ktime {
+ /// Create a `Ktime` from a raw `ktime_t`.
+ #[inline]
+ pub fn from_raw(inner: bindings::ktime_t) -> Self {
+ Self { inner }
+ }
+
+ /// Get the current time using `CLOCK_MONOTONIC`.
+ #[inline]
+ pub fn ktime_get() -> Self {
+ // SAFETY: It is always safe to call `ktime_get` outside of NMI context.
+ Self::from_raw(unsafe { bindings::ktime_get() })
+ }
+
+ /// Divide the number of nanoseconds by a compile-time constant.
+ #[inline]
+ fn divns_constant<const DIV: i64>(self) -> i64 {
+ self.to_ns() / DIV
+ }
+
+ /// Returns the number of nanoseconds.
+ #[inline]
+ pub fn to_ns(self) -> i64 {
+ self.inner
+ }
+
+ /// Returns the number of milliseconds.
+ #[inline]
+ pub fn to_ms(self) -> i64 {
+ self.divns_constant::<NSEC_PER_MSEC>()
+ }
+}
+
+/// Returns the number of milliseconds between two ktimes.
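+///
+/// # Examples
+///
+/// A sketch of measuring the duration of some work:
+///
+/// ```
+/// use kernel::time::{ktime_ms_delta, Ktime};
+///
+/// let start = Ktime::ktime_get();
+/// // ... do some work ...
+/// let end = Ktime::ktime_get();
+///
+/// // `CLOCK_MONOTONIC` never goes backwards, so the delta is non-negative.
+/// assert!(ktime_ms_delta(end, start) >= 0);
+/// ```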
+#[inline]
+pub fn ktime_ms_delta(later: Ktime, earlier: Ktime) -> i64 {
+ (later - earlier).to_ms()
+}
+
+impl core::ops::Sub for Ktime {
+ type Output = Ktime;
+
+ #[inline]
+ fn sub(self, other: Ktime) -> Ktime {
+ Self {
+ inner: self.inner - other.inner,
+ }
+ }
+}
+
+/// An identifier for a clock. Used when specifying clock sources.
+///
+/// Selection of the clock depends on the use case. In some cases the usage of a
+/// particular clock is mandatory, e.g. in network protocols, filesystems. In other
+/// cases the user of the clock has to decide which clock is best suited for the
+/// purpose. In most scenarios clock [`ClockId::Monotonic`] is the best choice as it
+/// provides an accurate monotonic notion of time (leap second smearing ignored).
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[repr(u32)]
+pub enum ClockId {
+ /// A settable system-wide clock that measures real (i.e., wall-clock) time.
+ ///
+ /// Setting this clock requires appropriate privileges. This clock is
+ /// affected by discontinuous jumps in the system time (e.g., if the system
+ /// administrator manually changes the clock), and by frequency adjustments
+ /// performed by NTP and similar applications via adjtime(3), adjtimex(2),
+ /// clock_adjtime(2), and ntp_adjtime(3). This clock normally counts the
+ /// number of seconds since 1970-01-01 00:00:00 Coordinated Universal Time
+ /// (UTC) except that it ignores leap seconds; near a leap second it may be
+ /// adjusted by leap second smearing to stay roughly in sync with UTC. Leap
+ /// second smearing applies frequency adjustments to the clock to speed up
+ /// or slow down the clock to account for the leap second without
+ /// discontinuities in the clock. If leap second smearing is not applied,
+ /// the clock will experience discontinuity around leap second adjustment.
+ RealTime = bindings::CLOCK_REALTIME,
+ /// A monotonically increasing clock.
+ ///
+ /// A nonsettable system-wide clock that represents monotonic time since—as
+ /// described by POSIX—"some unspecified point in the past". On Linux, that
+ /// point corresponds to the number of seconds that the system has been
+ /// running since it was booted.
+ ///
+ /// The CLOCK_MONOTONIC clock is not affected by discontinuous jumps in the
+ /// CLOCK_REALTIME (e.g., if the system administrator manually changes the
+ /// clock), but is affected by frequency adjustments. This clock does not
+ /// count time that the system is suspended.
+ Monotonic = bindings::CLOCK_MONOTONIC,
+ /// A monotonic clock that ticks while the system is suspended.
+ ///
+ /// A nonsettable system-wide clock that is identical to CLOCK_MONOTONIC,
+ /// except that it also includes any time that the system is suspended. This
+ /// allows applications to get a suspend-aware monotonic clock without
+ /// having to deal with the complications of CLOCK_REALTIME, which may have
+ /// discontinuities if the time is changed using settimeofday(2) or similar.
+ BootTime = bindings::CLOCK_BOOTTIME,
+ /// International Atomic Time.
+ ///
+ /// A system-wide clock derived from wall-clock time but counting leap seconds.
+ ///
+ /// This clock is coupled to CLOCK_REALTIME and will be set when CLOCK_REALTIME is
+ /// set, or when the offset to CLOCK_REALTIME is changed via adjtimex(2). This
+ /// usually happens during boot and **should** not happen during normal operations.
+ /// However, if NTP or another application adjusts CLOCK_REALTIME by leap second
+ /// smearing, this clock will not be precise during leap second smearing.
+ ///
+ /// The acronym TAI refers to International Atomic Time.
+ TAI = bindings::CLOCK_TAI,
+}
+
+impl ClockId {
+ fn into_c(self) -> bindings::clockid_t {
+ self as bindings::clockid_t
+ }
+}
diff --git a/rust/kernel/time/hrtimer.rs b/rust/kernel/time/hrtimer.rs
new file mode 100644
index 000000000000..ce53f8579d18
--- /dev/null
+++ b/rust/kernel/time/hrtimer.rs
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Intrusive high resolution timers.
+//!
+//! Allows running timer callbacks without doing allocations at the time of
+//! starting the timer. For now, only one timer per type is allowed.
+//!
+//! # Vocabulary
+//!
+//! States:
+//!
+//! - Stopped: initialized but not started, or cancelled, or not restarted.
+//! - Started: initialized and started or restarted.
+//! - Running: executing the callback.
+//!
+//! Operations:
+//!
+//! * Start
+//! * Cancel
+//! * Restart
+//!
+//! Events:
+//!
+//! * Expire
+//!
+//! ## State Diagram
+//!
+//! ```text
+//! Return NoRestart
+//! +---------------------------------------------------------------------+
+//! | |
+//! | |
+//! | |
+//! | Return Restart |
+//! | +------------------------+ |
+//! | | | |
+//! | | | |
+//! v v | |
+//! +-----------------+ Start +------------------+ +--------+-----+--+
+//! | +---------------->| | | |
+//! Init | | | | Expire | |
+//! --------->| Stopped | | Started +---------->| Running |
+//! | | Cancel | | | |
+//! | |<----------------+ | | |
+//! +-----------------+ +---------------+--+ +-----------------+
+//! ^ |
+//! | |
+//! +---------+
+//! Restart
+//! ```
+//!
+//! A timer is initialized in the **stopped** state. A stopped timer can be
+//! **started** by the `start` operation, with an **expiry** time. After the
+//! `start` operation, the timer is in the **started** state. When the timer
+//! **expires**, the timer enters the **running** state and the handler is
+//! executed. After the handler has returned, the timer may enter the
+//! **started** or **stopped** state, depending on the return value of the
+//! handler. A timer in the **started** or **running** state may be **cancelled**
+//! by the `cancel` operation. A timer that is cancelled enters the **stopped**
+//! state.
+//!
+//! A `cancel` or `restart` operation on a timer in the **running** state takes
+//! effect after the handler has returned and the timer has transitioned
+//! out of the **running** state.
+//!
+//! A `restart` operation on a timer in the **stopped** state is equivalent to a
+//! `start` operation.
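+//!
+//! # Examples
+//!
+//! A minimal sketch (not taken from this patch; the `MyTimer` type and the
+//! one-millisecond expiry are illustrative) of an [`Arc`]-based timer:
+//!
+//! ```
+//! use kernel::prelude::*;
+//! use kernel::sync::Arc;
+//! use kernel::time::hrtimer::{
+//!     HrTimer, HrTimerCallback, HrTimerMode, HrTimerPointer, HrTimerRestart,
+//!     RawHrTimerCallback,
+//! };
+//! use kernel::time::{ClockId, Ktime};
+//!
+//! #[pin_data]
+//! struct MyTimer {
+//!     #[pin]
+//!     timer: HrTimer<MyTimer>,
+//! }
+//!
+//! // `MyTimer` embeds a `HrTimer`, so it can implement `HasHrTimer` via the
+//! // helper macro.
+//! kernel::impl_has_hr_timer! {
+//!     impl HasHrTimer<MyTimer> for MyTimer { self.timer }
+//! }
+//!
+//! impl HrTimerCallback for MyTimer {
+//!     type Pointer<'a> = Arc<MyTimer>;
+//!
+//!     fn run(
+//!         _this: <Self::Pointer<'_> as RawHrTimerCallback>::CallbackTarget<'_>,
+//!     ) -> HrTimerRestart {
+//!         pr_info!("timer expired\n");
+//!         HrTimerRestart::NoRestart
+//!     }
+//! }
+//!
+//! fn arm_timer() -> Result<impl kernel::time::hrtimer::HrTimerHandle> {
+//!     let timer = Arc::pin_init(
+//!         pin_init!(MyTimer {
+//!             timer <- HrTimer::new(HrTimerMode::Relative, ClockId::Monotonic),
+//!         }),
+//!         GFP_KERNEL,
+//!     )?;
+//!     // Fire one millisecond from now; the raw `Ktime` value is in nanoseconds.
+//!     Ok(timer.start(Ktime::from_raw(1_000_000)))
+//! }
+//! ```
+//!
+//! [`Arc`]: crate::sync::Arc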
+
+use super::ClockId;
+use crate::{prelude::*, time::Ktime, types::Opaque};
+use core::marker::PhantomData;
+use pin_init::PinInit;
+
+/// A timer backed by a C `struct hrtimer`.
+///
+/// # Invariants
+///
+/// * `self.timer` is initialized by `bindings::hrtimer_setup`.
+#[pin_data]
+#[repr(C)]
+pub struct HrTimer<T> {
+ #[pin]
+ timer: Opaque<bindings::hrtimer>,
+ mode: HrTimerMode,
+ _t: PhantomData<T>,
+}
+
+// SAFETY: Ownership of an `HrTimer` can be moved to other threads and
+// used/dropped from there.
+unsafe impl<T> Send for HrTimer<T> {}
+
+// SAFETY: Timer operations are locked on the C side, so it is safe to operate
+// on a timer from multiple threads.
+unsafe impl<T> Sync for HrTimer<T> {}
+
+impl<T> HrTimer<T> {
+ /// Return an initializer for a new timer instance.
+ pub fn new(mode: HrTimerMode, clock: ClockId) -> impl PinInit<Self>
+ where
+ T: HrTimerCallback,
+ {
+ pin_init!(Self {
+ // INVARIANT: We initialize `timer` with `hrtimer_setup` below.
+ timer <- Opaque::ffi_init(move |place: *mut bindings::hrtimer| {
+ // SAFETY: By design of `pin_init!`, `place` is a pointer to a
+ // live allocation. hrtimer_setup will initialize `place` and
+ // does not require `place` to be initialized prior to the call.
+ unsafe {
+ bindings::hrtimer_setup(
+ place,
+ Some(T::Pointer::run),
+ clock.into_c(),
+ mode.into_c(),
+ );
+ }
+ }),
+ mode: mode,
+ _t: PhantomData,
+ })
+ }
+
+ /// Get a pointer to the contained `bindings::hrtimer`.
+ ///
+ /// This function is useful to get access to the value without creating
+ /// intermediate references.
+ ///
+ /// # Safety
+ ///
+ /// `this` must point to a live allocation of at least the size of `Self`.
+ unsafe fn raw_get(this: *const Self) -> *mut bindings::hrtimer {
+ // SAFETY: The field projection to `timer` does not go out of bounds,
+ // because the caller of this function promises that `this` points to an
+ // allocation of at least the size of `Self`.
+ unsafe { Opaque::raw_get(core::ptr::addr_of!((*this).timer)) }
+ }
+
+ /// Cancel an initialized and potentially running timer.
+ ///
+ /// If the timer handler is running, this function will block until the
+ /// handler returns.
+ ///
+ /// Note that the timer might be started by a concurrent start operation. If
+ /// so, the timer might not be in the **stopped** state when this function
+ /// returns.
+ ///
+ /// Users of the `HrTimer` API would not usually call this method directly.
+ /// Instead they would use the safe [`HrTimerHandle::cancel`] on the handle
+ /// returned when the timer was started.
+ ///
+ /// # Safety
+ ///
+ /// `this` must point to a valid `Self`.
+ pub(crate) unsafe fn raw_cancel(this: *const Self) -> bool {
+ // SAFETY: `this` points to an allocation of at least `HrTimer` size.
+ let c_timer_ptr = unsafe { HrTimer::raw_get(this) };
+
+ // If the handler is running, this will wait for the handler to return
+ // before returning.
+ // SAFETY: `c_timer_ptr` is initialized and valid. Synchronization is
+ // handled on the C side.
+ unsafe { bindings::hrtimer_cancel(c_timer_ptr) != 0 }
+ }
+}
+
+/// Implemented by pointer types that point to structs that contain a [`HrTimer`].
+///
+/// `Self` must be [`Sync`] because it is passed to timer callbacks in another
+/// thread of execution (hard or soft interrupt context).
+///
+/// Starting a timer returns a [`HrTimerHandle`] that can be used to manipulate
+/// the timer. Note that it is OK to call the start function repeatedly, and
+/// that more than one [`HrTimerHandle`] associated with a [`HrTimerPointer`] may
+/// exist. A timer can be manipulated through any of the handles, and a handle
+/// may represent a cancelled timer.
+pub trait HrTimerPointer: Sync + Sized {
+ /// A handle representing a started or restarted timer.
+ ///
+ /// If the timer is running or if the timer callback is executing when the
+ /// handle is dropped, the drop method of [`HrTimerHandle`] should not return
+ /// until the timer is stopped and the callback has completed.
+ ///
+ /// Note: When implementing this trait, consider that it is not unsafe to
+ /// leak the handle.
+ type TimerHandle: HrTimerHandle;
+
+ /// Start the timer with expiry after `expires` time units. If the timer was
+ /// already running, it is restarted with the new expiry time.
+ fn start(self, expires: Ktime) -> Self::TimerHandle;
+}
+
+/// Unsafe version of [`HrTimerPointer`] for situations where leaking the
+/// [`HrTimerHandle`] returned by `start` would be unsound. This is the case for
+/// stack allocated timers.
+///
+/// Typical implementers are pinned references such as [`Pin<&T>`].
+///
+/// # Safety
+///
+/// Implementers of this trait must ensure that instances of types implementing
+/// [`UnsafeHrTimerPointer`] outlives any associated [`HrTimerPointer::TimerHandle`]
+/// instances.
+pub unsafe trait UnsafeHrTimerPointer: Sync + Sized {
+ /// A handle representing a running timer.
+ ///
+ /// # Safety
+ ///
+ /// If the timer is running, or if the timer callback is executing when the
+ /// handle is dropped, the drop method of [`Self::TimerHandle`] must not return
+ /// until the timer is stopped and the callback has completed.
+ type TimerHandle: HrTimerHandle;
+
+ /// Start the timer after `expires` time units. If the timer was already
+ /// running, it is restarted at the new expiry time.
+ ///
+ /// # Safety
+ ///
+ /// Caller promises to keep the timer structure alive until the timer is dead.
+ /// Caller can ensure this by not leaking the returned [`Self::TimerHandle`].
+ unsafe fn start(self, expires: Ktime) -> Self::TimerHandle;
+}
+
+/// A trait for stack allocated timers.
+///
+/// # Safety
+///
+/// Implementers must ensure that `start_scoped` does not return until the
+/// timer is dead and the timer handler is not running.
+pub unsafe trait ScopedHrTimerPointer {
+ /// Start the timer to run after `expires` time units and immediately call
+ /// `f`. When `f` returns, the timer is cancelled.
+ fn start_scoped<T, F>(self, expires: Ktime, f: F) -> T
+ where
+ F: FnOnce() -> T;
+}
+
+// SAFETY: By the safety requirement of [`UnsafeHrTimerPointer`], dropping the
+// handle returned by [`UnsafeHrTimerPointer::start`] ensures that the timer is
+// killed.
+unsafe impl<T> ScopedHrTimerPointer for T
+where
+ T: UnsafeHrTimerPointer,
+{
+ fn start_scoped<U, F>(self, expires: Ktime, f: F) -> U
+ where
+ F: FnOnce() -> U,
+ {
+ // SAFETY: We drop the timer handle below before returning.
+ let handle = unsafe { UnsafeHrTimerPointer::start(self, expires) };
+ let t = f();
+ drop(handle);
+ t
+ }
+}
+
+/// Implemented by [`HrTimerPointer`] implementers to give the C timer callback a
+/// function to call.
+// This is split from `HrTimerPointer` to make it easier to specify trait bounds.
+pub trait RawHrTimerCallback {
+ /// Type of the parameter passed to [`HrTimerCallback::run`]. It may be
+ /// [`Self`], or a pointer type derived from [`Self`].
+ type CallbackTarget<'a>;
+
+ /// Callback to be called from C when timer fires.
+ ///
+ /// # Safety
+ ///
+ /// Only to be called by C code in the `hrtimer` subsystem. `this` must point
+ /// to the `bindings::hrtimer` structure that was used to start the timer.
+ unsafe extern "C" fn run(this: *mut bindings::hrtimer) -> bindings::hrtimer_restart;
+}
+
+/// Implemented by structs that can be the target of a timer callback.
+pub trait HrTimerCallback {
+ /// The type whose [`RawHrTimerCallback::run`] method will be invoked when
+ /// the timer expires.
+ type Pointer<'a>: RawHrTimerCallback;
+
+ /// Called by the timer logic when the timer fires.
+ fn run(this: <Self::Pointer<'_> as RawHrTimerCallback>::CallbackTarget<'_>) -> HrTimerRestart
+ where
+ Self: Sized;
+}
+
+/// A handle representing a potentially running timer.
+///
+/// More than one handle representing the same timer might exist.
+///
+/// # Safety
+///
+/// When dropped, the timer represented by this handle must be cancelled, if it
+/// is running. If the timer handler is running when the handle is dropped, the
+/// drop method must wait for the handler to return before returning.
+///
+/// Note: One way to satisfy the safety requirement is to call `Self::cancel` in
+ /// the drop implementation for `Self`.
+pub unsafe trait HrTimerHandle {
+ /// Cancel the timer. If the timer is in the running state, block till the
+ /// handler has returned.
+ ///
+ /// Note that the timer might be started by a concurrent start operation. If
+ /// so, the timer might not be in the **stopped** state when this function
+ /// returns.
+ fn cancel(&mut self) -> bool;
+}
+
+/// Implemented by structs that contain timer nodes.
+///
+/// Clients of the timer API would usually safely implement this trait by using
+/// the [`crate::impl_has_hr_timer`] macro.
+///
+/// # Safety
+///
+/// Implementers of this trait must ensure that the implementer has a
+/// [`HrTimer`] field and that all trait methods are implemented according to
+/// their documentation. All the methods of this trait must operate on the same
+/// field.
+pub unsafe trait HasHrTimer<T> {
+ /// Return a pointer to the [`HrTimer`] within `Self`.
+ ///
+ /// This function is useful to get access to the value without creating
+ /// intermediate references.
+ ///
+ /// # Safety
+ ///
+ /// `this` must be a valid pointer.
+ unsafe fn raw_get_timer(this: *const Self) -> *const HrTimer<T>;
+
+ /// Return a pointer to the struct that is containing the [`HrTimer`] pointed
+ /// to by `ptr`.
+ ///
+ /// This function is useful to get access to the value without creating
+ /// intermediate references.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must point to a [`HrTimer<T>`] field in a struct of type `Self`.
+ unsafe fn timer_container_of(ptr: *mut HrTimer<T>) -> *mut Self
+ where
+ Self: Sized;
+
+ /// Get pointer to the contained `bindings::hrtimer` struct.
+ ///
+ /// This function is useful to get access to the value without creating
+ /// intermediate references.
+ ///
+ /// # Safety
+ ///
+ /// `this` must be a valid pointer.
+ unsafe fn c_timer_ptr(this: *const Self) -> *const bindings::hrtimer {
+ // SAFETY: `this` is a valid pointer to a `Self`.
+ let timer_ptr = unsafe { Self::raw_get_timer(this) };
+
+ // SAFETY: timer_ptr points to an allocation of at least `HrTimer` size.
+ unsafe { HrTimer::raw_get(timer_ptr) }
+ }
+
+ /// Start the timer contained in the `Self` pointed to by `this`. If it is
+ /// already running, it is removed and re-inserted with the new expiry time.
+ ///
+ /// # Safety
+ ///
+ /// - `this` must point to a valid `Self`.
+ /// - Caller must ensure that the pointee of `this` lives until the timer
+ /// fires or is canceled.
+ unsafe fn start(this: *const Self, expires: Ktime) {
+ // SAFETY: By function safety requirement, `this` is a valid `Self`.
+ unsafe {
+ bindings::hrtimer_start_range_ns(
+ Self::c_timer_ptr(this).cast_mut(),
+ expires.to_ns(),
+ 0,
+ (*Self::raw_get_timer(this)).mode.into_c(),
+ );
+ }
+ }
+}
+
+/// Restart policy for timers.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[repr(u32)]
+pub enum HrTimerRestart {
+ /// Timer should not be restarted.
+ #[allow(clippy::unnecessary_cast)]
+ NoRestart = bindings::hrtimer_restart_HRTIMER_NORESTART as u32,
+ /// Timer should be restarted.
+ #[allow(clippy::unnecessary_cast)]
+ Restart = bindings::hrtimer_restart_HRTIMER_RESTART as u32,
+}
+
+impl HrTimerRestart {
+ fn into_c(self) -> bindings::hrtimer_restart {
+ self as bindings::hrtimer_restart
+ }
+}
+
+/// Operational mode of [`HrTimer`].
+// NOTE: Some of these have the same encoding on the C side, so we keep
+// `repr(Rust)` and convert elsewhere.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum HrTimerMode {
+ /// Timer expires at the given expiration time.
+ Absolute,
+ /// Timer expires after the given expiration time interpreted as a duration from now.
+ Relative,
+ /// Timer does not move between CPU cores.
+ Pinned,
+ /// Timer handler is executed in soft irq context.
+ Soft,
+ /// Timer handler is executed in hard irq context.
+ Hard,
+ /// Timer expires at the given expiration time.
+ /// Timer does not move between CPU cores.
+ AbsolutePinned,
+ /// Timer expires after the given expiration time interpreted as a duration from now.
+ /// Timer does not move between CPU cores.
+ RelativePinned,
+ /// Timer expires at the given expiration time.
+ /// Timer handler is executed in soft irq context.
+ AbsoluteSoft,
+ /// Timer expires after the given expiration time interpreted as a duration from now.
+ /// Timer handler is executed in soft irq context.
+ RelativeSoft,
+ /// Timer expires at the given expiration time.
+ /// Timer does not move between CPU cores.
+ /// Timer handler is executed in soft irq context.
+ AbsolutePinnedSoft,
+ /// Timer expires after the given expiration time interpreted as a duration from now.
+ /// Timer does not move between CPU cores.
+ /// Timer handler is executed in soft irq context.
+ RelativePinnedSoft,
+ /// Timer expires at the given expiration time.
+ /// Timer handler is executed in hard irq context.
+ AbsoluteHard,
+ /// Timer expires after the given expiration time interpreted as a duration from now.
+ /// Timer handler is executed in hard irq context.
+ RelativeHard,
+ /// Timer expires at the given expiration time.
+ /// Timer does not move between CPU cores.
+ /// Timer handler is executed in hard irq context.
+ AbsolutePinnedHard,
+ /// Timer expires after the given expiration time interpreted as a duration from now.
+ /// Timer does not move between CPU cores.
+ /// Timer handler is executed in hard irq context.
+ RelativePinnedHard,
+}
+
+impl HrTimerMode {
+ fn into_c(self) -> bindings::hrtimer_mode {
+ use bindings::*;
+ match self {
+ HrTimerMode::Absolute => hrtimer_mode_HRTIMER_MODE_ABS,
+ HrTimerMode::Relative => hrtimer_mode_HRTIMER_MODE_REL,
+ HrTimerMode::Pinned => hrtimer_mode_HRTIMER_MODE_PINNED,
+ HrTimerMode::Soft => hrtimer_mode_HRTIMER_MODE_SOFT,
+ HrTimerMode::Hard => hrtimer_mode_HRTIMER_MODE_HARD,
+ HrTimerMode::AbsolutePinned => hrtimer_mode_HRTIMER_MODE_ABS_PINNED,
+ HrTimerMode::RelativePinned => hrtimer_mode_HRTIMER_MODE_REL_PINNED,
+ HrTimerMode::AbsoluteSoft => hrtimer_mode_HRTIMER_MODE_ABS_SOFT,
+ HrTimerMode::RelativeSoft => hrtimer_mode_HRTIMER_MODE_REL_SOFT,
+ HrTimerMode::AbsolutePinnedSoft => hrtimer_mode_HRTIMER_MODE_ABS_PINNED_SOFT,
+ HrTimerMode::RelativePinnedSoft => hrtimer_mode_HRTIMER_MODE_REL_PINNED_SOFT,
+ HrTimerMode::AbsoluteHard => hrtimer_mode_HRTIMER_MODE_ABS_HARD,
+ HrTimerMode::RelativeHard => hrtimer_mode_HRTIMER_MODE_REL_HARD,
+ HrTimerMode::AbsolutePinnedHard => hrtimer_mode_HRTIMER_MODE_ABS_PINNED_HARD,
+ HrTimerMode::RelativePinnedHard => hrtimer_mode_HRTIMER_MODE_REL_PINNED_HARD,
+ }
+ }
+}
+
+/// Used to implement the [`HasHrTimer<T>`] trait.
+///
+/// See [`module`] documentation for an example.
+///
+/// [`module`]: crate::time::hrtimer
+#[macro_export]
+macro_rules! impl_has_hr_timer {
+ (
+ impl$({$($generics:tt)*})?
+ HasHrTimer<$timer_type:ty>
+ for $self:ty
+ { self.$field:ident }
+ $($rest:tt)*
+ ) => {
+ // SAFETY: This implementation of `raw_get_timer` only compiles if the
+ // field has the right type.
+ unsafe impl$(<$($generics)*>)? $crate::time::hrtimer::HasHrTimer<$timer_type> for $self {
+
+ #[inline]
+ unsafe fn raw_get_timer(
+ this: *const Self,
+ ) -> *const $crate::time::hrtimer::HrTimer<$timer_type> {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ unsafe { ::core::ptr::addr_of!((*this).$field) }
+ }
+
+ #[inline]
+ unsafe fn timer_container_of(
+ ptr: *mut $crate::time::hrtimer::HrTimer<$timer_type>,
+ ) -> *mut Self {
+ // SAFETY: As per the safety requirement of this function, `ptr`
+ // is pointing inside a `$timer_type`.
+ unsafe { ::kernel::container_of!(ptr, $timer_type, $field).cast_mut() }
+ }
+ }
+ }
+}
+
+mod arc;
+pub use arc::ArcHrTimerHandle;
+mod pin;
+pub use pin::PinHrTimerHandle;
+mod pin_mut;
+pub use pin_mut::PinMutHrTimerHandle;
+// `box` is a reserved keyword, so prefix with `t` for timer
+mod tbox;
+pub use tbox::BoxHrTimerHandle;
diff --git a/rust/kernel/time/hrtimer/arc.rs b/rust/kernel/time/hrtimer/arc.rs
new file mode 100644
index 000000000000..4a984d85b4a1
--- /dev/null
+++ b/rust/kernel/time/hrtimer/arc.rs
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::HasHrTimer;
+use super::HrTimer;
+use super::HrTimerCallback;
+use super::HrTimerHandle;
+use super::HrTimerPointer;
+use super::RawHrTimerCallback;
+use crate::sync::Arc;
+use crate::sync::ArcBorrow;
+use crate::time::Ktime;
+
+/// A handle for an `Arc<HasHrTimer<T>>` returned by a call to
+/// [`HrTimerPointer::start`].
+pub struct ArcHrTimerHandle<T>
+where
+ T: HasHrTimer<T>,
+{
+ pub(crate) inner: Arc<T>,
+}
+
+// SAFETY: We implement drop below, and we cancel the timer in the drop
+// implementation.
+unsafe impl<T> HrTimerHandle for ArcHrTimerHandle<T>
+where
+ T: HasHrTimer<T>,
+{
+ fn cancel(&mut self) -> bool {
+ let self_ptr = Arc::as_ptr(&self.inner);
+
+ // SAFETY: As we obtained `self_ptr` from a valid reference above, it
+ // must point to a valid `T`.
+ let timer_ptr = unsafe { <T as HasHrTimer<T>>::raw_get_timer(self_ptr) };
+
+ // SAFETY: As `timer_ptr` points into `T` and `T` is valid, `timer_ptr`
+ // must point to a valid `HrTimer` instance.
+ unsafe { HrTimer::<T>::raw_cancel(timer_ptr) }
+ }
+}
+
+impl<T> Drop for ArcHrTimerHandle<T>
+where
+ T: HasHrTimer<T>,
+{
+ fn drop(&mut self) {
+ self.cancel();
+ }
+}
+
+impl<T> HrTimerPointer for Arc<T>
+where
+ T: 'static,
+ T: Send + Sync,
+ T: HasHrTimer<T>,
+ T: for<'a> HrTimerCallback<Pointer<'a> = Self>,
+{
+ type TimerHandle = ArcHrTimerHandle<T>;
+
+ fn start(self, expires: Ktime) -> ArcHrTimerHandle<T> {
+ // SAFETY:
+ // - We keep `self` alive by wrapping it in a handle below.
+ // - Since we generate the pointer passed to `start` from a valid
+ // reference, it is a valid pointer.
+ unsafe { T::start(Arc::as_ptr(&self), expires) };
+ ArcHrTimerHandle { inner: self }
+ }
+}
+
+impl<T> RawHrTimerCallback for Arc<T>
+where
+ T: 'static,
+ T: HasHrTimer<T>,
+ T: for<'a> HrTimerCallback<Pointer<'a> = Self>,
+{
+ type CallbackTarget<'a> = ArcBorrow<'a, T>;
+
+ unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
+ // `HrTimer` is `repr(C)`
+ let timer_ptr = ptr.cast::<super::HrTimer<T>>();
+
+ // SAFETY: By C API contract `ptr` is the pointer we passed when
+ // queuing the timer, so it is a `HrTimer<T>` embedded in a `T`.
+ let data_ptr = unsafe { T::timer_container_of(timer_ptr) };
+
+ // SAFETY:
+ // - `data_ptr` is derived from the pointer to the `T` that was used to
+ // queue the timer.
+ // - As per the safety requirements of the trait `HrTimerHandle`, the
+ // `ArcHrTimerHandle` associated with this timer is guaranteed to
+ // be alive until this method returns. That handle borrows the `T`
+ // behind `data_ptr` thus guaranteeing the validity of
+ // the `ArcBorrow` created below.
+ // - We own one refcount in the `ArcHrTimerHandle` associated with this
+ // timer, so it is not possible to get a `UniqueArc` to this
+ // allocation from other `Arc` clones.
+ let receiver = unsafe { ArcBorrow::from_raw(data_ptr) };
+
+ T::run(receiver).into_c()
+ }
+}
diff --git a/rust/kernel/time/hrtimer/pin.rs b/rust/kernel/time/hrtimer/pin.rs
new file mode 100644
index 000000000000..f760db265c7b
--- /dev/null
+++ b/rust/kernel/time/hrtimer/pin.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::HasHrTimer;
+use super::HrTimer;
+use super::HrTimerCallback;
+use super::HrTimerHandle;
+use super::RawHrTimerCallback;
+use super::UnsafeHrTimerPointer;
+use crate::time::Ktime;
+use core::pin::Pin;
+
+/// A handle for a `Pin<&HasHrTimer>`. When the handle exists, the timer might be
+/// running.
+pub struct PinHrTimerHandle<'a, T>
+where
+ T: HasHrTimer<T>,
+{
+ pub(crate) inner: Pin<&'a T>,
+}
+
+// SAFETY: We cancel the timer when the handle is dropped. The implementation of
+// the `cancel` method will block if the timer handler is running.
+unsafe impl<'a, T> HrTimerHandle for PinHrTimerHandle<'a, T>
+where
+ T: HasHrTimer<T>,
+{
+ fn cancel(&mut self) -> bool {
+ let self_ptr: *const T = self.inner.get_ref();
+
+ // SAFETY: As we got `self_ptr` from a reference above, it must point to
+ // a valid `T`.
+ let timer_ptr = unsafe { <T as HasHrTimer<T>>::raw_get_timer(self_ptr) };
+
+ // SAFETY: As `timer_ptr` is derived from a reference, it must point to
+ // a valid and initialized `HrTimer`.
+ unsafe { HrTimer::<T>::raw_cancel(timer_ptr) }
+ }
+}
+
+impl<'a, T> Drop for PinHrTimerHandle<'a, T>
+where
+ T: HasHrTimer<T>,
+{
+ fn drop(&mut self) {
+ self.cancel();
+ }
+}
+
+// SAFETY: We capture the lifetime of `Self` when we create a `PinHrTimerHandle`,
+// so `Self` will outlive the handle.
+unsafe impl<'a, T> UnsafeHrTimerPointer for Pin<&'a T>
+where
+ T: Send + Sync,
+ T: HasHrTimer<T>,
+ T: HrTimerCallback<Pointer<'a> = Self>,
+{
+ type TimerHandle = PinHrTimerHandle<'a, T>;
+
+ unsafe fn start(self, expires: Ktime) -> Self::TimerHandle {
+ // Cast to pointer
+ let self_ptr: *const T = self.get_ref();
+
+ // SAFETY:
+ // - As we derive `self_ptr` from a reference above, it must point to a
+ // valid `T`.
+ // - We keep `self` alive by wrapping it in a handle below.
+ unsafe { T::start(self_ptr, expires) };
+
+ PinHrTimerHandle { inner: self }
+ }
+}
+
+impl<'a, T> RawHrTimerCallback for Pin<&'a T>
+where
+ T: HasHrTimer<T>,
+ T: HrTimerCallback<Pointer<'a> = Self>,
+{
+ type CallbackTarget<'b> = Self;
+
+ unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
+ // `HrTimer` is `repr(C)`
+ let timer_ptr = ptr as *mut HrTimer<T>;
+
+ // SAFETY: By the safety requirement of this function, `timer_ptr`
+ // points to a `HrTimer<T>` contained in a `T`.
+ let receiver_ptr = unsafe { T::timer_container_of(timer_ptr) };
+
+ // SAFETY:
+ // - By the safety requirement of this function, `timer_ptr`
+ // points to a `HrTimer<T>` contained in a `T`.
+ // - As per the safety requirements of the trait `HrTimerHandle`, the
+ // `PinHrTimerHandle` associated with this timer is guaranteed to
+ // be alive until this method returns. That handle borrows the `T`
+ // behind `receiver_ptr`, thus guaranteeing the validity of
+ // the reference created below.
+ let receiver_ref = unsafe { &*receiver_ptr };
+
+ // SAFETY: `receiver_ref` only exists as pinned, so it is safe to pin it
+ // here.
+ let receiver_pin = unsafe { Pin::new_unchecked(receiver_ref) };
+
+ T::run(receiver_pin).into_c()
+ }
+}
diff --git a/rust/kernel/time/hrtimer/pin_mut.rs b/rust/kernel/time/hrtimer/pin_mut.rs
new file mode 100644
index 000000000000..90c0351d62e4
--- /dev/null
+++ b/rust/kernel/time/hrtimer/pin_mut.rs
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::{
+ HasHrTimer, HrTimer, HrTimerCallback, HrTimerHandle, RawHrTimerCallback, UnsafeHrTimerPointer,
+};
+use crate::time::Ktime;
+use core::{marker::PhantomData, pin::Pin, ptr::NonNull};
+
+/// A handle for a `Pin<&mut HasHrTimer>`. When the handle exists, the timer might
+/// be running.
+pub struct PinMutHrTimerHandle<'a, T>
+where
+ T: HasHrTimer<T>,
+{
+ pub(crate) inner: NonNull<T>,
+ _p: PhantomData<&'a mut T>,
+}
+
+// SAFETY: We cancel the timer when the handle is dropped. The implementation of
+// the `cancel` method will block if the timer handler is running.
+unsafe impl<'a, T> HrTimerHandle for PinMutHrTimerHandle<'a, T>
+where
+ T: HasHrTimer<T>,
+{
+ fn cancel(&mut self) -> bool {
+ let self_ptr = self.inner.as_ptr();
+
+ // SAFETY: `self.inner` was created from a valid mutable reference, so
+ // `self_ptr` must point to a valid `T`.
+ let timer_ptr = unsafe { <T as HasHrTimer<T>>::raw_get_timer(self_ptr) };
+
+ // SAFETY: As `timer_ptr` is derived from a reference, it must point to
+ // a valid and initialized `HrTimer`.
+ unsafe { HrTimer::<T>::raw_cancel(timer_ptr) }
+ }
+}
+
+impl<'a, T> Drop for PinMutHrTimerHandle<'a, T>
+where
+ T: HasHrTimer<T>,
+{
+ fn drop(&mut self) {
+ self.cancel();
+ }
+}
+
+// SAFETY: We capture the lifetime of `Self` when we create a
+// `PinMutHrTimerHandle`, so `Self` will outlive the handle.
+unsafe impl<'a, T> UnsafeHrTimerPointer for Pin<&'a mut T>
+where
+ T: Send + Sync,
+ T: HasHrTimer<T>,
+ T: HrTimerCallback<Pointer<'a> = Self>,
+{
+ type TimerHandle = PinMutHrTimerHandle<'a, T>;
+
+ unsafe fn start(mut self, expires: Ktime) -> Self::TimerHandle {
+ // SAFETY:
+ // - We promise not to move out of `self`. We only pass `self`
+ // back to the caller as a `Pin<&mut Self>`.
+ // - The return value of `get_unchecked_mut` is guaranteed not to be null.
+ let self_ptr = unsafe { NonNull::new_unchecked(self.as_mut().get_unchecked_mut()) };
+
+ // SAFETY:
+ // - As we derive `self_ptr` from a reference above, it must point to a
+ // valid `T`.
+ // - We keep `self` alive by wrapping it in a handle below.
+ unsafe { T::start(self_ptr.as_ptr(), expires) };
+
+ PinMutHrTimerHandle {
+ inner: self_ptr,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<'a, T> RawHrTimerCallback for Pin<&'a mut T>
+where
+ T: HasHrTimer<T>,
+ T: HrTimerCallback<Pointer<'a> = Self>,
+{
+ type CallbackTarget<'b> = Self;
+
+ unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
+ // `HrTimer` is `repr(C)`
+ let timer_ptr = ptr as *mut HrTimer<T>;
+
+ // SAFETY: By the safety requirement of this function, `timer_ptr`
+ // points to a `HrTimer<T>` contained in a `T`.
+ let receiver_ptr = unsafe { T::timer_container_of(timer_ptr) };
+
+ // SAFETY:
+ // - By the safety requirement of this function, `timer_ptr`
+ // points to a `HrTimer<T>` contained in a `T`.
+ // - As per the safety requirements of the trait `HrTimerHandle`, the
+ // `PinMutHrTimerHandle` associated with this timer is guaranteed to
+ // be alive until this method returns. That handle borrows the `T`
+ // behind `receiver_ptr` mutably thus guaranteeing the validity of
+ // the reference created below.
+ let receiver_ref = unsafe { &mut *receiver_ptr };
+
+ // SAFETY: `receiver_ref` only exists as pinned, so it is safe to pin it
+ // here.
+ let receiver_pin = unsafe { Pin::new_unchecked(receiver_ref) };
+
+ T::run(receiver_pin).into_c()
+ }
+}
diff --git a/rust/kernel/time/hrtimer/tbox.rs b/rust/kernel/time/hrtimer/tbox.rs
new file mode 100644
index 000000000000..2071cae07234
--- /dev/null
+++ b/rust/kernel/time/hrtimer/tbox.rs
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::HasHrTimer;
+use super::HrTimer;
+use super::HrTimerCallback;
+use super::HrTimerHandle;
+use super::HrTimerPointer;
+use super::RawHrTimerCallback;
+use crate::prelude::*;
+use crate::time::Ktime;
+use core::ptr::NonNull;
+
+/// A handle for a [`Box<HasHrTimer<T>>`] returned by a call to
+/// [`HrTimerPointer::start`].
+///
+/// # Invariants
+///
+/// - `self.inner` comes from a `Box::into_raw` call.
+pub struct BoxHrTimerHandle<T, A>
+where
+ T: HasHrTimer<T>,
+ A: crate::alloc::Allocator,
+{
+ pub(crate) inner: NonNull<T>,
+ _p: core::marker::PhantomData<A>,
+}
+
+// SAFETY: We implement drop below, and we cancel the timer in the drop
+// implementation.
+unsafe impl<T, A> HrTimerHandle for BoxHrTimerHandle<T, A>
+where
+ T: HasHrTimer<T>,
+ A: crate::alloc::Allocator,
+{
+ fn cancel(&mut self) -> bool {
+ // SAFETY: As we obtained `self.inner` from a valid reference when we
+ // created `self`, it must point to a valid `T`.
+ let timer_ptr = unsafe { <T as HasHrTimer<T>>::raw_get_timer(self.inner.as_ptr()) };
+
+ // SAFETY: As `timer_ptr` points into `T` and `T` is valid, `timer_ptr`
+ // must point to a valid `HrTimer` instance.
+ unsafe { HrTimer::<T>::raw_cancel(timer_ptr) }
+ }
+}
+
+impl<T, A> Drop for BoxHrTimerHandle<T, A>
+where
+ T: HasHrTimer<T>,
+ A: crate::alloc::Allocator,
+{
+ fn drop(&mut self) {
+ self.cancel();
+ // SAFETY: By type invariant, `self.inner` came from a `Box::into_raw`
+ // call.
+ drop(unsafe { Box::<T, A>::from_raw(self.inner.as_ptr()) })
+ }
+}
+
+impl<T, A> HrTimerPointer for Pin<Box<T, A>>
+where
+ T: 'static,
+ T: Send + Sync,
+ T: HasHrTimer<T>,
+ T: for<'a> HrTimerCallback<Pointer<'a> = Pin<Box<T, A>>>,
+ A: crate::alloc::Allocator,
+{
+ type TimerHandle = BoxHrTimerHandle<T, A>;
+
+ fn start(self, expires: Ktime) -> Self::TimerHandle {
+ // SAFETY:
+ // - We will not move out of this box during the timer callback (the
+ //   callback only receives a pinned reference into the box).
+ // - `Box::into_raw` is guaranteed to return a valid pointer.
+ let inner =
+ unsafe { NonNull::new_unchecked(Box::into_raw(Pin::into_inner_unchecked(self))) };
+
+ // SAFETY:
+ // - We keep `self` alive by wrapping it in a handle below.
+ // - Since we generate the pointer passed to `start` from a valid
+ // reference, it is a valid pointer.
+ unsafe { T::start(inner.as_ptr(), expires) };
+
+ // INVARIANT: `inner` came from `Box::into_raw` above.
+ BoxHrTimerHandle {
+ inner,
+ _p: core::marker::PhantomData,
+ }
+ }
+}
+
+impl<T, A> RawHrTimerCallback for Pin<Box<T, A>>
+where
+ T: 'static,
+ T: HasHrTimer<T>,
+ T: for<'a> HrTimerCallback<Pointer<'a> = Pin<Box<T, A>>>,
+ A: crate::alloc::Allocator,
+{
+ type CallbackTarget<'a> = Pin<&'a mut T>;
+
+ unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
+ // `HrTimer` is `repr(C)`
+ let timer_ptr = ptr.cast::<super::HrTimer<T>>();
+
+ // SAFETY: By C API contract `ptr` is the pointer we passed when
+ // queuing the timer, so it is a `HrTimer<T>` embedded in a `T`.
+ let data_ptr = unsafe { T::timer_container_of(timer_ptr) };
+
+ // SAFETY:
+ // - As per the safety requirements of the trait `HrTimerHandle`, the
+ // `BoxHrTimerHandle` associated with this timer is guaranteed to
+ // be alive until this method returns. That handle owns the `T`
+ // behind `data_ptr` thus guaranteeing the validity of
+ // the reference created below.
+ // - As `data_ptr` comes from a `Pin<Box<T>>`, only pinned references to
+ // `data_ptr` exist.
+ let data_mut_ref = unsafe { Pin::new_unchecked(&mut *data_ptr) };
+
+ T::run(data_mut_ref).into_c()
+ }
+}
diff --git a/rust/kernel/tracepoint.rs b/rust/kernel/tracepoint.rs
new file mode 100644
index 000000000000..c6e80aa99e8e
--- /dev/null
+++ b/rust/kernel/tracepoint.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Logic for tracepoints.
+
+/// Declare the Rust entry point for a tracepoint.
+///
+/// This macro generates an unsafe function that calls into C, and its safety requirements will be
+/// whatever the relevant C code requires. To document these safety requirements, you may add
+/// doc-comments when invoking the macro.
+#[macro_export]
+macro_rules! declare_trace {
+ ($($(#[$attr:meta])* $pub:vis unsafe fn $name:ident($($argname:ident : $argtyp:ty),* $(,)?);)*) => {$(
+ $( #[$attr] )*
+ #[inline(always)]
+ $pub unsafe fn $name($($argname : $argtyp),*) {
+ #[cfg(CONFIG_TRACEPOINTS)]
+ {
+ // SAFETY: It's always okay to query the static key for a tracepoint.
+ let should_trace = unsafe {
+ $crate::macros::paste! {
+ $crate::jump_label::static_branch_unlikely!(
+ $crate::bindings::[< __tracepoint_ $name >],
+ $crate::bindings::tracepoint,
+ key
+ )
+ }
+ };
+
+ if should_trace {
+ $crate::macros::paste! {
+ // SAFETY: The caller guarantees that it is okay to call this tracepoint.
+ unsafe { $crate::bindings::[< rust_do_trace_ $name >]($($argname),*) };
+ }
+ }
+ }
+
+ #[cfg(not(CONFIG_TRACEPOINTS))]
+ {
+ // If tracepoints are disabled, insert a trivial use of each argument
+ // to avoid unused argument warnings.
+ $( let _unused = $argname; )*
+ }
+ }
+ )*}
+}
+
+pub use declare_trace;
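
A minimal sketch of invoking the macro follows. The tracepoint name `my_event` and its C
counterparts (`__tracepoint_my_event`, `rust_do_trace_my_event`) are hypothetical; the C side
must define them for the generated function to link.

```
use kernel::tracepoint::declare_trace;

declare_trace! {
    /// # Safety
    ///
    /// Callers must satisfy whatever the (hypothetical) C tracepoint
    /// `my_event` requires.
    pub unsafe fn my_event(count: u32);
}

fn report(count: u32) {
    // SAFETY: In this sketch, `my_event` has no preconditions beyond a valid
    // `u32` argument.
    unsafe { my_event(count) };
}
```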
diff --git a/rust/kernel/transmute.rs b/rust/kernel/transmute.rs
new file mode 100644
index 000000000000..1c7d43771a37
--- /dev/null
+++ b/rust/kernel/transmute.rs
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Traits for transmuting types.
+
+/// Types for which any bit pattern is valid.
+///
+/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so
+/// reading arbitrary bytes into something that contains a `bool` is not okay.
+///
+/// It's okay for the type to have padding, as initializing those bytes has no effect.
+///
+/// # Safety
+///
+/// All bit-patterns must be valid for this type. This type must not have interior mutability.
+pub unsafe trait FromBytes {}
+
+macro_rules! impl_frombytes {
+ ($($({$($generics:tt)*})? $t:ty, )*) => {
+ // SAFETY: Safety comments written in the macro invocation.
+ $(unsafe impl$($($generics)*)? FromBytes for $t {})*
+ };
+}
+
+impl_frombytes! {
+ // SAFETY: All bit patterns are acceptable values of the types below.
+ u8, u16, u32, u64, usize,
+ i8, i16, i32, i64, isize,
+
+ // SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit
+ // patterns are also acceptable for arrays of that type.
+ {<T: FromBytes>} [T],
+ {<T: FromBytes, const N: usize>} [T; N],
+}
+
+/// Types that can be viewed as an immutable slice of initialized bytes.
+///
+/// If a struct implements this trait, then it is okay to copy it byte-for-byte to userspace. This
+/// means that it should not have any padding, as padding bytes are uninitialized. Reading
+/// uninitialized memory is not just undefined behavior, it may even lead to leaking sensitive
+/// information on the stack to userspace.
+///
+/// The struct should also not hold kernel pointers, as kernel pointer addresses are also considered
+/// sensitive. However, leaking kernel pointers is not considered undefined behavior by Rust, so
+/// this is a correctness requirement, but not a safety requirement.
+///
+/// # Safety
+///
+/// Values of this type may not contain any uninitialized bytes. This type must not have interior
+/// mutability.
+pub unsafe trait AsBytes {}
+
+macro_rules! impl_asbytes {
+ ($($({$($generics:tt)*})? $t:ty, )*) => {
+ // SAFETY: Safety comments written in the macro invocation.
+ $(unsafe impl$($($generics)*)? AsBytes for $t {})*
+ };
+}
+
+impl_asbytes! {
+ // SAFETY: Instances of the following types have no uninitialized portions.
+ u8, u16, u32, u64, usize,
+ i8, i16, i32, i64, isize,
+ bool,
+ char,
+ str,
+
+ // SAFETY: If individual values in an array have no uninitialized portions, then the array
+ // itself does not have any uninitialized portions either.
+ {<T: AsBytes>} [T],
+ {<T: AsBytes, const N: usize>} [T; N],
+}
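
As a sketch of how a driver-defined type might opt in (the `PacketHeader` type is hypothetical):
any `#[repr(C)]` struct whose fields all accept arbitrary bit patterns and which has no padding
can implement both traits.

```
use kernel::transmute::{AsBytes, FromBytes};

/// A request header copied from userspace (hypothetical layout).
#[repr(C)]
struct PacketHeader {
    len: u32,
    flags: u16,
    kind: u16, // 4 + 2 + 2 bytes: no padding anywhere in the struct
}

// SAFETY: All fields are integers, so every bit pattern is a valid
// `PacketHeader`, and the type has no interior mutability.
unsafe impl FromBytes for PacketHeader {}

// SAFETY: The fields leave no padding (their sizes sum to the struct size),
// so a `PacketHeader` never contains uninitialized bytes.
unsafe impl AsBytes for PacketHeader {}
```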
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index aa77bad9bce4..eee387727d1a 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -2,15 +2,14 @@
//! Kernel types.
-use crate::init::{self, PinInit};
-use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
marker::{PhantomData, PhantomPinned},
- mem::MaybeUninit,
+ mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
ptr::NonNull,
};
+use pin_init::{PinInit, Zeroable};
/// Used to transfer ownership to and from foreign (non-Rust) languages.
///
@@ -20,32 +19,34 @@ use core::{
/// This trait is meant to be used in cases when Rust objects are stored in C objects and
/// eventually "freed" back to Rust.
pub trait ForeignOwnable: Sized {
- /// Type of values borrowed between calls to [`ForeignOwnable::into_foreign`] and
- /// [`ForeignOwnable::from_foreign`].
+ /// Type used to immutably borrow a value that is currently foreign-owned.
type Borrowed<'a>;
- /// Converts a Rust-owned object to a foreign-owned one.
- ///
- /// The foreign representation is a pointer to void.
- fn into_foreign(self) -> *const core::ffi::c_void;
+ /// Type used to mutably borrow a value that is currently foreign-owned.
+ type BorrowedMut<'a>;
- /// Borrows a foreign-owned object.
+ /// Converts a Rust-owned object to a foreign-owned one.
///
- /// # Safety
+ /// The foreign representation is a pointer to void. There are no guarantees for this pointer.
+ /// For example, it might be invalid, dangling or pointing to uninitialized memory. Using it in
+ /// any way except for [`from_foreign`], [`try_from_foreign`], [`borrow`], or [`borrow_mut`] can
+ /// result in undefined behavior.
///
- /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
- /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
- unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>;
+ /// [`from_foreign`]: Self::from_foreign
+ /// [`try_from_foreign`]: Self::try_from_foreign
+ /// [`borrow`]: Self::borrow
+ /// [`borrow_mut`]: Self::borrow_mut
+ fn into_foreign(self) -> *mut crate::ffi::c_void;
/// Converts a foreign-owned object back to a Rust-owned one.
///
/// # Safety
///
- /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
- /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
- /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for
- /// this object must have been dropped.
- unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self;
+ /// The provided pointer must have been returned by a previous call to [`into_foreign`], and it
+ /// must not be passed to `from_foreign` more than once.
+ ///
+ /// [`into_foreign`]: Self::into_foreign
+ unsafe fn from_foreign(ptr: *mut crate::ffi::c_void) -> Self;
/// Tries to convert a foreign-owned object back to a Rust-owned one.
///
@@ -54,9 +55,10 @@ pub trait ForeignOwnable: Sized {
///
/// # Safety
///
- /// `ptr` must either be null or satisfy the safety requirements for
- /// [`ForeignOwnable::from_foreign`].
- unsafe fn try_from_foreign(ptr: *const core::ffi::c_void) -> Option<Self> {
+ /// `ptr` must either be null or satisfy the safety requirements for [`from_foreign`].
+ ///
+ /// [`from_foreign`]: Self::from_foreign
+ unsafe fn try_from_foreign(ptr: *mut crate::ffi::c_void) -> Option<Self> {
if ptr.is_null() {
None
} else {
@@ -65,40 +67,63 @@ pub trait ForeignOwnable: Sized {
unsafe { Some(Self::from_foreign(ptr)) }
}
}
-}
-
-impl<T: 'static> ForeignOwnable for Box<T> {
- type Borrowed<'a> = &'a T;
- fn into_foreign(self) -> *const core::ffi::c_void {
- Box::into_raw(self) as _
- }
-
- unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> &'a T {
- // SAFETY: The safety requirements for this function ensure that the object is still alive,
- // so it is safe to dereference the raw pointer.
- // The safety requirements of `from_foreign` also ensure that the object remains alive for
- // the lifetime of the returned value.
- unsafe { &*ptr.cast() }
- }
+ /// Borrows a foreign-owned object immutably.
+ ///
+ /// This method provides a way to access a foreign-owned value from Rust immutably. It provides
+ /// you with exactly the same abilities as an `&Self` when the value is Rust-owned.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must have been returned by a previous call to [`into_foreign`], and if
+ /// the pointer is ever passed to [`from_foreign`], then that call must happen after the end of
+ /// the lifetime `'a`.
+ ///
+ /// [`into_foreign`]: Self::into_foreign
+ /// [`from_foreign`]: Self::from_foreign
+ unsafe fn borrow<'a>(ptr: *mut crate::ffi::c_void) -> Self::Borrowed<'a>;
- unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
- // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
- // call to `Self::into_foreign`.
- unsafe { Box::from_raw(ptr as _) }
- }
+ /// Borrows a foreign-owned object mutably.
+ ///
+ /// This method provides a way to access a foreign-owned value from Rust mutably. It provides
+ /// you with exactly the same abilities as an `&mut Self` when the value is Rust-owned, except
+ /// that the address of the object must not be changed.
+ ///
+ /// Note that for types like [`Arc`], an `&mut Arc<T>` only gives you immutable access to the
+ /// inner value, so this method also only provides immutable access in that case.
+ ///
+ /// In the case of `Box<T>`, this method gives you the ability to modify the inner `T`, but it
+ /// does not let you change the box itself. That is, you cannot change which allocation the box
+ /// points at.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must have been returned by a previous call to [`into_foreign`], and if
+ /// the pointer is ever passed to [`from_foreign`], then that call must happen after the end of
+ /// the lifetime `'a`.
+ ///
+ /// The lifetime `'a` must not overlap with the lifetime of any other call to [`borrow`] or
+ /// `borrow_mut` on the same object.
+ ///
+ /// [`into_foreign`]: Self::into_foreign
+ /// [`from_foreign`]: Self::from_foreign
+ /// [`borrow`]: Self::borrow
+ /// [`Arc`]: crate::sync::Arc
+ unsafe fn borrow_mut<'a>(ptr: *mut crate::ffi::c_void) -> Self::BorrowedMut<'a>;
}
impl ForeignOwnable for () {
type Borrowed<'a> = ();
+ type BorrowedMut<'a> = ();
- fn into_foreign(self) -> *const core::ffi::c_void {
+ fn into_foreign(self) -> *mut crate::ffi::c_void {
core::ptr::NonNull::dangling().as_ptr()
}
- unsafe fn borrow<'a>(_: *const core::ffi::c_void) -> Self::Borrowed<'a> {}
+ unsafe fn from_foreign(_: *mut crate::ffi::c_void) -> Self {}
- unsafe fn from_foreign(_: *const core::ffi::c_void) -> Self {}
+ unsafe fn borrow<'a>(_: *mut crate::ffi::c_void) -> Self::Borrowed<'a> {}
+ unsafe fn borrow_mut<'a>(_: *mut crate::ffi::c_void) -> Self::BorrowedMut<'a> {}
}
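
The intended round trip looks roughly like the sketch below, assuming the `KBox<u32>`
implementation added elsewhere in this series (with `Borrowed<'a> = &'a u32`); `register`,
`on_event` and `unregister` are hypothetical driver hooks.

```
use kernel::prelude::*;
use kernel::types::ForeignOwnable;

fn register(data: KBox<u32>) -> *mut core::ffi::c_void {
    // Ownership moves to the C side; only the listed methods may use the pointer.
    data.into_foreign()
}

/// # Safety
///
/// `ptr` must come from `register`, and `unregister` must not run before this
/// call returns.
unsafe fn on_event(ptr: *mut core::ffi::c_void) {
    // SAFETY: Per this function's contract, `from_foreign` is not called
    // before the borrow ends.
    let value = unsafe { <KBox<u32> as ForeignOwnable>::borrow(ptr) };
    pr_info!("value: {}\n", *value);
}

/// # Safety
///
/// `ptr` must come from `register` and must not be used again afterwards.
unsafe fn unregister(ptr: *mut core::ffi::c_void) {
    // SAFETY: This is the single matching `from_foreign` call.
    drop(unsafe { <KBox<u32> as ForeignOwnable>::from_foreign(ptr) });
}
```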
/// Runs a cleanup function/closure when dropped.
@@ -155,13 +180,13 @@ impl ForeignOwnable for () {
/// # use kernel::types::ScopeGuard;
/// fn example3(arg: bool) -> Result {
/// let mut vec =
-/// ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
+/// ScopeGuard::new_with_data(KVec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
///
-/// vec.try_push(10u8)?;
+/// vec.push(10u8, GFP_KERNEL)?;
/// if arg {
/// return Ok(());
/// }
-/// vec.try_push(20u8)?;
+/// vec.push(20u8, GFP_KERNEL)?;
/// Ok(())
/// }
///
@@ -195,7 +220,7 @@ impl<T, F: FnOnce(T)> ScopeGuard<T, F> {
impl ScopeGuard<(), fn(())> {
/// Creates a new guarded object with the given cleanup function.
pub fn new(cleanup: impl FnOnce()) -> ScopeGuard<(), impl FnOnce(())> {
- ScopeGuard::new_with_data((), move |_| cleanup())
+ ScopeGuard::new_with_data((), move |()| cleanup())
}
}
@@ -226,13 +251,67 @@ impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
/// Stores an opaque value.
///
-/// This is meant to be used with FFI objects that are never interpreted by Rust code.
+/// [`Opaque<T>`] is meant to be used with FFI objects that are never interpreted by Rust code.
+///
+/// It is used to wrap structs from the C side, like for example `Opaque<bindings::mutex>`.
+/// It gets rid of all the usual assumptions that Rust has for a value:
+///
+/// * The value is allowed to be uninitialized (for example have invalid bit patterns: `3` for a
+/// [`bool`]).
+/// * The value is allowed to be mutated, when a `&Opaque<T>` exists on the Rust side.
+/// * No uniqueness for mutable references: it is fine to have multiple `&mut Opaque<T>` point to
+/// the same value.
+/// * The value is not allowed to be shared with other threads (i.e. it is `!Sync`).
+///
+/// This has to be used for all values that the C side has access to, because it cannot be
+/// guaranteed that the C side adheres to the usual constraints that Rust needs.
+///
+/// Using [`Opaque<T>`] makes it possible to keep using references on the Rust side even for
+/// values shared with C.
+///
+/// # Examples
+///
+/// ```
+/// # #![expect(unreachable_pub, clippy::disallowed_names)]
+/// use kernel::types::Opaque;
+/// # // Emulate a C struct binding which is from C, maybe uninitialized or not, only the C side
+/// # // knows.
+/// # mod bindings {
+/// # pub struct Foo {
+/// # pub val: u8,
+/// # }
+/// # }
+///
+/// // `foo.val` is assumed to be handled on the C side, so we use `Opaque` to wrap it.
+/// pub struct Foo {
+/// foo: Opaque<bindings::Foo>,
+/// }
+///
+/// impl Foo {
+/// pub fn get_val(&self) -> u8 {
+/// let ptr = Opaque::get(&self.foo);
+///
+/// // SAFETY: `Self` is valid from C side.
+/// unsafe { (*ptr).val }
+/// }
+/// }
+///
+/// // Create an instance of `Foo` with the `Opaque` wrapper.
+/// let foo = Foo {
+/// foo: Opaque::new(bindings::Foo { val: 0xdb }),
+/// };
+///
+/// assert_eq!(foo.get_val(), 0xdb);
+/// ```
#[repr(transparent)]
pub struct Opaque<T> {
value: UnsafeCell<MaybeUninit<T>>,
_pin: PhantomPinned,
}
+// SAFETY: `Opaque<T>` allows the inner value to be any bit pattern, including all zeros.
+unsafe impl<T> Zeroable for Opaque<T> {}
+
impl<T> Opaque<T> {
/// Creates a new opaque value.
pub const fn new(value: T) -> Self {
@@ -250,6 +329,25 @@ impl<T> Opaque<T> {
}
}
+ /// Creates a new zeroed opaque value.
+ pub const fn zeroed() -> Self {
+ Self {
+ value: UnsafeCell::new(MaybeUninit::zeroed()),
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Create an opaque pin-initializer from the given pin-initializer.
+ pub fn pin_init(slot: impl PinInit<T>) -> impl PinInit<Self> {
+ Self::ffi_init(|ptr: *mut T| {
+ // SAFETY:
+ // - `ptr` is a valid pointer to uninitialized memory,
+ // - `slot` is not accessed on error; the call is infallible,
+ // - `slot` is pinned in memory.
+ let _ = unsafe { PinInit::<T>::__pinned_init(slot, ptr) };
+ })
+ }
+
/// Creates a pin-initializer from the given initializer closure.
///
/// The returned initializer calls the given closure with the pointer to the inner `T` of this
@@ -262,15 +360,33 @@ impl<T> Opaque<T> {
// SAFETY: We contain a `MaybeUninit`, so it is OK for the `init_func` to not fully
// initialize the `T`.
unsafe {
- init::pin_init_from_closure::<_, ::core::convert::Infallible>(move |slot| {
+ pin_init::pin_init_from_closure::<_, ::core::convert::Infallible>(move |slot| {
init_func(Self::raw_get(slot));
Ok(())
})
}
}
+ /// Creates a fallible pin-initializer from the given initializer closure.
+ ///
+ /// The returned initializer calls the given closure with the pointer to the inner `T` of this
+ /// `Opaque`. Since this memory is uninitialized, the closure is not allowed to read from it.
+ ///
+ /// This function is safe, because the `T` inside of an `Opaque` is allowed to be
+ /// uninitialized. Additionally, access to the inner `T` requires `unsafe`, so the caller needs
+ /// to verify at that point that the inner value is valid.
+ pub fn try_ffi_init<E>(
+ init_func: impl FnOnce(*mut T) -> Result<(), E>,
+ ) -> impl PinInit<Self, E> {
+ // SAFETY: We contain a `MaybeUninit`, so it is OK for the `init_func` to not fully
+ // initialize the `T`.
+ unsafe {
+ pin_init::pin_init_from_closure::<_, E>(move |slot| init_func(Self::raw_get(slot)))
+ }
+ }
+
/// Returns a raw pointer to the opaque data.
- pub fn get(&self) -> *mut T {
+ pub const fn get(&self) -> *mut T {
UnsafeCell::get(&self.value).cast::<T>()
}
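
For instance, a fallible C initializer could be wrapped with `try_ffi_init` like this (the
`bindings::foo` type and `foo_init` function are hypothetical):

```
use kernel::bindings;
use kernel::error::to_result;
use kernel::prelude::*;
use kernel::types::Opaque;

fn foo_new() -> impl PinInit<Opaque<bindings::foo>, Error> {
    Opaque::try_ffi_init(|slot: *mut bindings::foo| {
        // SAFETY: `slot` is valid for writes, and the hypothetical `foo_init`
        // fully initializes `*slot` when it returns 0.
        to_result(unsafe { bindings::foo_init(slot) })
    })
}
```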
@@ -366,6 +482,37 @@ impl<T: AlwaysRefCounted> ARef<T> {
_p: PhantomData,
}
}
+
+ /// Consumes the `ARef`, returning a raw pointer.
+ ///
+ /// This function does not change the refcount. After calling this function, the caller is
+ /// responsible for the refcount previously managed by the `ARef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::ptr::NonNull;
+ /// use kernel::types::{ARef, AlwaysRefCounted};
+ ///
+ /// struct Empty {}
+ ///
+ /// # // SAFETY: TODO.
+ /// unsafe impl AlwaysRefCounted for Empty {
+ /// fn inc_ref(&self) {}
+ /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
+ /// }
+ ///
+ /// let mut data = Empty {};
+ /// let ptr = NonNull::<Empty>::new(&mut data).unwrap();
+ /// # // SAFETY: TODO.
+ /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+ /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+ ///
+ /// assert_eq!(ptr, raw_ptr);
+ /// ```
+ pub fn into_raw(me: Self) -> NonNull<T> {
+ ManuallyDrop::new(me).ptr
+ }
}
impl<T: AlwaysRefCounted> Clone for ARef<T> {
@@ -402,6 +549,15 @@ impl<T: AlwaysRefCounted> Drop for ARef<T> {
}
/// A sum type that always holds either a value of type `L` or `R`.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::types::Either;
+///
+/// let left_value: Either<i32, &str> = Either::Left(7);
+/// let right_value: Either<i32, &str> = Either::Right("right value");
+/// ```
pub enum Either<L, R> {
/// Constructs an instance of [`Either`] containing a value of type `L`.
Left(L),
@@ -409,3 +565,24 @@ pub enum Either<L, R> {
/// Constructs an instance of [`Either`] containing a value of type `R`.
Right(R),
}
+
+/// Zero-sized type to mark types not [`Send`].
+///
+/// Add this type as a field to your struct if your type should not be sent to a different task.
+/// Since [`Send`] is an auto trait, adding a single field that is `!Send` will ensure that the
+/// whole type is `!Send`.
+///
+/// If a type is `!Send` it is impossible to give control over an instance of the type to another
+/// task. This is useful to include in types that store or reference task-local information. A file
+/// descriptor is an example of such task-local information.
+///
+/// This type also makes the type `!Sync`, which prevents immutable access to the value from
+/// several threads in parallel.
+pub type NotThreadSafe = PhantomData<*mut ()>;
+
+/// Used to construct instances of type [`NotThreadSafe`] similar to how `PhantomData` is
+/// constructed.
+///
+/// [`NotThreadSafe`]: type@NotThreadSafe
+#[allow(non_upper_case_globals)]
+pub const NotThreadSafe: NotThreadSafe = PhantomData;
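
A sketch of the intended use (the `FdCache` type is hypothetical):

```
use kernel::types::NotThreadSafe;

/// Caches a file descriptor, which is only meaningful in the owning task.
struct FdCache {
    fd: i32,
    // Marker field: being `!Send + !Sync`, it keeps `FdCache` on one task.
    _not_thread_safe: NotThreadSafe,
}

fn cache_fd(fd: i32) -> FdCache {
    FdCache {
        fd,
        _not_thread_safe: NotThreadSafe,
    }
}
```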
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
new file mode 100644
index 000000000000..80a9782b1c6e
--- /dev/null
+++ b/rust/kernel/uaccess.rs
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Slices to user space memory regions.
+//!
+//! C header: [`include/linux/uaccess.h`](srctree/include/linux/uaccess.h)
+
+use crate::{
+ alloc::{Allocator, Flags},
+ bindings,
+ error::Result,
+ ffi::c_void,
+ prelude::*,
+ transmute::{AsBytes, FromBytes},
+};
+use core::mem::{size_of, MaybeUninit};
+
+/// The type used for userspace addresses.
+pub type UserPtr = usize;
+
+/// A pointer to an area in userspace memory, which can be either read-only or read-write.
+///
+/// All methods on this struct are safe: attempting to read or write on bad addresses (either out of
+/// the bound of the slice or unmapped addresses) will return [`EFAULT`]. Concurrent access,
+/// *including data races to/from userspace memory*, is permitted, because fundamentally another
+/// userspace thread/process could always be modifying memory at the same time (in the same way that
+/// userspace Rust's [`std::io`] permits data races with the contents of files on disk). In the
+/// presence of a race, the exact byte values read/written are unspecified but the operation is
+/// well-defined. Kernelspace code should validate its copy of data after completing a read, and not
+/// expect that multiple reads of the same address will return the same value.
+///
+/// These APIs are designed to make it difficult to accidentally write TOCTOU (time-of-check to
+/// time-of-use) bugs. Every time a memory location is read, the reader's position is advanced by
+/// the read length and the next read will start from there. This helps prevent accidentally reading
+/// the same location twice and causing a TOCTOU bug.
+///
+/// Creating a [`UserSliceReader`] and/or [`UserSliceWriter`] consumes the `UserSlice`, helping
+/// ensure that there aren't multiple readers or writers to the same location.
+///
+/// If double-fetching a memory location is necessary for some reason, then that is done by creating
+/// multiple readers to the same memory location, e.g. using [`clone_reader`].
+///
+/// # Examples
+///
+/// Takes a region of userspace memory from the current process and modifies it by adding one to
+/// every byte in the region.
+///
+/// ```no_run
+/// use kernel::ffi::c_void;
+/// use kernel::error::Result;
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result<()> {
+/// let (read, mut write) = UserSlice::new(uptr, len).reader_writer();
+///
+/// let mut buf = KVec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// for b in &mut buf {
+/// *b = b.wrapping_add(1);
+/// }
+///
+/// write.write_slice(&buf)?;
+/// Ok(())
+/// }
+/// ```
+///
+/// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.
+///
+/// ```no_run
+/// use kernel::ffi::c_void;
+/// use kernel::error::{code::EINVAL, Result};
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// /// Returns whether the data in this region is valid.
+/// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = KVec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// todo!()
+/// }
+///
+/// /// Returns the bytes behind this user pointer if they are valid.
+/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<KVec<u8>> {
+/// if !is_valid(uptr, len)? {
+/// return Err(EINVAL);
+/// }
+///
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = KVec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// // THIS IS A BUG! The bytes could have changed since we checked them.
+/// //
+/// // To avoid this kind of bug, don't call `UserSlice::new` multiple
+/// // times with the same address.
+/// Ok(buf)
+/// }
+/// ```
+///
+/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
+/// [`clone_reader`]: UserSliceReader::clone_reader
+pub struct UserSlice {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSlice {
+ /// Constructs a user slice from a raw pointer and a length in bytes.
+ ///
+ /// Constructing a [`UserSlice`] performs no checks on the provided address and length, it can
+ /// safely be constructed inside a kernel thread with no current userspace process. Reads and
+ /// writes wrap the kernel APIs `copy_from_user` and `copy_to_user`, which check the memory map
+ /// of the current process and enforce that the address range is within the user range (no
+ /// additional calls to `access_ok` are needed). Validity of the pointer is checked when you
+ /// attempt to read or write, not in the call to `UserSlice::new`.
+ ///
+ /// Callers must be careful to avoid time-of-check-time-of-use (TOCTOU) issues. The simplest way
+ /// is to create a single instance of [`UserSlice`] per user memory block as it reads each byte
+ /// at most once.
+ pub fn new(ptr: UserPtr, length: usize) -> Self {
+ UserSlice { ptr, length }
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all<A: Allocator>(self, buf: &mut Vec<u8, A>, flags: Flags) -> Result {
+ self.reader().read_all(buf, flags)
+ }
+
+ /// Constructs a [`UserSliceReader`].
+ pub fn reader(self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs a [`UserSliceWriter`].
+ pub fn writer(self) -> UserSliceWriter {
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs both a [`UserSliceReader`] and a [`UserSliceWriter`].
+ ///
+ /// Usually when this is used, you will first read the data and then overwrite it.
+ pub fn reader_writer(self) -> (UserSliceReader, UserSliceWriter) {
+ (
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ )
+ }
+}
+
+/// A reader for [`UserSlice`].
+///
+/// Used to incrementally read from the user slice.
+pub struct UserSliceReader {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceReader {
+ /// Skip the provided number of bytes.
+ ///
+ /// Fails with [`EFAULT`] if `num_skip` is greater than the number of bytes left to read.
+ pub fn skip(&mut self, num_skip: usize) -> Result {
+ // Update `self.length` first since that's the fallible part of this operation.
+ self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
+ self.ptr = self.ptr.wrapping_add(num_skip);
+ Ok(())
+ }
+
+ /// Create a reader that can access the same range of data.
+ ///
+ /// Reading from the clone does not advance the current reader.
+ ///
+ /// The caller should take care to not introduce TOCTOU issues, as described in the
+ /// documentation for [`UserSlice`].
+ pub fn clone_reader(&self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Returns the number of bytes left to be read from this reader.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if there is no more data left to read from this reader.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// For a version that uses `&mut [u8]`, please see [`UserSliceReader::read_slice`].
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ ///
+ /// # Guarantees
+ ///
+ /// After a successful call to this method, all bytes in `out` are initialized.
+ pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
+ let len = out.len();
+ let out_ptr = out.as_mut_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ // SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write
+ // that many bytes to it.
+ let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
+ // SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
+ // `out`.
+ let out = unsafe { &mut *(out as *mut [u8] as *mut [MaybeUninit<u8>]) };
+ self.read_raw(out)
+ }
+
+ /// Reads a value of the specified type.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`].
+ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let mut out: MaybeUninit<T> = MaybeUninit::uninit();
+ // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
+ //
+ // By using the _copy_from_user variant, we skip the check_object_size check that verifies
+ // the kernel pointer. This mirrors the logic on the C side that skips the check when the
+ // length is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_from_user(
+ out.as_mut_ptr().cast::<c_void>(),
+ self.ptr as *const c_void,
+ len,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ // SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
+ // `FromBytes`, any bit-pattern is a valid value for this type.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all<A: Allocator>(mut self, buf: &mut Vec<u8, A>, flags: Flags) -> Result {
+ let len = self.length;
+ buf.reserve(len, flags)?;
+
+ // The call to `reserve` was successful, so the spare capacity is at least `len` bytes long.
+ self.read_raw(&mut buf.spare_capacity_mut()[..len])?;
+
+ // SAFETY: Since the call to `read_raw` was successful, the next `len` bytes of the
+ // vector have been initialized.
+ unsafe { buf.set_len(buf.len() + len) };
+ Ok(())
+ }
+}
+
+/// A writer for [`UserSlice`].
+///
+/// Used to incrementally write into the user slice.
+pub struct UserSliceWriter {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceWriter {
+ /// Returns the amount of space remaining in this buffer.
+ ///
+ /// Note that even writing less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if no more data can be written to this buffer.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Writes raw data to this user pointer from a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write_slice(&mut self, data: &[u8]) -> Result {
+ let len = data.len();
+ let data_ptr = data.as_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ // SAFETY: `data_ptr` points into an immutable slice of length `len`, so we may read
+ // that many bytes from it.
+ let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Writes the provided Rust value to this userspace pointer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ // SAFETY: The reference points to a value of type `T`, so it is valid for reading
+ // `size_of::<T>()` bytes.
+ //
+ // By using the _copy_to_user variant, we skip the check_object_size check that verifies the
+ // kernel pointer. This mirrors the logic on the C side that skips the check when the length
+ // is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_to_user(
+ self.ptr as *mut c_void,
+ (value as *const T).cast::<c_void>(),
+ len,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+}
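
Putting the pieces together, a handler might read a fixed `FromBytes` header followed by a
variable payload in one pass. A sketch, reusing the hypothetical `PacketHeader` from the
`transmute` example above:

```
use kernel::prelude::*;
use kernel::uaccess::{UserPtr, UserSlice};

fn read_request(uptr: UserPtr, len: usize) -> Result<(PacketHeader, KVec<u8>)> {
    let mut reader = UserSlice::new(uptr, len).reader();

    // Typed read: advances the reader by `size_of::<PacketHeader>()` bytes.
    let header: PacketHeader = reader.read()?;

    // The rest of the slice is the payload; `read_all` consumes the reader.
    let mut payload = KVec::new();
    reader.read_all(&mut payload, GFP_KERNEL)?;

    Ok((header, payload))
}
```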
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 480cb292e7c2..f98bd02b838f 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -33,7 +33,6 @@
//! we do not need to specify ids for the fields.
//!
//! ```
-//! use kernel::prelude::*;
//! use kernel::sync::Arc;
//! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
//!
@@ -53,7 +52,7 @@
//! Arc::pin_init(pin_init!(MyStruct {
//! value,
//! work <- new_work!("MyStruct::work"),
-//! }))
+//! }), GFP_KERNEL)
//! }
//! }
//!
@@ -61,7 +60,7 @@
//! type Pointer = Arc<MyStruct>;
//!
//! fn run(this: Arc<MyStruct>) {
-//! pr_info!("The value is: {}", this.value);
+//! pr_info!("The value is: {}\n", this.value);
//! }
//! }
//!
@@ -70,12 +69,12 @@
//! fn print_later(val: Arc<MyStruct>) {
//! let _ = workqueue::system().enqueue(val);
//! }
+//! # print_later(MyStruct::new(42).unwrap());
//! ```
//!
//! The following example shows how multiple `work_struct` fields can be used:
//!
//! ```
-//! use kernel::prelude::*;
//! use kernel::sync::Arc;
//! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
//!
@@ -101,7 +100,7 @@
//! value_2,
//! work_1 <- new_work!("MyStruct::work_1"),
//! work_2 <- new_work!("MyStruct::work_2"),
-//! }))
+//! }), GFP_KERNEL)
//! }
//! }
//!
@@ -109,7 +108,7 @@
//! type Pointer = Arc<MyStruct>;
//!
//! fn run(this: Arc<MyStruct>) {
-//! pr_info!("The value is: {}", this.value_1);
+//! pr_info!("The value is: {}\n", this.value_1);
//! }
//! }
//!
@@ -117,7 +116,7 @@
//! type Pointer = Arc<MyStruct>;
//!
//! fn run(this: Arc<MyStruct>) {
-//! pr_info!("The second value is: {}", this.value_2);
+//! pr_info!("The second value is: {}\n", this.value_2);
//! }
//! }
//!
@@ -128,15 +127,15 @@
//! fn print_2_later(val: Arc<MyStruct>) {
//! let _ = workqueue::system().enqueue::<Arc<MyStruct>, 2>(val);
//! }
+//! # print_1_later(MyStruct::new(24, 25).unwrap());
+//! # print_2_later(MyStruct::new(41, 42).unwrap());
//! ```
//!
//! C header: [`include/linux/workqueue.h`](srctree/include/linux/workqueue.h)
-use crate::{bindings, prelude::*, sync::Arc, sync::LockClassKey, types::Opaque};
-use alloc::alloc::AllocError;
-use alloc::boxed::Box;
+use crate::alloc::{AllocError, Flags};
+use crate::{prelude::*, sync::Arc, sync::LockClassKey, types::Opaque};
use core::marker::PhantomData;
-use core::pin::Pin;
/// Creates a [`Work`] initialiser with the given name and a newly-created lock class.
#[macro_export]
@@ -210,13 +209,17 @@ impl Queue {
/// Tries to spawn the given function or closure as a work item.
///
/// This method can fail because it allocates memory to store the work item.
- pub fn try_spawn<T: 'static + Send + FnOnce()>(&self, func: T) -> Result<(), AllocError> {
+ pub fn try_spawn<T: 'static + Send + FnOnce()>(
+ &self,
+ flags: Flags,
+ func: T,
+ ) -> Result<(), AllocError> {
let init = pin_init!(ClosureWork {
work <- new_work!("Queue::try_spawn"),
func: Some(func),
});
- self.enqueue(Box::pin_init(init).map_err(|_| AllocError)?);
+ self.enqueue(KBox::pin_init(init, flags).map_err(|_| AllocError)?);
Ok(())
}
}
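
With the new signature, callers pick the allocation flags at the call site. A sketch:

```
use kernel::prelude::*;
use kernel::workqueue;

fn defer_log() -> Result<(), kernel::alloc::AllocError> {
    // The closure is boxed with `GFP_KERNEL` and queued on the system workqueue.
    workqueue::system().try_spawn(GFP_KERNEL, || pr_info!("deferred work ran\n"))
}
```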
@@ -239,9 +242,9 @@ impl<T> ClosureWork<T> {
}
impl<T: FnOnce()> WorkItem for ClosureWork<T> {
- type Pointer = Pin<Box<Self>>;
+ type Pointer = Pin<KBox<Self>>;
- fn run(mut this: Pin<Box<Self>>) {
+ fn run(mut this: Pin<KBox<Self>>) {
if let Some(func) = this.as_mut().project().take() {
(func)()
}
@@ -297,7 +300,7 @@ pub unsafe trait RawWorkItem<const ID: u64> {
/// Defines the method that should be called directly when a work item is executed.
///
-/// This trait is implemented by `Pin<Box<T>>` and [`Arc<T>`], and is mainly intended to be
+/// This trait is implemented by `Pin<KBox<T>>` and [`Arc<T>`], and is mainly intended to be
/// implemented for smart pointer types. For your own structs, you would implement [`WorkItem`]
/// instead. The [`run`] method on this trait will usually just perform the appropriate
/// `container_of` translation and then call into the [`run`][WorkItem::run] method from the
@@ -329,7 +332,7 @@ pub unsafe trait WorkItemPointer<const ID: u64>: RawWorkItem<ID> {
/// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
pub trait WorkItem<const ID: u64 = 0> {
/// The pointer type that this struct is wrapped in. This will typically be `Arc<Self>` or
- /// `Pin<Box<Self>>`.
+ /// `Pin<KBox<Self>>`.
type Pointer: WorkItemPointer<ID>;
/// The method that should be called when this work item is executed.
@@ -346,8 +349,10 @@ pub trait WorkItem<const ID: u64 = 0> {
/// This is a helper type used to associate a `work_struct` with the [`WorkItem`] that uses it.
///
/// [`run`]: WorkItemPointer::run
+#[pin_data]
#[repr(transparent)]
pub struct Work<T: ?Sized, const ID: u64 = 0> {
+ #[pin]
work: Opaque<bindings::work_struct>,
_inner: PhantomData<T>,
}
@@ -364,26 +369,26 @@ unsafe impl<T: ?Sized, const ID: u64> Sync for Work<T, ID> {}
impl<T: ?Sized, const ID: u64> Work<T, ID> {
/// Creates a new instance of [`Work`].
#[inline]
- #[allow(clippy::new_ret_no_self)]
- pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self>
+ pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self>
where
T: WorkItem<ID>,
{
- // SAFETY: The `WorkItemPointer` implementation promises that `run` can be used as the work
- // item function.
- unsafe {
- kernel::init::pin_init_from_closure(move |slot| {
- let slot = Self::raw_get(slot);
- bindings::init_work_with_key(
- slot,
- Some(T::Pointer::run),
- false,
- name.as_char_ptr(),
- key.as_ptr(),
- );
- Ok(())
- })
- }
+ pin_init!(Self {
+ work <- Opaque::ffi_init(|slot| {
+ // SAFETY: The `WorkItemPointer` implementation promises that `run` can be used as
+ // the work item function.
+ unsafe {
+ bindings::init_work_with_key(
+ slot,
+ Some(T::Pointer::run),
+ false,
+ name.as_char_ptr(),
+ key.as_ptr(),
+ )
+ }
+ }),
+ _inner: PhantomData,
+ })
}
/// Get a pointer to the inner `work_struct`.
@@ -408,7 +413,6 @@ impl<T: ?Sized, const ID: u64> Work<T, ID> {
/// like this:
///
/// ```no_run
-/// use kernel::prelude::*;
/// use kernel::workqueue::{impl_has_work, Work};
///
/// struct MyWorkItem {
@@ -480,24 +484,26 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// use kernel::sync::Arc;
/// use kernel::workqueue::{self, impl_has_work, Work};
///
-/// struct MyStruct {
-/// work_field: Work<MyStruct, 17>,
+/// struct MyStruct<'a, T, const N: usize> {
+/// work_field: Work<MyStruct<'a, T, N>, 17>,
+/// f: fn(&'a [T; N]),
/// }
///
/// impl_has_work! {
-/// impl HasWork<MyStruct, 17> for MyStruct { self.work_field }
+/// impl{'a, T, const N: usize} HasWork<MyStruct<'a, T, N>, 17>
+/// for MyStruct<'a, T, N> { self.work_field }
/// }
/// ```
#[macro_export]
macro_rules! impl_has_work {
- ($(impl$(<$($implarg:ident),*>)?
+ ($(impl$({$($generics:tt)*})?
HasWork<$work_type:ty $(, $id:tt)?>
- for $self:ident $(<$($selfarg:ident),*>)?
+ for $self:ty
{ self.$field:ident }
)*) => {$(
// SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
// type.
- unsafe impl$(<$($implarg),*>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self $(<$($selfarg),*>)? {
+ unsafe impl$(<$($generics)+>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self {
const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
#[inline]
@@ -513,16 +519,25 @@ macro_rules! impl_has_work {
pub use impl_has_work;
impl_has_work! {
- impl<T> HasWork<Self> for ClosureWork<T> { self.work }
+ impl{T} HasWork<Self> for ClosureWork<T> { self.work }
}
+// SAFETY: The `__enqueue` implementation in RawWorkItem uses a `work_struct` initialized with the
+// `run` method of this trait as the function pointer because:
+// - `__enqueue` gets the `work_struct` from the `Work` field, using `T::raw_get_work`.
+// - The only safe way to create a `Work` object is through `Work::new`.
+// - `Work::new` makes sure that `T::Pointer::run` is passed to `init_work_with_key`.
+// - Finally `Work` and `RawWorkItem` guarantee that the correct `Work` field
+// will be used because of the ID const generic bound. This makes sure that `T::raw_get_work`
+// uses the correct offset for the `Work` field, and `Work::new` picks the correct
+// implementation of `WorkItemPointer` for `Arc<T>`.
unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
where
T: WorkItem<ID, Pointer = Self>,
T: HasWork<T, ID>,
{
unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
- // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+ // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
let ptr = ptr as *mut Work<T, ID>;
// SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
let ptr = unsafe { T::work_container_of(ptr) };
@@ -533,6 +548,13 @@ where
}
}
+// SAFETY: The `work_struct` raw pointer is guaranteed to be valid for the duration of the call to
+// the closure because we get it from an `Arc`, which means that the ref count will be at least 1,
+// and we don't drop the `Arc` ourselves. If `queue_work_on` returns true, it is further guaranteed
+// to be valid until a call to the function pointer in `work_struct` because we leak the memory it
+// points to, and only reclaim it if the closure returns false, or in `WorkItemPointer::run`, which
+// is what the function pointer in the `work_struct` must be pointing to, according to the safety
+// requirements of `WorkItemPointer`.
unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
where
T: WorkItem<ID, Pointer = Self>,
@@ -561,18 +583,19 @@ where
}
}
-unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<Box<T>>
+// SAFETY: TODO.
+unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<KBox<T>>
where
T: WorkItem<ID, Pointer = Self>,
T: HasWork<T, ID>,
{
unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
- // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+ // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
let ptr = ptr as *mut Work<T, ID>;
// SAFETY: This computes the pointer that `__enqueue` got from `KBox::into_raw`.
let ptr = unsafe { T::work_container_of(ptr) };
// SAFETY: This pointer comes from `KBox::into_raw` and we've been given back ownership.
- let boxed = unsafe { Box::from_raw(ptr) };
+ let boxed = unsafe { KBox::from_raw(ptr) };
// SAFETY: The box was already pinned when it was enqueued.
let pinned = unsafe { Pin::new_unchecked(boxed) };
@@ -580,7 +603,8 @@ where
}
}
-unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<Box<T>>
+// SAFETY: TODO.
+unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<KBox<T>>
where
T: WorkItem<ID, Pointer = Self>,
T: HasWork<T, ID>,
@@ -594,9 +618,9 @@ where
// SAFETY: We're not going to move `self` or any of its fields, so it's okay to temporarily
// remove the `Pin` wrapper.
let boxed = unsafe { Pin::into_inner_unchecked(self) };
- let ptr = Box::into_raw(boxed);
+ let ptr = KBox::into_raw(boxed);
- // SAFETY: Pointers into a `Box` point at a valid value.
+ // SAFETY: Pointers into a `KBox` point at a valid value.
let work_ptr = unsafe { T::raw_get_work(ptr) };
// SAFETY: `raw_get_work` returns a pointer to a valid value.
let work_ptr = unsafe { Work::raw_get(work_ptr) };
@@ -679,3 +703,21 @@ pub fn system_freezable_power_efficient() -> &'static Queue {
// SAFETY: `system_freezable_power_efficient_wq` is a C global, always available.
unsafe { Queue::from_raw(bindings::system_freezable_power_efficient_wq) }
}
+
+/// Returns the system bottom halves work queue (`system_bh_wq`).
+///
+/// It is similar to the one returned by [`system`] but for work items which
+/// need to run from a softirq context.
+pub fn system_bh() -> &'static Queue {
+ // SAFETY: `system_bh_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_bh_wq) }
+}
+
+/// Returns the system bottom halves high-priority work queue (`system_bh_highpri_wq`).
+///
+/// It is similar to the one returned by [`system_bh`] but for work items which
+/// require higher scheduling priority.
+pub fn system_bh_highpri() -> &'static Queue {
+ // SAFETY: `system_bh_highpri_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_bh_highpri_wq) }
+}
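
Enqueueing on these queues reuses the existing `WorkItem` machinery; a sketch extending the
first module example (so `MyStruct` and its `WorkItem` implementation are assumed from there):

```
use kernel::sync::Arc;
use kernel::workqueue;

fn print_in_softirq(val: Arc<MyStruct>) {
    // Same work item as `print_later`, but run from bottom-half context.
    let _ = workqueue::system_bh().enqueue(val);
}
```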
diff --git a/rust/macros/export.rs b/rust/macros/export.rs
new file mode 100644
index 000000000000..a08f6337d5c8
--- /dev/null
+++ b/rust/macros/export.rs
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::helpers::function_name;
+use proc_macro::TokenStream;
+
+/// Please see [`crate::export`] for documentation.
+pub(crate) fn export(_attr: TokenStream, ts: TokenStream) -> TokenStream {
+ let Some(name) = function_name(ts.clone()) else {
+ return "::core::compile_error!(\"The #[export] attribute must be used on a function.\");"
+ .parse::<TokenStream>()
+ .unwrap();
+ };
+
+ // This verifies that the function has the same signature as the declaration generated by
+ // bindgen. It makes use of the fact that all branches of an if/else must have the same type.
+ let signature_check = quote!(
+ const _: () = {
+ if true {
+ ::kernel::bindings::#name
+ } else {
+ #name
+ };
+ };
+ );
+
+ let no_mangle = quote!(#[no_mangle]);
+
+ TokenStream::from_iter([signature_check, no_mangle, ts])
+}
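
The signature check relies only on standard coercion rules; a standalone sketch with
hypothetical function names shows the idea:

```
fn declaration(x: u32) -> u32 { x }
fn definition(x: u32) -> u32 { x.wrapping_add(1) }

// Compiles: both arms coerce to the single function-pointer type
// `fn(u32) -> u32`. Changing either signature makes the arms' types diverge
// and the `if` fail to type-check.
const _: () = {
    if true { declaration } else { definition };
};
```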
diff --git a/rust/macros/helpers.rs b/rust/macros/helpers.rs
index afb0f2e3a36a..a3ee27e29a6f 100644
--- a/rust/macros/helpers.rs
+++ b/rust/macros/helpers.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use proc_macro::{token_stream, Group, Punct, Spacing, TokenStream, TokenTree};
+use proc_macro::{token_stream, Group, Ident, TokenStream, TokenTree};
pub(crate) fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
if let Some(TokenTree::Ident(ident)) = it.next() {
@@ -70,86 +70,19 @@ pub(crate) fn expect_end(it: &mut token_stream::IntoIter) {
}
}
-pub(crate) struct Generics {
- pub(crate) impl_generics: Vec<TokenTree>,
- pub(crate) ty_generics: Vec<TokenTree>,
-}
-
-/// Parses the given `TokenStream` into `Generics` and the rest.
-///
-/// The generics are not present in the rest, but a where clause might remain.
-pub(crate) fn parse_generics(input: TokenStream) -> (Generics, Vec<TokenTree>) {
- // `impl_generics`, the declared generics with their bounds.
- let mut impl_generics = vec![];
- // Only the names of the generics, without any bounds.
- let mut ty_generics = vec![];
- // Tokens not related to the generics e.g. the `where` token and definition.
- let mut rest = vec![];
- // The current level of `<`.
- let mut nesting = 0;
- let mut toks = input.into_iter();
- // If we are at the beginning of a generic parameter.
- let mut at_start = true;
- for tt in &mut toks {
- match tt.clone() {
- TokenTree::Punct(p) if p.as_char() == '<' => {
- if nesting >= 1 {
- // This is inside of the generics and part of some bound.
- impl_generics.push(tt);
- }
- nesting += 1;
- }
- TokenTree::Punct(p) if p.as_char() == '>' => {
- // This is a parsing error, so we just end it here.
- if nesting == 0 {
- break;
- } else {
- nesting -= 1;
- if nesting >= 1 {
- // We are still inside of the generics and part of some bound.
- impl_generics.push(tt);
- }
- if nesting == 0 {
- break;
- }
- }
- }
- tt => {
- if nesting == 1 {
- // Here depending on the token, it might be a generic variable name.
- match &tt {
- // Ignore const.
- TokenTree::Ident(i) if i.to_string() == "const" => {}
- TokenTree::Ident(_) if at_start => {
- ty_generics.push(tt.clone());
- // We also already push the `,` token, this makes it easier to append
- // generics.
- ty_generics.push(TokenTree::Punct(Punct::new(',', Spacing::Alone)));
- at_start = false;
- }
- TokenTree::Punct(p) if p.as_char() == ',' => at_start = true,
- // Lifetimes begin with `'`.
- TokenTree::Punct(p) if p.as_char() == '\'' && at_start => {
- ty_generics.push(tt.clone());
- }
- _ => {}
- }
- }
- if nesting >= 1 {
- impl_generics.push(tt);
- } else if nesting == 0 {
- // If we haven't entered the generics yet, we still want to keep these tokens.
- rest.push(tt);
+/// Given a function declaration, finds the name of the function.
+pub(crate) fn function_name(input: TokenStream) -> Option<Ident> {
+ let mut input = input.into_iter();
+ while let Some(token) = input.next() {
+ match token {
+ TokenTree::Ident(i) if i.to_string() == "fn" => {
+ if let Some(TokenTree::Ident(i)) = input.next() {
+ return Some(i);
}
+ return None;
}
+ _ => continue,
}
}
- rest.extend(toks);
- (
- Generics {
- impl_generics,
- ty_generics,
- },
- rest,
- )
+ None
}
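
A standalone sketch of the scan `function_name` performs: skip tokens until the `fn` keyword,
then take the identifier that follows. The `proc_macro2` crate is assumed here as a stand-in,
since real `proc_macro` values only exist inside a macro expansion:

```rust
use proc_macro2::{Ident, TokenStream, TokenTree};
use std::str::FromStr;

// Mirrors the helper: returns the identifier following the first `fn`.
fn function_name(input: TokenStream) -> Option<Ident> {
    let mut input = input.into_iter();
    while let Some(token) = input.next() {
        if let TokenTree::Ident(i) = token {
            if i == "fn" {
                if let Some(TokenTree::Ident(name)) = input.next() {
                    return Some(name);
                }
                return None;
            }
        }
    }
    None
}

fn main() {
    let ts = TokenStream::from_str(
        r#"pub unsafe extern "C" fn my_fn(x: u32) -> u32 { x }"#,
    )
    .unwrap();
    assert_eq!(function_name(ts).unwrap().to_string(), "my_fn");
}
```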
diff --git a/rust/macros/kunit.rs b/rust/macros/kunit.rs
new file mode 100644
index 000000000000..99ccac82edde
--- /dev/null
+++ b/rust/macros/kunit.rs
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Procedural macro to run KUnit tests using a user-space like syntax.
+//!
+//! Copyright (c) 2023 José Expósito <jose.exposito89@gmail.com>
+
+use proc_macro::{Delimiter, Group, TokenStream, TokenTree};
+use std::fmt::Write;
+
+pub(crate) fn kunit_tests(attr: TokenStream, ts: TokenStream) -> TokenStream {
+ let attr = attr.to_string();
+
+ if attr.is_empty() {
+ panic!("Missing test name in `#[kunit_tests(test_name)]` macro")
+ }
+
+ if attr.len() > 255 {
+ panic!("The test suite name `{attr}` exceeds the maximum length of 255 bytes")
+ }
+
+ let mut tokens: Vec<_> = ts.into_iter().collect();
+
+ // Scan for the `mod` keyword.
+ tokens
+ .iter()
+ .find_map(|token| match token {
+ TokenTree::Ident(ident) => match ident.to_string().as_str() {
+ "mod" => Some(true),
+ _ => None,
+ },
+ _ => None,
+ })
+ .expect("`#[kunit_tests(test_name)]` attribute should only be applied to modules");
+
+ // Retrieve the main body. The main body should be the last token tree.
+ let body = match tokens.pop() {
+ Some(TokenTree::Group(group)) if group.delimiter() == Delimiter::Brace => group,
+ _ => panic!("Cannot locate main body of module"),
+ };
+
+ // Get the functions set as tests. Search for `[test]` -> `fn`.
+ let mut body_it = body.stream().into_iter();
+ let mut tests = Vec::new();
+ while let Some(token) = body_it.next() {
+ match token {
+ TokenTree::Group(group) if group.to_string() == "[test]" => match body_it.next() {
+ Some(TokenTree::Ident(ident)) if ident.to_string() == "fn" => {
+ let test_name = match body_it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ _ => continue,
+ };
+ tests.push(test_name);
+ }
+ _ => continue,
+ },
+ _ => (),
+ }
+ }
+
+ // Add `#[cfg(CONFIG_KUNIT)]` before the module declaration.
+ let config_kunit = "#[cfg(CONFIG_KUNIT)]".to_owned().parse().unwrap();
+ tokens.insert(
+ 0,
+ TokenTree::Group(Group::new(Delimiter::None, config_kunit)),
+ );
+
+ // Generate the KUnit test suite and a test case for each `#[test]`.
+ // The code generated for the following test module:
+ //
+ // ```
+ // #[kunit_tests(kunit_test_suit_name)]
+ // mod tests {
+ // #[test]
+ // fn foo() {
+ // assert_eq!(1, 1);
+ // }
+ //
+ // #[test]
+ // fn bar() {
+ // assert_eq!(2, 2);
+ // }
+ // }
+ // ```
+ //
+ // Looks like:
+ //
+ // ```
+ // unsafe extern "C" fn kunit_rust_wrapper_foo(_test: *mut kernel::bindings::kunit) { foo(); }
+ // unsafe extern "C" fn kunit_rust_wrapper_bar(_test: *mut kernel::bindings::kunit) { bar(); }
+ //
+ // static mut TEST_CASES: [kernel::bindings::kunit_case; 3] = [
+ // kernel::kunit::kunit_case(kernel::c_str!("foo"), kunit_rust_wrapper_foo),
+ // kernel::kunit::kunit_case(kernel::c_str!("bar"), kunit_rust_wrapper_bar),
+ // kernel::kunit::kunit_case_null(),
+ // ];
+ //
+ // kernel::kunit_unsafe_test_suite!(kunit_test_suit_name, TEST_CASES);
+ // ```
+ let mut kunit_macros = "".to_owned();
+ let mut test_cases = "".to_owned();
+ for test in &tests {
+ let kunit_wrapper_fn_name = format!("kunit_rust_wrapper_{test}");
+ let kunit_wrapper = format!(
+ "unsafe extern \"C\" fn {kunit_wrapper_fn_name}(_test: *mut kernel::bindings::kunit) {{ {test}(); }}"
+ );
+ writeln!(kunit_macros, "{kunit_wrapper}").unwrap();
+ writeln!(
+ test_cases,
+ " kernel::kunit::kunit_case(kernel::c_str!(\"{test}\"), {kunit_wrapper_fn_name}),"
+ )
+ .unwrap();
+ }
+
+ writeln!(kunit_macros).unwrap();
+ writeln!(
+ kunit_macros,
+ "static mut TEST_CASES: [kernel::bindings::kunit_case; {}] = [\n{test_cases} kernel::kunit::kunit_case_null(),\n];",
+ tests.len() + 1
+ )
+ .unwrap();
+
+ writeln!(
+ kunit_macros,
+ "kernel::kunit_unsafe_test_suite!({attr}, TEST_CASES);"
+ )
+ .unwrap();
+
+ // Remove the `#[test]` macros.
+ // We do this at a token level, in order to preserve span information.
+ let mut new_body = vec![];
+ let mut body_it = body.stream().into_iter();
+
+ while let Some(token) = body_it.next() {
+ match token {
+ TokenTree::Punct(ref c) if c.as_char() == '#' => match body_it.next() {
+ Some(TokenTree::Group(group)) if group.to_string() == "[test]" => (),
+ Some(next) => {
+ new_body.extend([token, next]);
+ }
+ _ => {
+ new_body.push(token);
+ }
+ },
+ _ => {
+ new_body.push(token);
+ }
+ }
+ }
+
+ let mut new_body = TokenStream::from_iter(new_body);
+ new_body.extend::<TokenStream>(kunit_macros.parse().unwrap());
+
+ tokens.push(TokenTree::Group(Group::new(Delimiter::Brace, new_body)));
+
+ tokens.into_iter().collect()
+}
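
A user-space sketch of the `[test]` -> `fn` -> name scan above, again assuming `proc_macro2`
as a stand-in for `proc_macro`:

```rust
use proc_macro2::{TokenStream, TokenTree};
use std::str::FromStr;

// Collect the names of all functions annotated with `#[test]`.
fn collect_tests(body: TokenStream) -> Vec<String> {
    let mut it = body.into_iter();
    let mut tests = Vec::new();
    while let Some(token) = it.next() {
        // `#[test]` tokenizes as a `#` punct followed by a `[test]` group;
        // the `#` simply falls through this match.
        if let TokenTree::Group(g) = &token {
            if g.to_string() == "[test]" {
                if let Some(TokenTree::Ident(i)) = it.next() {
                    if i == "fn" {
                        if let Some(TokenTree::Ident(name)) = it.next() {
                            tests.push(name.to_string());
                        }
                    }
                }
            }
        }
    }
    tests
}

fn main() {
    let body = TokenStream::from_str(
        "#[test] fn foo() { assert_eq!(1, 1); } #[test] fn bar() {}",
    )
    .unwrap();
    assert_eq!(collect_tests(body), ["foo", "bar"]);
}
```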
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 520eae5fd792..9acaa68c974e 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -2,16 +2,19 @@
//! Crate for all kernel procedural macros.
+// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT`
+// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is
+// touched by Kconfig when the version string from the compiler changes.
+
#[macro_use]
mod quote;
mod concat_idents;
+mod export;
mod helpers;
+mod kunit;
mod module;
mod paste;
-mod pin_data;
-mod pinned_drop;
mod vtable;
-mod zeroable;
use proc_macro::TokenStream;
@@ -26,42 +29,70 @@ use proc_macro::TokenStream;
///
/// # Examples
///
-/// ```ignore
+/// ```
/// use kernel::prelude::*;
///
/// module!{
/// type: MyModule,
/// name: "my_kernel_module",
-/// author: "Rust for Linux Contributors",
+/// authors: ["Rust for Linux Contributors"],
/// description: "My very own kernel module!",
/// license: "GPL",
+/// alias: ["alternate_module_name"],
/// }
///
-/// struct MyModule;
+/// struct MyModule(i32);
///
/// impl kernel::Module for MyModule {
-/// fn init() -> Result<Self> {
-/// // If the parameter is writeable, then the kparam lock must be
-/// // taken to read the parameter:
-/// {
-/// let lock = THIS_MODULE.kernel_param_lock();
-/// pr_info!("i32 param is: {}\n", writeable_i32.read(&lock));
-/// }
-/// // If the parameter is read only, it can be read without locking
-/// // the kernel parameters:
-/// pr_info!("i32 param is: {}\n", my_i32.read());
+/// fn init(_module: &'static ThisModule) -> Result<Self> {
+/// let foo: i32 = 42;
+/// pr_info!("I contain: {}\n", foo);
+/// Ok(Self(foo))
+/// }
+/// }
+/// # fn main() {}
+/// ```
+///
+/// ## Firmware
+///
+/// The following example shows how to declare a kernel module that needs
+/// to load binary firmware files. You need to specify the file names of
+/// the firmware in the `firmware` field. The information is embedded
+/// in the `modinfo` section of the kernel module. For example, a tool to
+/// build an initramfs uses this information to put the firmware files into
+/// the initramfs image.
+///
+/// ```
+/// use kernel::prelude::*;
+///
+/// module!{
+/// type: MyDeviceDriverModule,
+/// name: "my_device_driver_module",
+/// authors: ["Rust for Linux Contributors"],
+/// description: "My device driver requires firmware",
+/// license: "GPL",
+/// firmware: ["my_device_firmware1.bin", "my_device_firmware2.bin"],
+/// }
+///
+/// struct MyDeviceDriverModule;
+///
+/// impl kernel::Module for MyDeviceDriverModule {
+/// fn init(_module: &'static ThisModule) -> Result<Self> {
/// Ok(Self)
/// }
/// }
+/// # fn main() {}
/// ```
///
/// # Supported argument types
/// - `type`: type which implements the [`Module`] trait (required).
-/// - `name`: byte array of the name of the kernel module (required).
-/// - `author`: byte array of the author of the kernel module.
-/// - `description`: byte array of the description of the kernel module.
-/// - `license`: byte array of the license of the kernel module (required).
-/// - `alias`: byte array of alias name of the kernel module.
+/// - `name`: ASCII string literal of the name of the kernel module (required).
+/// - `authors`: array of ASCII string literals of the authors of the kernel module.
+/// - `description`: string literal of the description of the kernel module.
+/// - `license`: ASCII string literal of the license of the kernel module (required).
+/// - `alias`: array of ASCII string literals of the alias names of the kernel module.
+/// - `firmware`: array of ASCII string literals of the firmware files of
+/// the kernel module.
#[proc_macro]
pub fn module(ts: TokenStream) -> TokenStream {
module::module(ts)
@@ -91,12 +122,12 @@ pub fn module(ts: TokenStream) -> TokenStream {
/// used on the Rust side, it should not be possible to call the default
/// implementation. This is done to ensure that we call the vtable methods
/// through the C vtable, and not through the Rust vtable. Therefore, the
-/// default implementation should call `kernel::build_error`, which prevents
+/// default implementation should call `build_error!`, which prevents
/// calls to this function at compile time:
///
/// ```compile_fail
-/// # use kernel::error::VTABLE_DEFAULT_ERROR;
-/// kernel::build_error(VTABLE_DEFAULT_ERROR)
+/// # // Intentionally missing `use`s to simplify `rusttest`.
+/// build_error!(VTABLE_DEFAULT_ERROR)
/// ```
///
/// Note that you might need to import [`kernel::error::VTABLE_DEFAULT_ERROR`].
@@ -105,7 +136,7 @@ pub fn module(ts: TokenStream) -> TokenStream {
///
/// # Examples
///
-/// ```ignore
+/// ```
/// use kernel::error::VTABLE_DEFAULT_ERROR;
/// use kernel::prelude::*;
///
@@ -113,11 +144,11 @@ pub fn module(ts: TokenStream) -> TokenStream {
/// #[vtable]
/// pub trait Operations: Send + Sync + Sized {
/// fn foo(&self) -> Result<()> {
-/// kernel::build_error(VTABLE_DEFAULT_ERROR)
+/// build_error!(VTABLE_DEFAULT_ERROR)
/// }
///
/// fn bar(&self) -> Result<()> {
-/// kernel::build_error(VTABLE_DEFAULT_ERROR)
+/// build_error!(VTABLE_DEFAULT_ERROR)
/// }
/// }
///
@@ -142,6 +173,29 @@ pub fn vtable(attr: TokenStream, ts: TokenStream) -> TokenStream {
vtable::vtable(attr, ts)
}
+/// Export a function so that C code can call it via a header file.
+///
+/// Functions exported using this macro can be called from C code using the declaration in the
+/// appropriate header file. It should only be used in cases where C calls the function through a
+/// header file; cases where C calls into Rust via a function pointer in a vtable (such as
+/// `file_operations`) should not use this macro.
+///
+/// This macro has the following effect:
+///
+/// * Disables name mangling for this function.
+/// * Verifies at compile-time that the function signature matches the declaration in the header
+/// file.
+///
+/// You must declare the signature of the Rust function in a header file that is included by
+/// `rust/bindings/bindings_helper.h`.
+///
+/// This macro is *not* the same as the C macros `EXPORT_SYMBOL_*`. All Rust symbols are currently
+/// automatically exported with `EXPORT_SYMBOL_GPL`.
+#[proc_macro_attribute]
+pub fn export(attr: TokenStream, ts: TokenStream) -> TokenStream {
+ export::export(attr, ts)
+}
+
/// Concatenate two identifiers.
///
/// This is useful in macros that need to declare or reference items with names
@@ -150,12 +204,27 @@ pub fn vtable(attr: TokenStream, ts: TokenStream) -> TokenStream {
///
/// # Examples
///
-/// ```ignore
-/// use kernel::macro::concat_idents;
+/// ```
+/// # const binder_driver_return_protocol_BR_OK: u32 = 0;
+/// # const binder_driver_return_protocol_BR_ERROR: u32 = 1;
+/// # const binder_driver_return_protocol_BR_TRANSACTION: u32 = 2;
+/// # const binder_driver_return_protocol_BR_REPLY: u32 = 3;
+/// # const binder_driver_return_protocol_BR_DEAD_REPLY: u32 = 4;
+/// # const binder_driver_return_protocol_BR_TRANSACTION_COMPLETE: u32 = 5;
+/// # const binder_driver_return_protocol_BR_INCREFS: u32 = 6;
+/// # const binder_driver_return_protocol_BR_ACQUIRE: u32 = 7;
+/// # const binder_driver_return_protocol_BR_RELEASE: u32 = 8;
+/// # const binder_driver_return_protocol_BR_DECREFS: u32 = 9;
+/// # const binder_driver_return_protocol_BR_NOOP: u32 = 10;
+/// # const binder_driver_return_protocol_BR_SPAWN_LOOPER: u32 = 11;
+/// # const binder_driver_return_protocol_BR_DEAD_BINDER: u32 = 12;
+/// # const binder_driver_return_protocol_BR_CLEAR_DEATH_NOTIFICATION_DONE: u32 = 13;
+/// # const binder_driver_return_protocol_BR_FAILED_REPLY: u32 = 14;
+/// use kernel::macros::concat_idents;
///
/// macro_rules! pub_no_prefix {
/// ($prefix:ident, $($newname:ident),+) => {
-/// $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+/// $(pub(crate) const $newname: u32 = concat_idents!($prefix, $newname);)+
/// };
/// }
///
@@ -185,82 +254,6 @@ pub fn concat_idents(ts: TokenStream) -> TokenStream {
concat_idents::concat_idents(ts)
}
-/// Used to specify the pinning information of the fields of a struct.
-///
-/// This is somewhat similar in purpose as
-/// [pin-project-lite](https://crates.io/crates/pin-project-lite).
-/// Place this macro on a struct definition and then `#[pin]` in front of the attributes of each
-/// field you want to structurally pin.
-///
-/// This macro enables the use of the [`pin_init!`] macro. When pin-initializing a `struct`,
-/// then `#[pin]` directs the type of initializer that is required.
-///
-/// If your `struct` implements `Drop`, then you need to add `PinnedDrop` as arguments to this
-/// macro, and change your `Drop` implementation to `PinnedDrop` annotated with
-/// `#[`[`macro@pinned_drop`]`]`, since dropping pinned values requires extra care.
-///
-/// # Examples
-///
-/// ```rust,ignore
-/// #[pin_data]
-/// struct DriverData {
-/// #[pin]
-/// queue: Mutex<Vec<Command>>,
-/// buf: Box<[u8; 1024 * 1024]>,
-/// }
-/// ```
-///
-/// ```rust,ignore
-/// #[pin_data(PinnedDrop)]
-/// struct DriverData {
-/// #[pin]
-/// queue: Mutex<Vec<Command>>,
-/// buf: Box<[u8; 1024 * 1024]>,
-/// raw_info: *mut Info,
-/// }
-///
-/// #[pinned_drop]
-/// impl PinnedDrop for DriverData {
-/// fn drop(self: Pin<&mut Self>) {
-/// unsafe { bindings::destroy_info(self.raw_info) };
-/// }
-/// }
-/// ```
-///
-/// [`pin_init!`]: ../kernel/macro.pin_init.html
-// ^ cannot use direct link, since `kernel` is not a dependency of `macros`.
-#[proc_macro_attribute]
-pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream {
- pin_data::pin_data(inner, item)
-}
-
-/// Used to implement `PinnedDrop` safely.
-///
-/// Only works on structs that are annotated via `#[`[`macro@pin_data`]`]`.
-///
-/// # Examples
-///
-/// ```rust,ignore
-/// #[pin_data(PinnedDrop)]
-/// struct DriverData {
-/// #[pin]
-/// queue: Mutex<Vec<Command>>,
-/// buf: Box<[u8; 1024 * 1024]>,
-/// raw_info: *mut Info,
-/// }
-///
-/// #[pinned_drop]
-/// impl PinnedDrop for DriverData {
-/// fn drop(self: Pin<&mut Self>) {
-/// unsafe { bindings::destroy_info(self.raw_info) };
-/// }
-/// }
-/// ```
-#[proc_macro_attribute]
-pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
- pinned_drop::pinned_drop(args, input)
-}
-
/// Paste identifiers together.
///
/// Within the `paste!` macro, identifiers inside `[<` and `>]` are concatenated together to form a
@@ -272,12 +265,25 @@ pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
///
/// # Example
///
-/// ```ignore
-/// use kernel::macro::paste;
-///
+/// ```
+/// # const binder_driver_return_protocol_BR_OK: u32 = 0;
+/// # const binder_driver_return_protocol_BR_ERROR: u32 = 1;
+/// # const binder_driver_return_protocol_BR_TRANSACTION: u32 = 2;
+/// # const binder_driver_return_protocol_BR_REPLY: u32 = 3;
+/// # const binder_driver_return_protocol_BR_DEAD_REPLY: u32 = 4;
+/// # const binder_driver_return_protocol_BR_TRANSACTION_COMPLETE: u32 = 5;
+/// # const binder_driver_return_protocol_BR_INCREFS: u32 = 6;
+/// # const binder_driver_return_protocol_BR_ACQUIRE: u32 = 7;
+/// # const binder_driver_return_protocol_BR_RELEASE: u32 = 8;
+/// # const binder_driver_return_protocol_BR_DECREFS: u32 = 9;
+/// # const binder_driver_return_protocol_BR_NOOP: u32 = 10;
+/// # const binder_driver_return_protocol_BR_SPAWN_LOOPER: u32 = 11;
+/// # const binder_driver_return_protocol_BR_DEAD_BINDER: u32 = 12;
+/// # const binder_driver_return_protocol_BR_CLEAR_DEATH_NOTIFICATION_DONE: u32 = 13;
+/// # const binder_driver_return_protocol_BR_FAILED_REPLY: u32 = 14;
/// macro_rules! pub_no_prefix {
/// ($prefix:ident, $($newname:ident),+) => {
-/// paste! {
+/// kernel::macros::paste! {
/// $(pub(crate) const $newname: u32 = [<$prefix $newname>];)+
/// }
/// };
@@ -312,17 +318,30 @@ pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
///
/// Currently supported modifiers are:
/// * `span`: change the span of concatenated identifier to the span of the specified token. By
-/// default the span of the `[< >]` group is used.
+/// default the span of the `[< >]` group is used.
/// * `lower`: change the identifier to lower case.
/// * `upper`: change the identifier to upper case.
///
-/// ```ignore
-/// use kernel::macro::paste;
-///
+/// ```
+/// # const binder_driver_return_protocol_BR_OK: u32 = 0;
+/// # const binder_driver_return_protocol_BR_ERROR: u32 = 1;
+/// # const binder_driver_return_protocol_BR_TRANSACTION: u32 = 2;
+/// # const binder_driver_return_protocol_BR_REPLY: u32 = 3;
+/// # const binder_driver_return_protocol_BR_DEAD_REPLY: u32 = 4;
+/// # const binder_driver_return_protocol_BR_TRANSACTION_COMPLETE: u32 = 5;
+/// # const binder_driver_return_protocol_BR_INCREFS: u32 = 6;
+/// # const binder_driver_return_protocol_BR_ACQUIRE: u32 = 7;
+/// # const binder_driver_return_protocol_BR_RELEASE: u32 = 8;
+/// # const binder_driver_return_protocol_BR_DECREFS: u32 = 9;
+/// # const binder_driver_return_protocol_BR_NOOP: u32 = 10;
+/// # const binder_driver_return_protocol_BR_SPAWN_LOOPER: u32 = 11;
+/// # const binder_driver_return_protocol_BR_DEAD_BINDER: u32 = 12;
+/// # const binder_driver_return_protocol_BR_CLEAR_DEATH_NOTIFICATION_DONE: u32 = 13;
+/// # const binder_driver_return_protocol_BR_FAILED_REPLY: u32 = 14;
/// macro_rules! pub_no_prefix {
/// ($prefix:ident, $($newname:ident),+) => {
/// kernel::macros::paste! {
-/// $(pub(crate) const fn [<$newname:lower:span>]: u32 = [<$prefix $newname:span>];)+
+/// $(pub(crate) const fn [<$newname:lower:span>]() -> u32 { [<$prefix $newname:span>] })+
/// }
/// };
/// }
@@ -353,7 +372,7 @@ pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
///
/// Literals can also be concatenated with other identifiers:
///
-/// ```ignore
+/// ```
/// macro_rules! create_numbered_fn {
/// ($name:literal, $val:literal) => {
/// kernel::macros::paste! {
@@ -375,21 +394,29 @@ pub fn paste(input: TokenStream) -> TokenStream {
tokens.into_iter().collect()
}
-/// Derives the [`Zeroable`] trait for the given struct.
+/// Registers a KUnit test suite and its test cases using a user-space like syntax.
///
-/// This can only be used for structs where every field implements the [`Zeroable`] trait.
+/// This macro should be used on modules. If `CONFIG_KUNIT` (in `.config`) is `n`, the target module
+/// is ignored.
///
/// # Examples
///
-/// ```rust,ignore
-/// #[derive(Zeroable)]
-/// pub struct DriverData {
-/// id: i64,
-/// buf_ptr: *mut u8,
-/// len: usize,
+/// ```ignore
+/// # use macros::kunit_tests;
+/// #[kunit_tests(kunit_test_suit_name)]
+/// mod tests {
+/// #[test]
+/// fn foo() {
+/// assert_eq!(1, 1);
+/// }
+///
+/// #[test]
+/// fn bar() {
+/// assert_eq!(2, 2);
+/// }
/// }
/// ```
-#[proc_macro_derive(Zeroable)]
-pub fn derive_zeroable(input: TokenStream) -> TokenStream {
- zeroable::derive(input)
+#[proc_macro_attribute]
+pub fn kunit_tests(attr: TokenStream, ts: TokenStream) -> TokenStream {
+ kunit::kunit_tests(attr, ts)
}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index acd0393b5095..c4afdd69e490 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -48,7 +48,7 @@ impl<'a> ModInfoBuilder<'a> {
)
} else {
// Loadable modules' modinfo strings go as-is.
- format!("{field}={content}\0", field = field, content = content)
+ format!("{field}={content}\0")
};
write!(
@@ -56,7 +56,7 @@ impl<'a> ModInfoBuilder<'a> {
"
{cfg}
#[doc(hidden)]
- #[link_section = \".modinfo\"]
+ #[cfg_attr(not(target_os = \"macos\"), link_section = \".modinfo\")]
#[used]
pub static __{module}_{counter}: [u8; {length}] = *{string};
",
@@ -95,16 +95,26 @@ struct ModuleInfo {
license: String,
name: String,
author: Option<String>,
+ authors: Option<Vec<String>>,
description: Option<String>,
alias: Option<Vec<String>>,
+ firmware: Option<Vec<String>>,
}
impl ModuleInfo {
fn parse(it: &mut token_stream::IntoIter) -> Self {
let mut info = ModuleInfo::default();
- const EXPECTED_KEYS: &[&str] =
- &["type", "name", "author", "description", "license", "alias"];
+ const EXPECTED_KEYS: &[&str] = &[
+ "type",
+ "name",
+ "author",
+ "authors",
+ "description",
+ "license",
+ "alias",
+ "firmware",
+ ];
const REQUIRED_KEYS: &[&str] = &["type", "name", "license"];
let mut seen_keys = Vec::new();
@@ -116,10 +126,7 @@ impl ModuleInfo {
};
if seen_keys.contains(&key) {
- panic!(
- "Duplicated key \"{}\". Keys can only be specified once.",
- key
- );
+ panic!("Duplicated key \"{key}\". Keys can only be specified once.");
}
assert_eq!(expect_punct(it), ':');
@@ -128,13 +135,12 @@ impl ModuleInfo {
"type" => info.type_ = expect_ident(it),
"name" => info.name = expect_string_ascii(it),
"author" => info.author = Some(expect_string(it)),
+ "authors" => info.authors = Some(expect_string_array(it)),
"description" => info.description = Some(expect_string(it)),
"license" => info.license = expect_string_ascii(it),
"alias" => info.alias = Some(expect_string_array(it)),
- _ => panic!(
- "Unknown key \"{}\". Valid keys are: {:?}.",
- key, EXPECTED_KEYS
- ),
+ "firmware" => info.firmware = Some(expect_string_array(it)),
+ _ => panic!("Unknown key \"{key}\". Valid keys are: {EXPECTED_KEYS:?}."),
}
assert_eq!(expect_punct(it), ',');
@@ -146,7 +152,7 @@ impl ModuleInfo {
for key in REQUIRED_KEYS {
if !seen_keys.iter().any(|e| e == key) {
- panic!("Missing required key \"{}\".", key);
+ panic!("Missing required key \"{key}\".");
}
}
@@ -158,10 +164,7 @@ impl ModuleInfo {
}
if seen_keys != ordered_keys {
- panic!(
- "Keys are not ordered as expected. Order them like: {:?}.",
- ordered_keys
- );
+ panic!("Keys are not ordered as expected. Order them like: {ordered_keys:?}.");
}
info
@@ -173,10 +176,17 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
let info = ModuleInfo::parse(&mut it);
- let mut modinfo = ModInfoBuilder::new(info.name.as_ref());
+ // Rust does not allow hyphens in identifiers, use underscore instead.
+ let ident = info.name.replace('-', "_");
+ let mut modinfo = ModInfoBuilder::new(ident.as_ref());
if let Some(author) = info.author {
modinfo.emit("author", &author);
}
+ if let Some(authors) = info.authors {
+ for author in authors {
+ modinfo.emit("author", &author);
+ }
+ }
if let Some(description) = info.description {
modinfo.emit("description", &description);
}
@@ -186,6 +196,11 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
modinfo.emit("alias", &alias);
}
}
+ if let Some(firmware) = info.firmware {
+ for fw in firmware {
+ modinfo.emit("firmware", &fw);
+ }
+ }
// Built-in modules also export the `file` modinfo string.
let file =
@@ -203,17 +218,30 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
// freed until the module is unloaded.
#[cfg(MODULE)]
static THIS_MODULE: kernel::ThisModule = unsafe {{
- kernel::ThisModule::from_ptr(&kernel::bindings::__this_module as *const _ as *mut _)
+ extern \"C\" {{
+ static __this_module: kernel::types::Opaque<kernel::bindings::module>;
+ }}
+
+ kernel::ThisModule::from_ptr(__this_module.get())
}};
#[cfg(not(MODULE))]
static THIS_MODULE: kernel::ThisModule = unsafe {{
kernel::ThisModule::from_ptr(core::ptr::null_mut())
}};
+ /// The `LocalModule` type is the type of the module created by `module!`,
+ /// `module_pci_driver!`, `module_platform_driver!`, etc.
+ type LocalModule = {type_};
+
+ impl kernel::ModuleMetadata for {type_} {{
+ const NAME: &'static kernel::str::CStr = kernel::c_str!(\"{name}\");
+ }}
+
// Double nested modules, since then nobody can access the public items inside.
mod __module_init {{
mod __module_init {{
use super::super::{type_};
+ use pin_init::PinInit;
/// The \"Rust loadable module\" mark.
//
@@ -224,7 +252,8 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[used]
static __IS_RUST_MODULE: () = ();
- static mut __MOD: Option<{type_}> = None;
+ static mut __MOD: core::mem::MaybeUninit<{type_}> =
+ core::mem::MaybeUninit::uninit();
// Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
/// # Safety
@@ -235,7 +264,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[doc(hidden)]
#[no_mangle]
#[link_section = \".init.text\"]
- pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
+ pub unsafe extern \"C\" fn init_module() -> kernel::ffi::c_int {{
// SAFETY: This function is inaccessible to the outside due to the double
// module wrapping it. It is called exactly once by the C side via its
// unique name.
@@ -244,6 +273,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(MODULE)]
#[doc(hidden)]
+ #[used]
+ #[link_section = \".init.data\"]
+ static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module;
+
+ #[cfg(MODULE)]
+ #[doc(hidden)]
#[no_mangle]
pub extern \"C\" fn cleanup_module() {{
// SAFETY:
@@ -255,6 +290,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
unsafe {{ __exit() }}
}}
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[used]
+ #[link_section = \".exit.data\"]
+ static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module;
+
// Built-in modules are initialized through an initcall pointer
// and the identifiers need to be unique.
#[cfg(not(MODULE))]
@@ -262,14 +303,15 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[doc(hidden)]
#[link_section = \"{initcall_section}\"]
#[used]
- pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
+ pub static __{ident}_initcall: extern \"C\" fn() ->
+ kernel::ffi::c_int = __{ident}_init;
#[cfg(not(MODULE))]
#[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
core::arch::global_asm!(
r#\".section \"{initcall_section}\", \"a\"
- __{name}_initcall:
- .long __{name}_init - .
+ __{ident}_initcall:
+ .long __{ident}_init - .
.previous
\"#
);
@@ -277,7 +319,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(not(MODULE))]
#[doc(hidden)]
#[no_mangle]
- pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
+ pub extern \"C\" fn __{ident}_init() -> kernel::ffi::c_int {{
// SAFETY: This function is inaccessible to the outside due to the double
// module wrapping it. It is called exactly once by the C side via its
// placement above in the initcall section.
@@ -287,34 +329,28 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(not(MODULE))]
#[doc(hidden)]
#[no_mangle]
- pub extern \"C\" fn __{name}_exit() {{
+ pub extern \"C\" fn __{ident}_exit() {{
// SAFETY:
// - This function is inaccessible to the outside due to the double
// module wrapping it. It is called exactly once by the C side via its
// unique name,
- // - furthermore it is only called after `__{name}_init` has returned `0`
- // (which delegates to `__init`).
+ // - furthermore it is only called after `__{ident}_init` has
+ // returned `0` (which delegates to `__init`).
unsafe {{ __exit() }}
}}
/// # Safety
///
/// This function must only be called once.
- unsafe fn __init() -> core::ffi::c_int {{
- match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{
- Ok(m) => {{
- // SAFETY: No data race, since `__MOD` can only be accessed by this
- // module and there only `__init` and `__exit` access it. These
- // functions are only called once and `__exit` cannot be called
- // before or during `__init`.
- unsafe {{
- __MOD = Some(m);
- }}
- return 0;
- }}
- Err(e) => {{
- return e.to_errno();
- }}
+ unsafe fn __init() -> kernel::ffi::c_int {{
+ let initer =
+ <{type_} as kernel::InPlaceModule>::init(&super::super::THIS_MODULE);
+ // SAFETY: No data race, since `__MOD` can only be accessed by this module
+ // and there only `__init` and `__exit` access it. These functions are only
+ // called once and `__exit` cannot be called before or during `__init`.
+ match unsafe {{ initer.__pinned_init(__MOD.as_mut_ptr()) }} {{
+ Ok(()) => 0,
+ Err(e) => e.to_errno(),
}}
}}
@@ -329,7 +365,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
// called once and `__init` was already called.
unsafe {{
// Invokes `drop()` on `__MOD`, which should be used for cleanup.
- __MOD = None;
+ __MOD.assume_init_drop();
}}
}}
@@ -339,6 +375,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
",
type_ = info.type_,
name = info.name,
+ ident = ident,
modinfo = modinfo.buffer,
initcall_section = ".initcall6.init"
)
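
Replacing the `Option<{type_}>` slot with `core::mem::MaybeUninit<{type_}>` lets the
pin-initializer construct the module value in place instead of moving it in. A minimal
user-space sketch, with a hypothetical `MyModule` type, of the write-once/drop-once
lifecycle that the generated `__init`/`__exit` pair follows:

```rust
use core::mem::MaybeUninit;

struct MyModule(i32);

impl Drop for MyModule {
    fn drop(&mut self) {
        println!("cleanup: {}", self.0);
    }
}

fn main() {
    // The generated code keeps the slot in a `static mut`; a local is
    // enough to show the lifecycle.
    let mut slot: MaybeUninit<MyModule> = MaybeUninit::uninit();

    // `__init`: the initializer writes the value in place through a raw
    // pointer; nothing is moved.
    unsafe { slot.as_mut_ptr().write(MyModule(42)) };

    // `__exit`: drop the value in place without ever moving it out.
    unsafe { slot.assume_init_drop() };
}
```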
diff --git a/rust/macros/paste.rs b/rust/macros/paste.rs
index f40d42b35b58..cce712d19855 100644
--- a/rust/macros/paste.rs
+++ b/rust/macros/paste.rs
@@ -2,7 +2,7 @@
use proc_macro::{Delimiter, Group, Ident, Spacing, Span, TokenTree};
-fn concat(tokens: &[TokenTree], group_span: Span) -> TokenTree {
+fn concat_helper(tokens: &[TokenTree]) -> Vec<(String, Span)> {
let mut tokens = tokens.iter();
let mut segments = Vec::new();
let mut span = None;
@@ -46,12 +46,21 @@ fn concat(tokens: &[TokenTree], group_span: Span) -> TokenTree {
};
segments.push((value, sp));
}
- _ => panic!("unexpected token in paste segments"),
+ Some(TokenTree::Group(group)) if group.delimiter() == Delimiter::None => {
+ let tokens = group.stream().into_iter().collect::<Vec<TokenTree>>();
+ segments.append(&mut concat_helper(tokens.as_slice()));
+ }
+ token => panic!("unexpected token in paste segments: {token:?}"),
};
}
+ segments
+}
+
+fn concat(tokens: &[TokenTree], group_span: Span) -> TokenTree {
+ let segments = concat_helper(tokens);
let pasted: String = segments.into_iter().map(|x| x.0).collect();
- TokenTree::Ident(Ident::new(&pasted, span.unwrap_or(group_span)))
+ TokenTree::Ident(Ident::new(&pasted, group_span))
}
pub(crate) fn expand(tokens: &mut Vec<TokenTree>) {
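
The recursion into `Delimiter::None` groups matters because `macro_rules!` wraps most matched
metavariable fragments (for example `$name:literal`) in invisible, None-delimited groups
before they reach `paste!`. A sketch of the flattening, with `proc_macro2` assumed as a
user-space stand-in:

```rust
use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree};

// Recursively flatten idents, descending into None-delimited groups,
// as `concat_helper` now does.
fn flatten(tokens: &[TokenTree], out: &mut Vec<String>) {
    for tt in tokens {
        match tt {
            TokenTree::Ident(i) => out.push(i.to_string()),
            TokenTree::Group(g) if g.delimiter() == Delimiter::None => {
                let inner: Vec<TokenTree> = g.stream().into_iter().collect();
                flatten(&inner, out);
            }
            _ => panic!("unexpected token"),
        }
    }
}

fn main() {
    // `foo` wrapped in an invisible group, next to a bare `bar`, which is
    // how tokens can look after macro_rules! metavariable substitution.
    let wrapped = TokenTree::Group(Group::new(
        Delimiter::None,
        TokenStream::from_iter([TokenTree::Ident(Ident::new("foo", Span::call_site()))]),
    ));
    let bare = TokenTree::Ident(Ident::new("bar", Span::call_site()));

    let mut segments = Vec::new();
    flatten(&[wrapped, bare], &mut segments);
    assert_eq!(segments.concat(), "foobar");
}
```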
diff --git a/rust/macros/quote.rs b/rust/macros/quote.rs
index 33a199e4f176..92cacc4067c9 100644
--- a/rust/macros/quote.rs
+++ b/rust/macros/quote.rs
@@ -2,6 +2,7 @@
use proc_macro::{TokenStream, TokenTree};
+#[allow(dead_code)]
pub(crate) trait ToTokens {
fn to_tokens(&self, tokens: &mut TokenStream);
}
@@ -20,6 +21,12 @@ impl ToTokens for proc_macro::Group {
}
}
+impl ToTokens for proc_macro::Ident {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ tokens.extend([TokenTree::from(self.clone())]);
+ }
+}
+
impl ToTokens for TokenTree {
fn to_tokens(&self, tokens: &mut TokenStream) {
tokens.extend([self.clone()]);
@@ -40,7 +47,7 @@ impl ToTokens for TokenStream {
/// `quote` crate but provides only just enough functionality needed by the current `macros` crate.
macro_rules! quote_spanned {
($span:expr => $($tt:tt)*) => {{
- let mut tokens;
+ let mut tokens: ::std::vec::Vec<::proc_macro::TokenTree>;
#[allow(clippy::vec_init_then_push)]
{
tokens = ::std::vec::Vec::new();
@@ -65,7 +72,8 @@ macro_rules! quote_spanned {
quote_spanned!(@proc $v $span $($tt)*);
};
(@proc $v:ident $span:ident ( $($inner:tt)* ) $($tt:tt)*) => {
- let mut tokens = ::std::vec::Vec::new();
+ #[allow(unused_mut)]
+ let mut tokens = ::std::vec::Vec::<::proc_macro::TokenTree>::new();
quote_spanned!(@proc tokens $span $($inner)*);
$v.push(::proc_macro::TokenTree::Group(::proc_macro::Group::new(
::proc_macro::Delimiter::Parenthesis,
@@ -136,6 +144,22 @@ macro_rules! quote_spanned {
));
quote_spanned!(@proc $v $span $($tt)*);
};
+ (@proc $v:ident $span:ident = $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new('=', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident # $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new('#', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident _ $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new("_", $span)));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
(@proc $v:ident $span:ident $id:ident $($tt:tt)*) => {
$v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new(stringify!($id), $span)));
quote_spanned!(@proc $v $span $($tt)*);
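
The new `=`, `#` and `_` rules let `quote!` emit tokens such as `#[no_mangle]` and
`const _: () = ...` literally; the `export` macro above is their first user. A sketch of what
the `#` rule builds, with `proc_macro2` assumed since real `proc_macro` tokens cannot be
constructed outside a macro:

```rust
use proc_macro2::{Delimiter, Group, Punct, Spacing, TokenStream, TokenTree};
use std::str::FromStr;

fn main() {
    // Hand-built equivalent of `quote!(#[no_mangle])`: a literal `#`
    // punct followed by a bracket-delimited group.
    let tokens = TokenStream::from_iter([
        TokenTree::Punct(Punct::new('#', Spacing::Alone)),
        TokenTree::Group(Group::new(
            Delimiter::Bracket,
            TokenStream::from_str("no_mangle").unwrap(),
        )),
    ]);
    println!("{tokens}"); // token rendering inserts spaces: `# [no_mangle]`
}
```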
diff --git a/rust/pin-init/CONTRIBUTING.md b/rust/pin-init/CONTRIBUTING.md
new file mode 100644
index 000000000000..16c899a7ae0b
--- /dev/null
+++ b/rust/pin-init/CONTRIBUTING.md
@@ -0,0 +1,72 @@
+# Contributing to `pin-init`
+
+Thanks for showing interest in contributing to `pin-init`! This document outlines the guidelines for
+contributing to `pin-init`.
+
+All contributions are dual-licensed under Apache 2.0 and MIT. You can find the respective licenses
+in the `LICENSE-APACHE` and `LICENSE-MIT` files.
+
+## Non-Code Contributions
+
+### Bug Reports
+
+For any type of bug report, please submit an issue using the bug report issue template.
+
+If the issue is a soundness issue, please privately report it as a security vulnerability via the
+GitHub web interface.
+
+### Feature Requests
+
+If you have any feature requests, please submit an issue using the feature request issue template.
+
+### Questions and Getting Help
+
+You can ask questions in the Discussions page of the GitHub repository. If you're encountering
+problems or just have questions related to `pin-init` in the Linux kernel, you can also ask your
+questions in the [Rust-for-Linux Zulip](https://rust-for-linux.zulipchat.com/) or see
+<https://rust-for-linux.com/contact>.
+
+## Contributing Code
+
+### Linux Kernel
+
+`pin-init` is used by the Linux kernel and all commits are synchronized to it. For this reason, the
+same requirements for commits apply to `pin-init`. See [the kernel's documentation] for details. The
+rest of this document will also cover some of the rules listed there and additional ones.
+
+[the kernel's documentation]: https://docs.kernel.org/process/submitting-patches.html
+
+Contributions to `pin-init` ideally go through the [GitHub repository], because that repository runs
+a CI with lots of tests not present in the kernel. However, patches are also accepted (though not
+preferred). Do note that some files are only present in the GitHub repository, such as
+tests, licenses and cargo-related files. Changes to those files can only be made via GitHub.
+
+[GitHub repository]: https://github.com/Rust-for-Linux/pin-init
+
+### Commit Style
+
+Everything must compile without errors or warnings and all tests must pass after **every commit**.
+This is important for bisection and also required by the kernel.
+
+Each commit should be a single, logically cohesive change. Of course it's best to keep the changes
+small and digestible, but logically linked changes should be made in the same commit. For example,
+when fixing typos, create a single commit that fixes all of them instead of one commit per typo.
+
+Commits must have a meaningful commit title. Commits with changes to files in the `internal`
+directory should have a title prefixed with `internal:`. The commit message should explain the
+change and its rationale. You also have to add your `Signed-off-by` tag, see [Developer's
+Certificate of Origin]. This has to be done for both mailing list and GitHub submissions.
+
+[Developer's Certificate of Origin]: https://docs.kernel.org/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin
+
+Any changes made to public APIs must be documented not only in the commit message, but also in the
+`CHANGELOG.md` file. This is especially important for breaking changes, as those warrant a major
+version bump.
+
+If you make changes to the top-level crate documentation, you also need to update the `README.md`
+via `cargo rdme`.
+
+Some of these rules can be ignored if the change is done solely to files that are not present in the
+kernel version of this library. Those files are listed at the very bottom of the
+`sync-kernel.sh` script, in the `--exclude` flag given to the `git am` command.
diff --git a/rust/pin-init/README.md b/rust/pin-init/README.md
new file mode 100644
index 000000000000..3d04796b212b
--- /dev/null
+++ b/rust/pin-init/README.md
@@ -0,0 +1,228 @@
+[![Crates.io](https://img.shields.io/crates/v/pin-init.svg)](https://crates.io/crates/pin-init)
+[![Documentation](https://docs.rs/pin-init/badge.svg)](https://docs.rs/pin-init/)
+[![Dependency status](https://deps.rs/repo/github/Rust-for-Linux/pin-init/status.svg)](https://deps.rs/repo/github/Rust-for-Linux/pin-init)
+![License](https://img.shields.io/crates/l/pin-init)
+[![Toolchain](https://img.shields.io/badge/toolchain-nightly-red)](#nightly-only)
+![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/Rust-for-Linux/pin-init/test.yml)
+# `pin-init`
+
+<!-- cargo-rdme start -->
+
+Library to safely and fallibly initialize pinned `struct`s using in-place constructors.
+
+[Pinning][pinning] is Rust's way of ensuring data does not move.
+
+It also allows in-place initialization of big `struct`s that would otherwise produce a stack
+overflow.
+
+This library's main use-case is in [Rust-for-Linux], although this version can be used
+standalone.
+
+There are cases when you want to in-place initialize a struct: for example, when it is very big
+and moving it from the stack is not an option because it is bigger than the stack itself.
+Another reason would be that you need the address of the object to initialize it. This stands
+in direct conflict with Rust's normal process of first initializing an object and then moving
+it into its final memory location. For more information, see
+<https://rust-for-linux.com/the-safe-pinned-initialization-problem>.
+
+This library allows you to do in-place initialization safely.
+
+### Nightly Needed for `alloc` feature
+
+This library requires the [`allocator_api` unstable feature] when the `alloc` feature is
+enabled and thus this feature can only be used with a nightly compiler. When enabling the
+`alloc` feature, the user will be required to activate `allocator_api` as well.
+
+[`allocator_api` unstable feature]: https://doc.rust-lang.org/nightly/unstable-book/library-features/allocator-api.html
+
+The feature is enabled by default, thus by default `pin-init` will require a nightly compiler.
+However, using the crate on stable compilers is possible by disabling `alloc`. In practice this
+will require the `std` feature, because stable compilers have neither `Box` nor `Arc` in no-std
+mode.
+
+## Overview
+
+To initialize a `struct` with an in-place constructor you will need two things:
+- an in-place constructor,
+- a memory location that can hold your `struct` (this can be the [stack], an [`Arc<T>`],
+ [`Box<T>`] or any other smart pointer that supports this library).
+
+To get an in-place constructor there are generally three options:
+- directly creating an in-place constructor using the [`pin_init!`] macro,
+- a custom function/macro returning an in-place constructor provided by someone else,
+- using the unsafe function [`pin_init_from_closure()`] to manually create an initializer.
+
+Aside from pinned initialization, this library also supports in-place construction without
+pinning; the macros/types/functions are generally named like the pinned variants without the
+`pin_` prefix.
+
+## Examples
+
+Throughout the examples we will often make use of the `CMutex` type, which can be found in
+`../examples/mutex.rs`. It is essentially a userland rebuild of the `struct mutex` type from
+the Linux kernel, implemented using a wait list and a basic spinlock. Importantly, the wait
+list requires the mutex to be pinned before it can be locked, which makes the type a prime
+candidate for this library.
+
+### Using the [`pin_init!`] macro
+
+If you want to use [`PinInit`], then you will have to annotate your `struct` with
+`#[`[`pin_data`]`]`. It is a macro that uses `#[pin]` as a marker for
+[structurally pinned fields]. After doing this, you can then create an in-place constructor via
+[`pin_init!`]. The syntax is almost the same as normal `struct` initializers. The difference is
+that you need to write `<-` instead of `:` for fields that you want to initialize in-place.
+
+```rust
+use pin_init::{pin_data, pin_init, InPlaceInit};
+
+#[pin_data]
+struct Foo {
+ #[pin]
+ a: CMutex<usize>,
+ b: u32,
+}
+
+let foo = pin_init!(Foo {
+ a <- CMutex::new(42),
+ b: 24,
+});
+```
+
+`foo` now is of the type [`impl PinInit<Foo>`]. We can now use any smart pointer that we like
+(or just the stack) to actually initialize a `Foo`:
+
+```rust
+let foo: Result<Pin<Box<Foo>>, AllocError> = Box::pin_init(foo);
+```
+
+For more information see the [`pin_init!`] macro.
+
+### Using a custom function/macro that returns an initializer
+
+Many types that use this library supply a function/macro that returns an initializer, because
+the above method only works for types where you can access the fields.
+
+```rust
+let mtx: Result<Pin<Arc<CMutex<usize>>>, _> = Arc::pin_init(CMutex::new(42));
+```
+
+To declare an init macro/function you just return an [`impl PinInit<T, E>`]:
+
+```rust
+#[pin_data]
+struct DriverData {
+ #[pin]
+ status: CMutex<i32>,
+ buffer: Box<[u8; 1_000_000]>,
+}
+
+impl DriverData {
+ fn new() -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ status <- CMutex::new(0),
+ buffer: Box::init(pin_init::zeroed())?,
+ }? Error)
+ }
+}
+```
+
+### Manual creation of an initializer
+
+Often when working with primitives the previous approaches are not sufficient. That is where
+[`pin_init_from_closure()`] comes in. This `unsafe` function allows you to create a
+[`impl PinInit<T, E>`] directly from a closure. Of course you have to ensure that the closure
+actually does the initialization in the correct way. Here are the things to look out for
+(we are calling the parameter to the closure `slot`):
+- when the closure returns `Ok(())`, then it has completed the initialization successfully, so
+ `slot` now contains a valid bit pattern for the type `T`,
+- when the closure returns `Err(e)`, then the caller may deallocate the memory at `slot`, so
+ you need to take care to clean up anything if your initialization fails mid-way,
+- you may assume that `slot` will stay pinned even after the closure returns until `drop` of
+ `slot` gets called.
+
+```rust
+use pin_init::{pin_data, pinned_drop, PinInit, PinnedDrop, pin_init_from_closure};
+use core::{
+ ptr::addr_of_mut,
+ marker::PhantomPinned,
+ cell::UnsafeCell,
+ pin::Pin,
+ mem::MaybeUninit,
+};
+mod bindings {
+ #[repr(C)]
+ pub struct foo {
+ /* fields from C ... */
+ }
+ extern "C" {
+ pub fn init_foo(ptr: *mut foo);
+ pub fn destroy_foo(ptr: *mut foo);
+ #[must_use = "you must check the error return code"]
+ pub fn enable_foo(ptr: *mut foo, flags: u32) -> i32;
+ }
+}
+
+/// # Invariants
+///
+/// `foo` is always initialized
+#[pin_data(PinnedDrop)]
+pub struct RawFoo {
+ #[pin]
+ _p: PhantomPinned,
+ #[pin]
+ foo: UnsafeCell<MaybeUninit<bindings::foo>>,
+}
+
+impl RawFoo {
+ pub fn new(flags: u32) -> impl PinInit<Self, i32> {
+ // SAFETY:
+ // - when the closure returns `Ok(())`, then it has successfully initialized and
+ // enabled `foo`,
+ // - when it returns `Err(e)`, then it has cleaned up before
+ unsafe {
+ pin_init_from_closure(move |slot: *mut Self| {
+ // `slot` contains uninit memory, avoid creating a reference.
+ let foo = addr_of_mut!((*slot).foo);
+ let foo = UnsafeCell::raw_get(foo).cast::<bindings::foo>();
+
+ // Initialize the `foo`
+ bindings::init_foo(foo);
+
+ // Try to enable it.
+ let err = bindings::enable_foo(foo, flags);
+ if err != 0 {
+ // Enabling has failed, first clean up the foo and then return the error.
+ bindings::destroy_foo(foo);
+ Err(err)
+ } else {
+ // All fields of `RawFoo` have been initialized, since `_p` is a ZST.
+ Ok(())
+ }
+ })
+ }
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for RawFoo {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: Since `foo` is initialized, destroying is safe.
+ unsafe { bindings::destroy_foo(self.foo.get().cast::<bindings::foo>()) };
+ }
+}
+```
+
+For more information on how to use [`pin_init_from_closure()`], take a look at the uses inside
+the `kernel` crate. The [`sync`] module is a good starting point.
+
+[`sync`]: https://rust.docs.kernel.org/kernel/sync/index.html
+[pinning]: https://doc.rust-lang.org/std/pin/index.html
+[structurally pinned fields]: https://doc.rust-lang.org/std/pin/index.html#pinning-is-structural-for-field
+[stack]: https://docs.rs/pin-init/latest/pin_init/macro.stack_pin_init.html
+[`Arc<T>`]: https://doc.rust-lang.org/stable/alloc/sync/struct.Arc.html
+[`Box<T>`]: https://doc.rust-lang.org/stable/alloc/boxed/struct.Box.html
+[`impl PinInit<Foo>`]: https://docs.rs/pin-init/latest/pin_init/trait.PinInit.html
+[`impl PinInit<T, E>`]: https://docs.rs/pin-init/latest/pin_init/trait.PinInit.html
+[`impl Init<T, E>`]: https://docs.rs/pin-init/latest/pin_init/trait.Init.html
+[Rust-for-Linux]: https://rust-for-linux.com/
+
+<!-- cargo-rdme end -->
diff --git a/rust/pin-init/examples/big_struct_in_place.rs b/rust/pin-init/examples/big_struct_in_place.rs
new file mode 100644
index 000000000000..30d44a334ffd
--- /dev/null
+++ b/rust/pin-init/examples/big_struct_in_place.rs
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use pin_init::*;
+
+// Struct with size over 1GiB
+#[derive(Debug)]
+pub struct BigStruct {
+ buf: [u8; 1024 * 1024 * 1024],
+ a: u64,
+ b: u64,
+ c: u64,
+ d: u64,
+ managed_buf: ManagedBuf,
+}
+
+#[derive(Debug)]
+pub struct ManagedBuf {
+ buf: [u8; 1024 * 1024],
+}
+
+impl ManagedBuf {
+ pub fn new() -> impl Init<Self> {
+ init!(ManagedBuf { buf <- zeroed() })
+ }
+}
+
+fn main() {
+ // We want to initialize the struct in-place; otherwise we would get a stack overflow.
+ let buf: Box<BigStruct> = Box::init(init!(BigStruct {
+ buf <- zeroed(),
+ a: 7,
+ b: 186,
+ c: 7789,
+ d: 34,
+ managed_buf <- ManagedBuf::new(),
+ }))
+ .unwrap();
+ println!("{}", core::mem::size_of_val(&*buf));
+}
diff --git a/rust/pin-init/examples/error.rs b/rust/pin-init/examples/error.rs
new file mode 100644
index 000000000000..e0cc258746ce
--- /dev/null
+++ b/rust/pin-init/examples/error.rs
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![cfg_attr(feature = "alloc", feature(allocator_api))]
+
+use core::convert::Infallible;
+
+#[cfg(feature = "alloc")]
+use std::alloc::AllocError;
+
+#[derive(Debug)]
+pub struct Error;
+
+impl From<Infallible> for Error {
+ fn from(e: Infallible) -> Self {
+ match e {}
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl From<AllocError> for Error {
+ fn from(_: AllocError) -> Self {
+ Self
+ }
+}
+
+#[allow(dead_code)]
+fn main() {}
diff --git a/rust/pin-init/examples/linked_list.rs b/rust/pin-init/examples/linked_list.rs
new file mode 100644
index 000000000000..6d7eb0a0ec0d
--- /dev/null
+++ b/rust/pin-init/examples/linked_list.rs
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![allow(clippy::undocumented_unsafe_blocks)]
+#![cfg_attr(feature = "alloc", feature(allocator_api))]
+
+use core::{
+ cell::Cell,
+ convert::Infallible,
+ marker::PhantomPinned,
+ pin::Pin,
+ ptr::{self, NonNull},
+};
+
+use pin_init::*;
+
+#[expect(unused_attributes)]
+mod error;
+use error::Error;
+
+#[pin_data(PinnedDrop)]
+#[repr(C)]
+#[derive(Debug)]
+pub struct ListHead {
+ next: Link,
+ prev: Link,
+ #[pin]
+ pin: PhantomPinned,
+}
+
+impl ListHead {
+ #[inline]
+ pub fn new() -> impl PinInit<Self, Infallible> {
+ try_pin_init!(&this in Self {
+ next: unsafe { Link::new_unchecked(this) },
+ prev: unsafe { Link::new_unchecked(this) },
+ pin: PhantomPinned,
+ }? Infallible)
+ }
+
+ #[inline]
+ pub fn insert_next(list: &ListHead) -> impl PinInit<Self, Infallible> + '_ {
+ try_pin_init!(&this in Self {
+ prev: list.next.prev().replace(unsafe { Link::new_unchecked(this)}),
+ next: list.next.replace(unsafe { Link::new_unchecked(this)}),
+ pin: PhantomPinned,
+ }? Infallible)
+ }
+
+ #[inline]
+ pub fn insert_prev(list: &ListHead) -> impl PinInit<Self, Infallible> + '_ {
+ try_pin_init!(&this in Self {
+ next: list.prev.next().replace(unsafe { Link::new_unchecked(this)}),
+ prev: list.prev.replace(unsafe { Link::new_unchecked(this)}),
+ pin: PhantomPinned,
+ }? Infallible)
+ }
+
+ #[inline]
+ pub fn next(&self) -> Option<NonNull<Self>> {
+ if ptr::eq(self.next.as_ptr(), self) {
+ None
+ } else {
+ Some(unsafe { NonNull::new_unchecked(self.next.as_ptr() as *mut Self) })
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn size(&self) -> usize {
+ let mut size = 1;
+ let mut cur = self.next.clone();
+ while !ptr::eq(self, cur.cur()) {
+ cur = cur.next().clone();
+ size += 1;
+ }
+ size
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for ListHead {
+ //#[inline]
+ fn drop(self: Pin<&mut Self>) {
+ if !ptr::eq(self.next.as_ptr(), &*self) {
+ let next = unsafe { &*self.next.as_ptr() };
+ let prev = unsafe { &*self.prev.as_ptr() };
+ next.prev.set(&self.prev);
+ prev.next.set(&self.next);
+ }
+ }
+}
+
+#[repr(transparent)]
+#[derive(Clone, Debug)]
+struct Link(Cell<NonNull<ListHead>>);
+
+impl Link {
+ /// # Safety
+ ///
+ /// The contents of the pointer should form a consistent circular
+ /// linked list; for example, a "next" link should be pointed back
+ /// by the target `ListHead`'s "prev" link and a "prev" link should be
+ /// pointed back by the target `ListHead`'s "next" link.
+ #[inline]
+ unsafe fn new_unchecked(ptr: NonNull<ListHead>) -> Self {
+ Self(Cell::new(ptr))
+ }
+
+ #[inline]
+ fn next(&self) -> &Link {
+ unsafe { &(*self.0.get().as_ptr()).next }
+ }
+
+ #[inline]
+ fn prev(&self) -> &Link {
+ unsafe { &(*self.0.get().as_ptr()).prev }
+ }
+
+ #[allow(dead_code)]
+ fn cur(&self) -> &ListHead {
+ unsafe { &*self.0.get().as_ptr() }
+ }
+
+ #[inline]
+ fn replace(&self, other: Link) -> Link {
+ unsafe { Link::new_unchecked(self.0.replace(other.0.get())) }
+ }
+
+ #[inline]
+ fn as_ptr(&self) -> *const ListHead {
+ self.0.get().as_ptr()
+ }
+
+ #[inline]
+ fn set(&self, val: &Link) {
+ self.0.set(val.0.get());
+ }
+}
+
+#[allow(dead_code)]
+#[cfg_attr(test, test)]
+fn main() -> Result<(), Error> {
+ let a = Box::pin_init(ListHead::new())?;
+ stack_pin_init!(let b = ListHead::insert_next(&a));
+ stack_pin_init!(let c = ListHead::insert_next(&a));
+ stack_pin_init!(let d = ListHead::insert_next(&b));
+ let e = Box::pin_init(ListHead::insert_next(&b))?;
+ println!("a ({a:p}): {a:?}");
+ println!("b ({b:p}): {b:?}");
+ println!("c ({c:p}): {c:?}");
+ println!("d ({d:p}): {d:?}");
+ println!("e ({e:p}): {e:?}");
+ let mut inspect = &*a;
+ while let Some(next) = inspect.next() {
+ println!("({inspect:p}): {inspect:?}");
+ inspect = unsafe { &*next.as_ptr() };
+ if core::ptr::eq(inspect, &*a) {
+ break;
+ }
+ }
+ Ok(())
+}
diff --git a/rust/pin-init/examples/mutex.rs b/rust/pin-init/examples/mutex.rs
new file mode 100644
index 000000000000..073bb79341d1
--- /dev/null
+++ b/rust/pin-init/examples/mutex.rs
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![allow(clippy::undocumented_unsafe_blocks)]
+#![cfg_attr(feature = "alloc", feature(allocator_api))]
+#![allow(clippy::missing_safety_doc)]
+
+use core::{
+ cell::{Cell, UnsafeCell},
+ marker::PhantomPinned,
+ ops::{Deref, DerefMut},
+ pin::Pin,
+ sync::atomic::{AtomicBool, Ordering},
+};
+use std::{
+ sync::Arc,
+ thread::{self, park, sleep, Builder, Thread},
+ time::Duration,
+};
+
+use pin_init::*;
+#[expect(unused_attributes)]
+#[path = "./linked_list.rs"]
+pub mod linked_list;
+use linked_list::*;
+
+pub struct SpinLock {
+ inner: AtomicBool,
+}
+
+impl SpinLock {
+ #[inline]
+ pub fn acquire(&self) -> SpinLockGuard<'_> {
+ while self
+ .inner
+ .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+ .is_err()
+ {
+ while self.inner.load(Ordering::Relaxed) {
+ thread::yield_now();
+ }
+ }
+ SpinLockGuard(self)
+ }
+
+ #[inline]
+ #[allow(clippy::new_without_default)]
+ pub const fn new() -> Self {
+ Self {
+ inner: AtomicBool::new(false),
+ }
+ }
+}
+
+pub struct SpinLockGuard<'a>(&'a SpinLock);
+
+impl Drop for SpinLockGuard<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ self.0.inner.store(false, Ordering::Release);
+ }
+}
+
+#[pin_data]
+pub struct CMutex<T> {
+ #[pin]
+ wait_list: ListHead,
+ spin_lock: SpinLock,
+ locked: Cell<bool>,
+ #[pin]
+ data: UnsafeCell<T>,
+}
+
+impl<T> CMutex<T> {
+ #[inline]
+ pub fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(CMutex {
+ wait_list <- ListHead::new(),
+ spin_lock: SpinLock::new(),
+ locked: Cell::new(false),
+ data <- unsafe {
+ pin_init_from_closure(|slot: *mut UnsafeCell<T>| {
+ val.__pinned_init(slot.cast::<T>())
+ })
+ },
+ })
+ }
+
+ #[inline]
+ pub fn lock(&self) -> Pin<CMutexGuard<'_, T>> {
+ let mut sguard = self.spin_lock.acquire();
+ if self.locked.get() {
+ stack_pin_init!(let wait_entry = WaitEntry::insert_new(&self.wait_list));
+ // println!("wait list length: {}", self.wait_list.size());
+ while self.locked.get() {
+ drop(sguard);
+ park();
+ sguard = self.spin_lock.acquire();
+ }
+ // This does have an effect, as the ListHead inside wait_entry implements Drop!
+ #[expect(clippy::drop_non_drop)]
+ drop(wait_entry);
+ }
+ self.locked.set(true);
+ unsafe {
+ Pin::new_unchecked(CMutexGuard {
+ mtx: self,
+ _pin: PhantomPinned,
+ })
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn get_data_mut(self: Pin<&mut Self>) -> &mut T {
+ // SAFETY: we have an exclusive reference and thus nobody has access to data.
+ unsafe { &mut *self.data.get() }
+ }
+}
+
+unsafe impl<T: Send> Send for CMutex<T> {}
+unsafe impl<T: Send> Sync for CMutex<T> {}
+
+pub struct CMutexGuard<'a, T> {
+ mtx: &'a CMutex<T>,
+ _pin: PhantomPinned,
+}
+
+impl<T> Drop for CMutexGuard<'_, T> {
+ #[inline]
+ fn drop(&mut self) {
+ let sguard = self.mtx.spin_lock.acquire();
+ self.mtx.locked.set(false);
+ if let Some(list_field) = self.mtx.wait_list.next() {
+ let wait_entry = list_field.as_ptr().cast::<WaitEntry>();
+ unsafe { (*wait_entry).thread.unpark() };
+ }
+ drop(sguard);
+ }
+}
+
+impl<T> Deref for CMutexGuard<'_, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*self.mtx.data.get() }
+ }
+}
+
+impl<T> DerefMut for CMutexGuard<'_, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *self.mtx.data.get() }
+ }
+}
+
+#[pin_data]
+#[repr(C)]
+struct WaitEntry {
+ #[pin]
+ wait_list: ListHead,
+ thread: Thread,
+}
+
+impl WaitEntry {
+ #[inline]
+ fn insert_new(list: &ListHead) -> impl PinInit<Self> + '_ {
+ pin_init!(Self {
+ thread: thread::current(),
+ wait_list <- ListHead::insert_prev(list),
+ })
+ }
+}
+
+#[cfg(not(any(feature = "std", feature = "alloc")))]
+fn main() {}
+
+#[allow(dead_code)]
+#[cfg_attr(test, test)]
+#[cfg(any(feature = "std", feature = "alloc"))]
+fn main() {
+ let mtx: Pin<Arc<CMutex<usize>>> = Arc::pin_init(CMutex::new(0)).unwrap();
+ let mut handles = vec![];
+ let thread_count = 20;
+ let workload = if cfg!(miri) { 100 } else { 1_000 };
+ for i in 0..thread_count {
+ let mtx = mtx.clone();
+ handles.push(
+ Builder::new()
+ .name(format!("worker #{i}"))
+ .spawn(move || {
+ for _ in 0..workload {
+ *mtx.lock() += 1;
+ }
+ println!("{i} halfway");
+ sleep(Duration::from_millis((i as u64) * 10));
+ for _ in 0..workload {
+ *mtx.lock() += 1;
+ }
+ println!("{i} finished");
+ })
+ .expect("should not fail"),
+ );
+ }
+ for h in handles {
+ h.join().expect("thread panicked");
+ }
+ println!("{:?}", &*mtx.lock());
+ assert_eq!(*mtx.lock(), workload * thread_count * 2);
+}
diff --git a/rust/pin-init/examples/pthread_mutex.rs b/rust/pin-init/examples/pthread_mutex.rs
new file mode 100644
index 000000000000..5ac22f1880d2
--- /dev/null
+++ b/rust/pin-init/examples/pthread_mutex.rs
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// inspired by <https://github.com/nbdd0121/pin-init/blob/trunk/examples/pthread_mutex.rs>
+#![allow(clippy::undocumented_unsafe_blocks)]
+#![cfg_attr(feature = "alloc", feature(allocator_api))]
+#[cfg(not(windows))]
+mod pthread_mtx {
+ #[cfg(feature = "alloc")]
+ use core::alloc::AllocError;
+ use core::{
+ cell::UnsafeCell,
+ marker::PhantomPinned,
+ mem::MaybeUninit,
+ ops::{Deref, DerefMut},
+ pin::Pin,
+ };
+ use pin_init::*;
+ use std::convert::Infallible;
+
+ #[pin_data(PinnedDrop)]
+ pub struct PThreadMutex<T> {
+ #[pin]
+ raw: UnsafeCell<libc::pthread_mutex_t>,
+ data: UnsafeCell<T>,
+ #[pin]
+ pin: PhantomPinned,
+ }
+
+ unsafe impl<T: Send> Send for PThreadMutex<T> {}
+ unsafe impl<T: Send> Sync for PThreadMutex<T> {}
+
+ #[pinned_drop]
+ impl<T> PinnedDrop for PThreadMutex<T> {
+ fn drop(self: Pin<&mut Self>) {
+ unsafe {
+ libc::pthread_mutex_destroy(self.raw.get());
+ }
+ }
+ }
+
+ #[derive(Debug)]
+ pub enum Error {
+ #[expect(dead_code)]
+ IO(std::io::Error),
+ Alloc,
+ }
+
+ impl From<Infallible> for Error {
+ fn from(e: Infallible) -> Self {
+ match e {}
+ }
+ }
+
+ #[cfg(feature = "alloc")]
+ impl From<AllocError> for Error {
+ fn from(_: AllocError) -> Self {
+ Self::Alloc
+ }
+ }
+
+ impl<T> PThreadMutex<T> {
+ pub fn new(data: T) -> impl PinInit<Self, Error> {
+ fn init_raw() -> impl PinInit<UnsafeCell<libc::pthread_mutex_t>, Error> {
+ let init = |slot: *mut UnsafeCell<libc::pthread_mutex_t>| {
+ // We can cast because `UnsafeCell<T>` has the same layout as `T`.
+ let slot: *mut libc::pthread_mutex_t = slot.cast();
+ let mut attr = MaybeUninit::uninit();
+ let attr = attr.as_mut_ptr();
+ // SAFETY: ptr is valid
+ let ret = unsafe { libc::pthread_mutexattr_init(attr) };
+ if ret != 0 {
+ return Err(Error::IO(std::io::Error::from_raw_os_error(ret)));
+ }
+ // SAFETY: attr is initialized
+ let ret = unsafe {
+ libc::pthread_mutexattr_settype(attr, libc::PTHREAD_MUTEX_NORMAL)
+ };
+ if ret != 0 {
+ // SAFETY: attr is initialized
+ unsafe { libc::pthread_mutexattr_destroy(attr) };
+ return Err(Error::IO(std::io::Error::from_raw_os_error(ret)));
+ }
+ // SAFETY: slot is valid
+ unsafe { slot.write(libc::PTHREAD_MUTEX_INITIALIZER) };
+ // SAFETY: attr and slot are valid ptrs and attr is initialized
+ let ret = unsafe { libc::pthread_mutex_init(slot, attr) };
+ // SAFETY: attr was initialized
+ unsafe { libc::pthread_mutexattr_destroy(attr) };
+ if ret != 0 {
+ return Err(Error::IO(std::io::Error::from_raw_os_error(ret)));
+ }
+ Ok(())
+ };
+ // SAFETY: the closure fully initializes the mutex on `Ok(())` and cleans
+ // up after itself before returning `Err(_)`.
+ unsafe { pin_init_from_closure(init) }
+ }
+ try_pin_init!(Self {
+ data: UnsafeCell::new(data),
+ raw <- init_raw(),
+ pin: PhantomPinned,
+ }? Error)
+ }
+
+ pub fn lock(&self) -> PThreadMutexGuard<'_, T> {
+ // SAFETY: raw is always initialized
+ unsafe { libc::pthread_mutex_lock(self.raw.get()) };
+ PThreadMutexGuard { mtx: self }
+ }
+ }
+
+ pub struct PThreadMutexGuard<'a, T> {
+ mtx: &'a PThreadMutex<T>,
+ }
+
+ impl<T> Drop for PThreadMutexGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: raw is always initialized
+ unsafe { libc::pthread_mutex_unlock(self.mtx.raw.get()) };
+ }
+ }
+
+ impl<T> Deref for PThreadMutexGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*self.mtx.data.get() }
+ }
+ }
+
+ impl<T> DerefMut for PThreadMutexGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *self.mtx.data.get() }
+ }
+ }
+}
+
+#[cfg_attr(test, test)]
+fn main() {
+ #[cfg(all(any(feature = "std", feature = "alloc"), not(windows)))]
+ {
+ use core::pin::Pin;
+ use pin_init::*;
+ use pthread_mtx::*;
+ use std::{
+ sync::Arc,
+ thread::{sleep, Builder},
+ time::Duration,
+ };
+ let mtx: Pin<Arc<PThreadMutex<usize>>> = Arc::try_pin_init(PThreadMutex::new(0)).unwrap();
+ let mut handles = vec![];
+ let thread_count = 20;
+ let workload = 1_000_000;
+ for i in 0..thread_count {
+ let mtx = mtx.clone();
+ handles.push(
+ Builder::new()
+ .name(format!("worker #{i}"))
+ .spawn(move || {
+ for _ in 0..workload {
+ *mtx.lock() += 1;
+ }
+ println!("{i} halfway");
+ sleep(Duration::from_millis((i as u64) * 10));
+ for _ in 0..workload {
+ *mtx.lock() += 1;
+ }
+ println!("{i} finished");
+ })
+ .expect("should not fail"),
+ );
+ }
+ for h in handles {
+ h.join().expect("thread panicked");
+ }
+ println!("{:?}", &*mtx.lock());
+ assert_eq!(*mtx.lock(), workload * thread_count * 2);
+ }
+}
diff --git a/rust/pin-init/examples/static_init.rs b/rust/pin-init/examples/static_init.rs
new file mode 100644
index 000000000000..3487d761aa26
--- /dev/null
+++ b/rust/pin-init/examples/static_init.rs
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![allow(clippy::undocumented_unsafe_blocks)]
+#![cfg_attr(feature = "alloc", feature(allocator_api))]
+
+use core::{
+ cell::{Cell, UnsafeCell},
+ mem::MaybeUninit,
+ ops,
+ pin::Pin,
+ time::Duration,
+};
+use pin_init::*;
+use std::{
+ sync::Arc,
+ thread::{sleep, Builder},
+};
+
+#[expect(unused_attributes)]
+mod mutex;
+use mutex::*;
+
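+/// Lazily initializes a value on first dereference; usable in `static`s.
+///
+/// Initialization uses double-checked locking: the fast path reads `present` without
+/// taking the lock, and the slow path re-checks it under the spinlock before running
+/// the stored initializer exactly once.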
+pub struct StaticInit<T, I> {
+ cell: UnsafeCell<MaybeUninit<T>>,
+ init: Cell<Option<I>>,
+ lock: SpinLock,
+ present: Cell<bool>,
+}
+
+unsafe impl<T: Sync, I> Sync for StaticInit<T, I> {}
+unsafe impl<T: Send, I> Send for StaticInit<T, I> {}
+
+impl<T, I: PinInit<T>> StaticInit<T, I> {
+ pub const fn new(init: I) -> Self {
+ Self {
+ cell: UnsafeCell::new(MaybeUninit::uninit()),
+ init: Cell::new(Some(init)),
+ lock: SpinLock::new(),
+ present: Cell::new(false),
+ }
+ }
+}
+
+impl<T, I: PinInit<T>> ops::Deref for StaticInit<T, I> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target {
+ if self.present.get() {
+ unsafe { (*self.cell.get()).assume_init_ref() }
+ } else {
+ println!("acquire spinlock on static init");
+ let _guard = self.lock.acquire();
+ println!("rechecking present...");
+ std::thread::sleep(std::time::Duration::from_millis(200));
+ if self.present.get() {
+ return unsafe { (*self.cell.get()).assume_init_ref() };
+ }
+ println!("doing init");
+ let ptr = self.cell.get().cast::<T>();
+ match self.init.take() {
+ Some(f) => unsafe { f.__pinned_init(ptr).unwrap() },
+ None => unsafe { core::hint::unreachable_unchecked() },
+ }
+ self.present.set(true);
+ unsafe { (*self.cell.get()).assume_init_ref() }
+ }
+ }
+}
+
+pub struct CountInit;
+
+unsafe impl PinInit<CMutex<usize>> for CountInit {
+ unsafe fn __pinned_init(
+ self,
+ slot: *mut CMutex<usize>,
+ ) -> Result<(), core::convert::Infallible> {
+ let init = CMutex::new(0);
+ std::thread::sleep(std::time::Duration::from_millis(1000));
+ unsafe { init.__pinned_init(slot) }
+ }
+}
+
+pub static COUNT: StaticInit<CMutex<usize>, CountInit> = StaticInit::new(CountInit);
+
+#[cfg(not(any(feature = "std", feature = "alloc")))]
+fn main() {}
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+fn main() {
+ let mtx: Pin<Arc<CMutex<usize>>> = Arc::pin_init(CMutex::new(0)).unwrap();
+ let mut handles = vec![];
+ let thread_count = 20;
+ let workload = 1_000;
+ for i in 0..thread_count {
+ let mtx = mtx.clone();
+ handles.push(
+ Builder::new()
+ .name(format!("worker #{i}"))
+ .spawn(move || {
+ for _ in 0..workload {
+ *COUNT.lock() += 1;
+ std::thread::sleep(std::time::Duration::from_millis(10));
+ *mtx.lock() += 1;
+ std::thread::sleep(std::time::Duration::from_millis(10));
+ *COUNT.lock() += 1;
+ }
+ println!("{i} halfway");
+ sleep(Duration::from_millis((i as u64) * 10));
+ for _ in 0..workload {
+ std::thread::sleep(std::time::Duration::from_millis(10));
+ *mtx.lock() += 1;
+ }
+ println!("{i} finished");
+ })
+ .expect("should not fail"),
+ );
+ }
+ for h in handles {
+ h.join().expect("thread panicked");
+ }
+ println!("{:?}, {:?}", &*mtx.lock(), &*COUNT.lock());
+ assert_eq!(*mtx.lock(), workload * thread_count * 2);
+}
diff --git a/rust/pin-init/internal/src/helpers.rs b/rust/pin-init/internal/src/helpers.rs
new file mode 100644
index 000000000000..236f989a50f2
--- /dev/null
+++ b/rust/pin-init/internal/src/helpers.rs
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#[cfg(not(kernel))]
+use proc_macro2 as proc_macro;
+
+use proc_macro::{TokenStream, TokenTree};
+
+/// Parsed generics.
+///
+/// See the field documentation for an explanation of what each field represents.
+///
+/// # Examples
+///
+/// ```rust,ignore
+/// # let input = todo!();
+/// let (Generics { decl_generics, impl_generics, ty_generics }, rest) = parse_generics(input);
+/// quote! {
+/// struct Foo<$($decl_generics)*> {
+/// // ...
+/// }
+///
+/// impl<$($impl_generics)*> Foo<$($ty_generics)*> {
+/// fn foo() {
+/// // ...
+/// }
+/// }
+/// }
+/// ```
+pub(crate) struct Generics {
+ /// The generics with bounds and default values (e.g. `T: Clone, const N: usize = 0`).
+ ///
+ /// Use this on type definitions e.g. `struct Foo<$decl_generics> ...` (or `union`/`enum`).
+ pub(crate) decl_generics: Vec<TokenTree>,
+ /// The generics with bounds (e.g. `T: Clone, const N: usize`).
+ ///
+ /// Use this on `impl` blocks e.g. `impl<$impl_generics> Trait for ...`.
+ pub(crate) impl_generics: Vec<TokenTree>,
+ /// The generics without bounds and without default values (e.g. `T, N`).
+ ///
+ /// Use this when you use the type that is declared with these generics e.g.
+ /// `Foo<$ty_generics>`.
+ pub(crate) ty_generics: Vec<TokenTree>,
+}
+
+/// Parses the given `TokenStream` into `Generics` and the rest.
+///
+/// The generics are not present in the rest, but a where clause might remain.
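+///
+/// # Examples
+///
+/// For instance (an illustrative sketch, not a doc-test), parsing the tokens of
+/// `struct Foo<'a, T: Clone = u32> where T: Default {}` would yield
+/// `decl_generics = 'a, T: Clone = u32`, `impl_generics = 'a, T: Clone` and
+/// `ty_generics = 'a, T`, with everything outside of the `<...>` (including the
+/// `where` clause) returned as the rest.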
+pub(crate) fn parse_generics(input: TokenStream) -> (Generics, Vec<TokenTree>) {
+ // The generics with bounds and default values.
+ let mut decl_generics = vec![];
+ // `impl_generics`, the declared generics with their bounds.
+ let mut impl_generics = vec![];
+ // Only the names of the generics, without any bounds.
+ let mut ty_generics = vec![];
+ // Tokens not related to the generics e.g. the `where` token and definition.
+ let mut rest = vec![];
+ // The current level of `<`.
+ let mut nesting = 0;
+ let mut toks = input.into_iter();
+ // If we are at the beginning of a generic parameter.
+ let mut at_start = true;
+ let mut skip_until_comma = false;
+ while let Some(tt) = toks.next() {
+ if nesting == 1 && matches!(&tt, TokenTree::Punct(p) if p.as_char() == '>') {
+ // Found the end of the generics.
+ break;
+ } else if nesting >= 1 {
+ decl_generics.push(tt.clone());
+ }
+ match tt.clone() {
+ TokenTree::Punct(p) if p.as_char() == '<' => {
+ if nesting >= 1 && !skip_until_comma {
+ // This is inside of the generics and part of some bound.
+ impl_generics.push(tt);
+ }
+ nesting += 1;
+ }
+ TokenTree::Punct(p) if p.as_char() == '>' => {
+ // This is a parsing error, so we just end it here.
+ if nesting == 0 {
+ break;
+ } else {
+ nesting -= 1;
+ if nesting >= 1 && !skip_until_comma {
+ // We are still inside of the generics and part of some bound.
+ impl_generics.push(tt);
+ }
+ }
+ }
+ TokenTree::Punct(p) if skip_until_comma && p.as_char() == ',' => {
+ if nesting == 1 {
+ // The `,` terminates the skipped default value; it must separate the
+ // parameters in both lists, and a new parameter starts afterwards.
+ impl_generics.push(tt.clone());
+ ty_generics.push(tt);
+ at_start = true;
+ skip_until_comma = false;
+ }
+ }
+ _ if !skip_until_comma => {
+ match nesting {
+ // If we haven't entered the generics yet, we still want to keep these tokens.
+ 0 => rest.push(tt),
+ 1 => {
+ // Here depending on the token, it might be a generic variable name.
+ match tt.clone() {
+ TokenTree::Ident(i) if at_start && i.to_string() == "const" => {
+ let Some(name) = toks.next() else {
+ // Parsing error.
+ break;
+ };
+ impl_generics.push(tt);
+ impl_generics.push(name.clone());
+ ty_generics.push(name.clone());
+ decl_generics.push(name);
+ at_start = false;
+ }
+ TokenTree::Ident(_) if at_start => {
+ impl_generics.push(tt.clone());
+ ty_generics.push(tt);
+ at_start = false;
+ }
+ TokenTree::Punct(p) if p.as_char() == ',' => {
+ impl_generics.push(tt.clone());
+ ty_generics.push(tt);
+ at_start = true;
+ }
+ // Lifetimes begin with `'`.
+ TokenTree::Punct(p) if p.as_char() == '\'' && at_start => {
+ impl_generics.push(tt.clone());
+ ty_generics.push(tt);
+ }
+ // Generics can have default values, we skip these.
+ TokenTree::Punct(p) if p.as_char() == '=' => {
+ skip_until_comma = true;
+ }
+ _ => impl_generics.push(tt),
+ }
+ }
+ _ => impl_generics.push(tt),
+ }
+ }
+ _ => {}
+ }
+ }
+ rest.extend(toks);
+ (
+ Generics {
+ impl_generics,
+ decl_generics,
+ ty_generics,
+ },
+ rest,
+ )
+}
diff --git a/rust/pin-init/internal/src/lib.rs b/rust/pin-init/internal/src/lib.rs
new file mode 100644
index 000000000000..babe5e878550
--- /dev/null
+++ b/rust/pin-init/internal/src/lib.rs
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT`
+// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is
+// touched by Kconfig when the version string from the compiler changes.
+
+//! `pin-init` proc macros.
+
+#![cfg_attr(not(RUSTC_LINT_REASONS_IS_STABLE), feature(lint_reasons))]
+// Allow `.into()` to convert
+// - `proc_macro2::TokenStream` into `proc_macro::TokenStream` in the user-space version.
+// - `proc_macro::TokenStream` into `proc_macro::TokenStream` in the kernel version.
+// Clippy warns on this conversion, but it's required by the user-space version.
+//
+// Remove once we have `proc_macro2` in the kernel.
+#![allow(clippy::useless_conversion)]
+// Documentation is done in the pin-init crate instead.
+#![allow(missing_docs)]
+
+use proc_macro::TokenStream;
+
+#[cfg(kernel)]
+#[path = "../../../macros/quote.rs"]
+#[macro_use]
+mod quote;
+#[cfg(not(kernel))]
+#[macro_use]
+extern crate quote;
+
+mod helpers;
+mod pin_data;
+mod pinned_drop;
+mod zeroable;
+
+#[proc_macro_attribute]
+pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream {
+ pin_data::pin_data(inner.into(), item.into()).into()
+}
+
+#[proc_macro_attribute]
+pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
+ pinned_drop::pinned_drop(args.into(), input.into()).into()
+}
+
+#[proc_macro_derive(Zeroable)]
+pub fn derive_zeroable(input: TokenStream) -> TokenStream {
+ zeroable::derive(input.into()).into()
+}
diff --git a/rust/macros/pin_data.rs b/rust/pin-init/internal/src/pin_data.rs
index 6d58cfda9872..87d4a7eb1d35 100644
--- a/rust/macros/pin_data.rs
+++ b/rust/pin-init/internal/src/pin_data.rs
@@ -1,15 +1,19 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+#[cfg(not(kernel))]
+use proc_macro2 as proc_macro;
+
use crate::helpers::{parse_generics, Generics};
use proc_macro::{Group, Punct, Spacing, TokenStream, TokenTree};
pub(crate) fn pin_data(args: TokenStream, input: TokenStream) -> TokenStream {
// This proc-macro only does some pre-parsing and then delegates the actual parsing to
- // `kernel::__pin_data!`.
+ // `pin_init::__pin_data!`.
let (
Generics {
impl_generics,
+ decl_generics,
ty_generics,
},
rest,
@@ -70,12 +74,13 @@ pub(crate) fn pin_data(args: TokenStream, input: TokenStream) -> TokenStream {
.collect::<Vec<_>>();
// This should be the body of the struct `{...}`.
let last = rest.pop();
- let mut quoted = quote!(::kernel::__pin_data! {
+ let mut quoted = quote!(::pin_init::__pin_data! {
parse_input:
@args(#args),
@sig(#(#rest)*),
@impl_generics(#(#impl_generics)*),
@ty_generics(#(#ty_generics)*),
+ @decl_generics(#(#decl_generics)*),
@body(#last),
});
quoted.extend(errs);
diff --git a/rust/macros/pinned_drop.rs b/rust/pin-init/internal/src/pinned_drop.rs
index 88fb72b20660..c4ca7a70b726 100644
--- a/rust/macros/pinned_drop.rs
+++ b/rust/pin-init/internal/src/pinned_drop.rs
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+#[cfg(not(kernel))]
+use proc_macro2 as proc_macro;
+
use proc_macro::{TokenStream, TokenTree};
pub(crate) fn pinned_drop(_args: TokenStream, input: TokenStream) -> TokenStream {
@@ -25,8 +28,7 @@ pub(crate) fn pinned_drop(_args: TokenStream, input: TokenStream) -> TokenStream
// Found the end of the generics, this should be `PinnedDrop`.
assert!(
matches!(tt, TokenTree::Ident(i) if i.to_string() == "PinnedDrop"),
- "expected 'PinnedDrop', found: '{:?}'",
- tt
+ "expected 'PinnedDrop', found: '{tt:?}'"
);
pinned_drop_idx = Some(i);
break;
@@ -35,11 +37,11 @@ pub(crate) fn pinned_drop(_args: TokenStream, input: TokenStream) -> TokenStream
let idx = pinned_drop_idx
.unwrap_or_else(|| panic!("Expected an `impl` block implementing `PinnedDrop`."));
// Fully qualify the `PinnedDrop`, as to avoid any tampering.
- toks.splice(idx..idx, quote!(::kernel::init::));
+ toks.splice(idx..idx, quote!(::pin_init::));
// Take the `{}` body and call the declarative macro.
if let Some(TokenTree::Group(last)) = toks.pop() {
let last = last.stream();
- quote!(::kernel::__pinned_drop! {
+ quote!(::pin_init::__pinned_drop! {
@impl_sig(#(#toks)*),
@impl_body(#last),
})
diff --git a/rust/macros/zeroable.rs b/rust/pin-init/internal/src/zeroable.rs
index 0d605c46ab3b..acc94008c152 100644
--- a/rust/macros/zeroable.rs
+++ b/rust/pin-init/internal/src/zeroable.rs
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+#[cfg(not(kernel))]
+use proc_macro2 as proc_macro;
+
use crate::helpers::{parse_generics, Generics};
use proc_macro::{TokenStream, TokenTree};
@@ -7,6 +10,7 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream {
let (
Generics {
impl_generics,
+ decl_generics: _,
ty_generics,
},
mut rest,
@@ -26,7 +30,7 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream {
// If we find a `,`, then we have finished a generic/constant/lifetime parameter.
TokenTree::Punct(p) if nested == 0 && p.as_char() == ',' => {
if in_generic && !inserted {
- new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ new_impl_generics.extend(quote! { : ::pin_init::Zeroable });
}
in_generic = true;
inserted = false;
@@ -40,7 +44,7 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream {
TokenTree::Punct(p) if nested == 0 && p.as_char() == ':' => {
new_impl_generics.push(tt);
if in_generic {
- new_impl_generics.extend(quote! { ::kernel::init::Zeroable + });
+ new_impl_generics.extend(quote! { ::pin_init::Zeroable + });
inserted = true;
}
}
@@ -58,10 +62,10 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream {
}
assert_eq!(nested, 0);
if in_generic && !inserted {
- new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ new_impl_generics.extend(quote! { : ::pin_init::Zeroable });
}
quote! {
- ::kernel::__derive_zeroable!(
+ ::pin_init::__derive_zeroable!(
parse_input:
@sig(#(#rest)*),
@impl_generics(#(#new_impl_generics)*),
diff --git a/rust/kernel/init/__internal.rs b/rust/pin-init/src/__internal.rs
index db3372619ecd..557b5948cddc 100644
--- a/rust/kernel/init/__internal.rs
+++ b/rust/pin-init/src/__internal.rs
@@ -1,23 +1,25 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
-//! This module contains API-internal items for pin-init.
+//! This module contains library internal items.
//!
-//! These items must not be used outside of
-//! - `kernel/init.rs`
-//! - `macros/pin_data.rs`
-//! - `macros/pinned_drop.rs`
+//! These items must not be used outside of this crate and the pin-init-internal crate located at
+//! `../internal`.
use super::*;
/// See the [nomicon] for what subtyping is. See also [this table].
///
+/// The reason for not using `PhantomData<*mut T>` is that that type never implements [`Send`] and
+/// [`Sync`]. Hence `fn(*mut T) -> *mut T` is used, as that type always implements them.
+///
/// [nomicon]: https://doc.rust-lang.org/nomicon/subtyping.html
/// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns
-pub(super) type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
+pub(crate) type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
-/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this
-/// type, since the closure needs to fulfill the same safety requirement as the
-/// `__pinned_init`/`__init` functions.
+/// Module-internal type implementing `PinInit` and `Init`.
+///
+/// It is unsafe to create this type, since the closure needs to fulfill the same safety
+/// requirement as the `__pinned_init`/`__init` functions.
pub(crate) struct InitClosure<F, T: ?Sized, E>(pub(crate) F, pub(crate) Invariant<(E, T)>);
// SAFETY: While constructing the `InitClosure`, the user promised that it upholds the
@@ -53,6 +55,7 @@ where
pub unsafe trait HasPinData {
type PinData: PinData;
+ #[expect(clippy::missing_safety_doc)]
unsafe fn __pin_data() -> Self::PinData;
}
@@ -82,6 +85,7 @@ pub unsafe trait PinData: Copy {
pub unsafe trait HasInitData {
type InitData: InitData;
+ #[expect(clippy::missing_safety_doc)]
unsafe fn __init_data() -> Self::InitData;
}
@@ -102,7 +106,7 @@ pub unsafe trait InitData: Copy {
}
}
-pub struct AllData<T: ?Sized>(PhantomData<fn(Box<T>) -> Box<T>>);
+pub struct AllData<T: ?Sized>(Invariant<T>);
impl<T: ?Sized> Clone for AllData<T> {
fn clone(&self) -> Self {
@@ -112,10 +116,12 @@ impl<T: ?Sized> Clone for AllData<T> {
impl<T: ?Sized> Copy for AllData<T> {}
+// SAFETY: TODO.
unsafe impl<T: ?Sized> InitData for AllData<T> {
type Datee = T;
}
+// SAFETY: TODO.
unsafe impl<T: ?Sized> HasInitData for T {
type InitData = AllData<T>;
@@ -130,7 +136,7 @@ unsafe impl<T: ?Sized> HasInitData for T {
///
/// If `self.is_init` is true, then `self.value` is initialized.
///
-/// [`stack_pin_init`]: kernel::stack_pin_init
+/// [`stack_pin_init`]: crate::stack_pin_init
pub struct StackInit<T> {
value: MaybeUninit<T>,
is_init: bool,
@@ -151,7 +157,7 @@ impl<T> StackInit<T> {
/// Creates a new [`StackInit<T>`] that is uninitialized. Use [`stack_pin_init`] instead of this
/// primitive.
///
- /// [`stack_pin_init`]: kernel::stack_pin_init
+ /// [`stack_pin_init`]: crate::stack_pin_init
#[inline]
pub fn uninit() -> Self {
Self {
@@ -181,6 +187,33 @@ impl<T> StackInit<T> {
}
}
+#[test]
+fn stack_init_reuse() {
+ use ::std::{borrow::ToOwned, println, string::String};
+ use core::pin::pin;
+
+ #[derive(Debug)]
+ struct Foo {
+ a: usize,
+ b: String,
+ }
+ let mut slot: Pin<&mut StackInit<Foo>> = pin!(StackInit::uninit());
+ let value: Result<Pin<&mut Foo>, core::convert::Infallible> =
+ slot.as_mut().init(crate::init!(Foo {
+ a: 42,
+ b: "Hello".to_owned(),
+ }));
+ let value = value.unwrap();
+ println!("{value:?}");
+ let value: Result<Pin<&mut Foo>, core::convert::Infallible> =
+ slot.as_mut().init(crate::init!(Foo {
+ a: 24,
+ b: "world!".to_owned(),
+ }));
+ let value = value.unwrap();
+ println!("{value:?}");
+}
+
/// When a value of this type is dropped, it drops a `T`.
///
/// Can be forgotten to prevent the drop.
@@ -228,3 +261,32 @@ impl OnlyCallFromDrop {
Self(())
}
}
+
+/// Initializer that always fails.
+///
+/// Used by [`assert_pinned!`].
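+///
+/// For example (a doc-only sketch), running it never initializes the slot:
+///
+/// ```rust,ignore
+/// let init = AlwaysFail::<u8>::new();
+/// let mut slot = 0u8;
+/// // SAFETY: `slot` is a valid pointer; `AlwaysFail` never writes to it.
+/// assert!(unsafe { init.__pinned_init(&mut slot as *mut u8) }.is_err());
+/// ```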
+///
+/// [`assert_pinned!`]: crate::assert_pinned
+pub struct AlwaysFail<T: ?Sized> {
+ _t: PhantomData<T>,
+}
+
+impl<T: ?Sized> AlwaysFail<T> {
+ /// Creates a new initializer that always fails.
+ pub fn new() -> Self {
+ Self { _t: PhantomData }
+ }
+}
+
+impl<T: ?Sized> Default for AlwaysFail<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// SAFETY: `__pinned_init` always fails, which is always okay.
+unsafe impl<T: ?Sized> PinInit<T, ()> for AlwaysFail<T> {
+ unsafe fn __pinned_init(self, _slot: *mut T) -> Result<(), ()> {
+ Err(())
+ }
+}
diff --git a/rust/pin-init/src/alloc.rs b/rust/pin-init/src/alloc.rs
new file mode 100644
index 000000000000..5017f57442d8
--- /dev/null
+++ b/rust/pin-init/src/alloc.rs
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+use alloc::{boxed::Box, sync::Arc};
+#[cfg(feature = "alloc")]
+use core::alloc::AllocError;
+use core::{mem::MaybeUninit, pin::Pin};
+#[cfg(feature = "std")]
+use std::sync::Arc;
+
+#[cfg(not(feature = "alloc"))]
+type AllocError = core::convert::Infallible;
+
+use crate::{
+ init_from_closure, pin_init_from_closure, InPlaceWrite, Init, PinInit, ZeroableOption,
+};
+
+pub extern crate alloc;
+
+// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee:
+// <https://doc.rust-lang.org/stable/std/option/index.html#representation>).
+unsafe impl<T> ZeroableOption for Box<T> {}
+
+/// Smart pointer that can initialize memory in-place.
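+///
+/// # Examples
+///
+/// A minimal sketch (doc-only; assumes the `std` feature so that `Box` is available, and uses a
+/// made-up `Counter` type):
+///
+/// ```rust,ignore
+/// use pin_init::{pin_data, pin_init, InPlaceInit};
+///
+/// #[pin_data]
+/// struct Counter {
+/// value: usize,
+/// }
+///
+/// // `Box::pin_init` runs the initializer directly in the heap allocation.
+/// let counter = Box::pin_init(pin_init!(Counter { value: 0 })).unwrap();
+/// assert_eq!(counter.value, 0);
+/// ```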
+pub trait InPlaceInit<T>: Sized {
+ /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
+ /// type.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
+ where
+ E: From<AllocError>;
+
+ /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
+ /// type.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ fn pin_init(init: impl PinInit<T>) -> Result<Pin<Self>, AllocError> {
+ // SAFETY: We delegate to `init` and only change the error type.
+ let init = unsafe {
+ pin_init_from_closure(|slot| match init.__pinned_init(slot) {
+ Ok(()) => Ok(()),
+ Err(i) => match i {},
+ })
+ };
+ Self::try_pin_init(init)
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
+ where
+ E: From<AllocError>;
+
+ /// Use the given initializer to in-place initialize a `T`.
+ fn init(init: impl Init<T>) -> Result<Self, AllocError> {
+ // SAFETY: We delegate to `init` and only change the error type.
+ let init = unsafe {
+ init_from_closure(|slot| match init.__init(slot) {
+ Ok(()) => Ok(()),
+ Err(i) => match i {},
+ })
+ };
+ Self::try_init(init)
+ }
+}
+
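+// With the `alloc` feature, allocation is fallible (nightly `try_new_uninit`); with
+// only `std`, the infallible `new_uninit` is used and `AllocError` is aliased to
+// `Infallible` above, so no error can actually occur.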
+#[cfg(feature = "alloc")]
+macro_rules! try_new_uninit {
+ ($type:ident) => {
+ $type::try_new_uninit()?
+ };
+}
+#[cfg(all(feature = "std", not(feature = "alloc")))]
+macro_rules! try_new_uninit {
+ ($type:ident) => {
+ $type::new_uninit()
+ };
+}
+
+impl<T> InPlaceInit<T> for Box<T> {
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
+ where
+ E: From<AllocError>,
+ {
+ try_new_uninit!(Box).write_pin_init(init)
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ try_new_uninit!(Box).write_init(init)
+ }
+}
+
+impl<T> InPlaceInit<T> for Arc<T> {
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
+ where
+ E: From<AllocError>,
+ {
+ let mut this = try_new_uninit!(Arc);
+ let Some(slot) = Arc::get_mut(&mut this) else {
+ // SAFETY: the Arc has just been created and has no external references
+ unsafe { core::hint::unreachable_unchecked() }
+ };
+ let slot = slot.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized and this is the only `Arc` to that data.
+ Ok(unsafe { Pin::new_unchecked(this.assume_init()) })
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ let mut this = try_new_uninit!(Arc);
+ let Some(slot) = Arc::get_mut(&mut this) else {
+ // SAFETY: the Arc has just been created and has no external references
+ unsafe { core::hint::unreachable_unchecked() }
+ };
+ let slot = slot.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { this.assume_init() })
+ }
+}
+
+impl<T> InPlaceWrite<T> for Box<MaybeUninit<T>> {
+ type Initialized = Box<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
+ }
+}
diff --git a/rust/pin-init/src/lib.rs b/rust/pin-init/src/lib.rs
new file mode 100644
index 000000000000..0806c689f693
--- /dev/null
+++ b/rust/pin-init/src/lib.rs
@@ -0,0 +1,1483 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Library to safely and fallibly initialize pinned `struct`s using in-place constructors.
+//!
+//! [Pinning][pinning] is Rust's way of ensuring data does not move.
+//!
+//! It also allows in-place initialization of big `struct`s that would otherwise produce a stack
+//! overflow.
+//!
+//! This library's main use-case is in [Rust-for-Linux], although this version can be used
+//! standalone.
+//!
+//! There are cases when you want to in-place initialize a struct: for example, when it is very big
+//! and moving it from the stack is not an option, because it is bigger than the stack itself.
+//! Another reason would be that you need the address of the object to initialize it. This stands
+//! in direct conflict with Rust's normal process of first initializing an object and then moving
+//! it into its final memory location. For more information, see
+//! <https://rust-for-linux.com/the-safe-pinned-initialization-problem>.
+//!
+//! This library allows you to do in-place initialization safely.
+//!
+//! ## Nightly Needed for `alloc` feature
+//!
+//! This library requires the [`allocator_api` unstable feature] when the `alloc` feature is
+//! enabled and thus this feature can only be used with a nightly compiler. When enabling the
+//! `alloc` feature, the user will be required to activate `allocator_api` as well.
+//!
+//! [`allocator_api` unstable feature]: https://doc.rust-lang.org/nightly/unstable-book/library-features/allocator-api.html
+//!
+//! The feature is enabled by default, so out of the box `pin-init` will require a nightly compiler.
+//! However, using the crate on stable compilers is possible by disabling `alloc`. In practice this
+//! will require the `std` feature, because stable compilers have neither `Box` nor `Arc` in no-std
+//! mode.
+//!
+//! # Overview
+//!
+//! To initialize a `struct` with an in-place constructor you will need two things:
+//! - an in-place constructor,
+//! - a memory location that can hold your `struct` (this can be the [stack], an [`Arc<T>`],
+//! [`Box<T>`] or any other smart pointer that supports this library).
+//!
+//! To get an in-place constructor there are generally three options:
+//! - directly creating an in-place constructor using the [`pin_init!`] macro,
+//! - a custom function/macro returning an in-place constructor provided by someone else,
+//! - using the unsafe function [`pin_init_from_closure()`] to manually create an initializer.
+//!
+//! Aside from pinned initialization, this library also supports in-place construction without
+//! pinning, the macros/types/functions are generally named like the pinned variants without the
+//! `pin_` prefix.
+//!
+//! # Examples
+//!
+//! Throughout the examples we will often make use of the `CMutex` type which can be found in
+//! `../examples/mutex.rs`. It is essentially a userland rebuild of the `struct mutex` type from
+//! the Linux kernel. It also uses a wait list and a basic spinlock. Importantly the wait list
+//! requires it to be pinned to be locked and thus is a prime candidate for using this library.
+//!
+//! ## Using the [`pin_init!`] macro
+//!
+//! If you want to use [`PinInit`], then you will have to annotate your `struct` with
+//! `#[`[`pin_data`]`]`. It is a macro that uses `#[pin]` as a marker for
+//! [structurally pinned fields]. After doing this, you can then create an in-place constructor via
+//! [`pin_init!`]. The syntax is almost the same as normal `struct` initializers. The difference is
+//! that you need to write `<-` instead of `:` for fields that you want to initialize in-place.
+//!
+//! ```rust
+//! # #![expect(clippy::disallowed_names)]
+//! # #![feature(allocator_api)]
+//! # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+//! # use core::pin::Pin;
+//! use pin_init::{pin_data, pin_init, InPlaceInit};
+//!
+//! #[pin_data]
+//! struct Foo {
+//! #[pin]
+//! a: CMutex<usize>,
+//! b: u32,
+//! }
+//!
+//! let foo = pin_init!(Foo {
+//! a <- CMutex::new(42),
+//! b: 24,
+//! });
+//! # let _ = Box::pin_init(foo);
+//! ```
+//!
+//! `foo` is now of type [`impl PinInit<Foo>`]. We can use any smart pointer that we like
+//! (or just the stack) to actually initialize a `Foo`:
+//!
+//! ```rust
+//! # #![expect(clippy::disallowed_names)]
+//! # #![feature(allocator_api)]
+//! # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+//! # use core::{alloc::AllocError, pin::Pin};
+//! # use pin_init::*;
+//! #
+//! # #[pin_data]
+//! # struct Foo {
+//! # #[pin]
+//! # a: CMutex<usize>,
+//! # b: u32,
+//! # }
+//! #
+//! # let foo = pin_init!(Foo {
+//! # a <- CMutex::new(42),
+//! # b: 24,
+//! # });
+//! let foo: Result<Pin<Box<Foo>>, AllocError> = Box::pin_init(foo);
+//! ```
+//!
+//! For more information see the [`pin_init!`] macro.
+//!
+//! ## Using a custom function/macro that returns an initializer
+//!
+//! Many types that use this library supply a function/macro that returns an initializer, because
+//! the above method only works for types where you can access the fields.
+//!
+//! ```rust
+//! # #![feature(allocator_api)]
+//! # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+//! # use pin_init::*;
+//! # use std::sync::Arc;
+//! # use core::pin::Pin;
+//! let mtx: Result<Pin<Arc<CMutex<usize>>>, _> = Arc::pin_init(CMutex::new(42));
+//! ```
+//!
+//! To declare an init macro/function you just return an [`impl PinInit<T, E>`]:
+//!
+//! ```rust
+//! # #![feature(allocator_api)]
+//! # use pin_init::*;
+//! # #[path = "../examples/error.rs"] mod error; use error::Error;
+//! # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+//! #[pin_data]
+//! struct DriverData {
+//! #[pin]
+//! status: CMutex<i32>,
+//! buffer: Box<[u8; 1_000_000]>,
+//! }
+//!
+//! impl DriverData {
+//! fn new() -> impl PinInit<Self, Error> {
+//! try_pin_init!(Self {
+//! status <- CMutex::new(0),
+//! buffer: Box::init(pin_init::zeroed())?,
+//! }? Error)
+//! }
+//! }
+//! ```
+//!
+//! ## Manual creation of an initializer
+//!
+//! Often when working with primitives the previous approaches are not sufficient. That is where
+//! [`pin_init_from_closure()`] comes in. This `unsafe` function allows you to create an
+//! [`impl PinInit<T, E>`] directly from a closure. Of course you have to ensure that the closure
+//! actually does the initialization in the correct way. Here are the things to look out for
+//! (we are calling the parameter to the closure `slot`):
+//! - when the closure returns `Ok(())`, then it has completed the initialization successfully, so
+//! `slot` now contains a valid bit pattern for the type `T`,
+//! - when the closure returns `Err(e)`, then the caller may deallocate the memory at `slot`, so
+//! you need to take care to clean up anything if your initialization fails mid-way,
+//! - you may assume that `slot` will stay pinned even after the closure returns until `drop` of
+//! `slot` gets called.
+//!
+//! ```rust
+//! # #![feature(extern_types)]
+//! use pin_init::{pin_data, pinned_drop, PinInit, PinnedDrop, pin_init_from_closure};
+//! use core::{
+//! ptr::addr_of_mut,
+//! marker::PhantomPinned,
+//! cell::UnsafeCell,
+//! pin::Pin,
+//! mem::MaybeUninit,
+//! };
+//! mod bindings {
+//! #[repr(C)]
+//! pub struct foo {
+//! /* fields from C ... */
+//! }
+//! extern "C" {
+//! pub fn init_foo(ptr: *mut foo);
+//! pub fn destroy_foo(ptr: *mut foo);
+//! #[must_use = "you must check the error return code"]
+//! pub fn enable_foo(ptr: *mut foo, flags: u32) -> i32;
+//! }
+//! }
+//!
+//! /// # Invariants
+//! ///
+//! /// `foo` is always initialized
+//! #[pin_data(PinnedDrop)]
+//! pub struct RawFoo {
+//! #[pin]
+//! _p: PhantomPinned,
+//! #[pin]
+//! foo: UnsafeCell<MaybeUninit<bindings::foo>>,
+//! }
+//!
+//! impl RawFoo {
+//! pub fn new(flags: u32) -> impl PinInit<Self, i32> {
+//! // SAFETY:
+//! // - when the closure returns `Ok(())`, then it has successfully initialized and
+//! // enabled `foo`,
+//! // - when it returns `Err(e)`, then it has cleaned up before
+//! unsafe {
+//! pin_init_from_closure(move |slot: *mut Self| {
+//! // `slot` contains uninit memory, avoid creating a reference.
+//! let foo = addr_of_mut!((*slot).foo);
+//! let foo = UnsafeCell::raw_get(foo).cast::<bindings::foo>();
+//!
+//! // Initialize the `foo`
+//! bindings::init_foo(foo);
+//!
+//! // Try to enable it.
+//! let err = bindings::enable_foo(foo, flags);
+//! if err != 0 {
+//! // Enabling has failed, first clean up the foo and then return the error.
+//! bindings::destroy_foo(foo);
+//! Err(err)
+//! } else {
+//! // All fields of `RawFoo` have been initialized, since `_p` is a ZST.
+//! Ok(())
+//! }
+//! })
+//! }
+//! }
+//! }
+//!
+//! #[pinned_drop]
+//! impl PinnedDrop for RawFoo {
+//! fn drop(self: Pin<&mut Self>) {
+//! // SAFETY: Since `foo` is initialized, destroying is safe.
+//! unsafe { bindings::destroy_foo(self.foo.get().cast::<bindings::foo>()) };
+//! }
+//! }
+//! ```
+//!
+//! For more information on how to use [`pin_init_from_closure()`], take a look at the uses inside
+//! the `kernel` crate. The [`sync`] module is a good starting point.
+//!
+//! [`sync`]: https://rust.docs.kernel.org/kernel/sync/index.html
+//! [pinning]: https://doc.rust-lang.org/std/pin/index.html
+//! [structurally pinned fields]:
+//! https://doc.rust-lang.org/std/pin/index.html#pinning-is-structural-for-field
+//! [stack]: crate::stack_pin_init
+#![cfg_attr(
+ kernel,
+ doc = "[`Arc<T>`]: https://rust.docs.kernel.org/kernel/sync/struct.Arc.html"
+)]
+#![cfg_attr(
+ kernel,
+ doc = "[`Box<T>`]: https://rust.docs.kernel.org/kernel/alloc/kbox/struct.Box.html"
+)]
+#![cfg_attr(not(kernel), doc = "[`Arc<T>`]: alloc::alloc::sync::Arc")]
+#![cfg_attr(not(kernel), doc = "[`Box<T>`]: alloc::alloc::boxed::Box")]
+//! [`impl PinInit<Foo>`]: crate::PinInit
+//! [`impl PinInit<T, E>`]: crate::PinInit
+//! [`impl Init<T, E>`]: crate::Init
+//! [Rust-for-Linux]: https://rust-for-linux.com/
+
+#![cfg_attr(not(RUSTC_LINT_REASONS_IS_STABLE), feature(lint_reasons))]
+#![cfg_attr(
+ all(
+ any(feature = "alloc", feature = "std"),
+ not(RUSTC_NEW_UNINIT_IS_STABLE)
+ ),
+ feature(new_uninit)
+)]
+#![forbid(missing_docs, unsafe_op_in_unsafe_fn)]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(feature = "alloc", feature(allocator_api))]
+
+use core::{
+ cell::UnsafeCell,
+ convert::Infallible,
+ marker::PhantomData,
+ mem::MaybeUninit,
+ num::*,
+ pin::Pin,
+ ptr::{self, NonNull},
+};
+
+#[doc(hidden)]
+pub mod __internal;
+#[doc(hidden)]
+pub mod macros;
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+mod alloc;
+#[cfg(any(feature = "std", feature = "alloc"))]
+pub use alloc::InPlaceInit;
+
+/// Used to specify the pinning information of the fields of a struct.
+///
+/// This is somewhat similar in purpose to
+/// [pin-project-lite](https://crates.io/crates/pin-project-lite).
+/// Place this macro on a struct definition and then `#[pin]` in front of the attributes of each
+/// field you want to structurally pin.
+///
+/// This macro enables the use of the [`pin_init!`] macro. When pin-initializing a `struct`,
+/// `#[pin]` determines which kind of initializer is required for each field.
+///
+/// If your `struct` implements `Drop`, then you need to pass `PinnedDrop` as the argument to this
+/// macro, and change your `Drop` implementation to `PinnedDrop` annotated with
+/// `#[`[`macro@pinned_drop`]`]`, since dropping pinned values requires extra care.
+///
+/// # Examples
+///
+/// ```
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// use pin_init::pin_data;
+///
+/// enum Command {
+/// /* ... */
+/// }
+///
+/// #[pin_data]
+/// struct DriverData {
+/// #[pin]
+/// queue: CMutex<Vec<Command>>,
+/// buf: Box<[u8; 1024 * 1024]>,
+/// }
+/// ```
+///
+/// ```
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # mod bindings { pub struct info; pub unsafe fn destroy_info(_: *mut info) {} }
+/// use core::pin::Pin;
+/// use pin_init::{pin_data, pinned_drop, PinnedDrop};
+///
+/// enum Command {
+/// /* ... */
+/// }
+///
+/// #[pin_data(PinnedDrop)]
+/// struct DriverData {
+/// #[pin]
+/// queue: CMutex<Vec<Command>>,
+/// buf: Box<[u8; 1024 * 1024]>,
+/// raw_info: *mut bindings::info,
+/// }
+///
+/// #[pinned_drop]
+/// impl PinnedDrop for DriverData {
+/// fn drop(self: Pin<&mut Self>) {
+/// unsafe { bindings::destroy_info(self.raw_info) };
+/// }
+/// }
+/// ```
+pub use ::pin_init_internal::pin_data;
+
+/// Used to implement `PinnedDrop` safely.
+///
+/// Only works on structs that are annotated via `#[`[`macro@pin_data`]`]`.
+///
+/// # Examples
+///
+/// ```
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # mod bindings { pub struct info; pub unsafe fn destroy_info(_: *mut info) {} }
+/// use core::pin::Pin;
+/// use pin_init::{pin_data, pinned_drop, PinnedDrop};
+///
+/// enum Command {
+/// /* ... */
+/// }
+///
+/// #[pin_data(PinnedDrop)]
+/// struct DriverData {
+/// #[pin]
+/// queue: CMutex<Vec<Command>>,
+/// buf: Box<[u8; 1024 * 1024]>,
+/// raw_info: *mut bindings::info,
+/// }
+///
+/// #[pinned_drop]
+/// impl PinnedDrop for DriverData {
+/// fn drop(self: Pin<&mut Self>) {
+/// unsafe { bindings::destroy_info(self.raw_info) };
+/// }
+/// }
+/// ```
+pub use ::pin_init_internal::pinned_drop;
+
+/// Derives the [`Zeroable`] trait for the given struct.
+///
+/// This can only be used for structs where every field implements the [`Zeroable`] trait.
+///
+/// # Examples
+///
+/// ```
+/// use pin_init::Zeroable;
+///
+/// #[derive(Zeroable)]
+/// pub struct DriverData {
+/// id: i64,
+/// buf_ptr: *mut u8,
+/// len: usize,
+/// }
+/// ```
+pub use ::pin_init_internal::Zeroable;
+
+/// Initialize and pin a type directly on the stack.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![expect(clippy::disallowed_names)]
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # use pin_init::*;
+/// # use core::pin::Pin;
+/// #[pin_data]
+/// struct Foo {
+/// #[pin]
+/// a: CMutex<usize>,
+/// b: Bar,
+/// }
+///
+/// #[pin_data]
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// stack_pin_init!(let foo = pin_init!(Foo {
+/// a <- CMutex::new(42),
+/// b: Bar {
+/// x: 64,
+/// },
+/// }));
+/// let foo: Pin<&mut Foo> = foo;
+/// println!("a: {}", &*foo.a.lock());
+/// ```
+///
+/// # Syntax
+///
+/// A normal `let` binding with optional type annotation. The expression is expected to implement
+/// [`PinInit`]/[`Init`] with the error type [`Infallible`]. If you want to use a different error
+/// type, then use [`stack_try_pin_init!`].
+#[macro_export]
+macro_rules! stack_pin_init {
+ (let $var:ident $(: $t:ty)? = $val:expr) => {
+ let val = $val;
+ let mut $var = ::core::pin::pin!($crate::__internal::StackInit$(::<$t>)?::uninit());
+ let mut $var = match $crate::__internal::StackInit::init($var, val) {
+ Ok(res) => res,
+ Err(x) => {
+ let x: ::core::convert::Infallible = x;
+ match x {}
+ }
+ };
+ };
+}
+
+/// Initialize and pin a type directly on the stack, with a fallible initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![expect(clippy::disallowed_names)]
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/error.rs"] mod error; use error::Error;
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # use pin_init::*;
+/// #[pin_data]
+/// struct Foo {
+/// #[pin]
+/// a: CMutex<usize>,
+/// b: Box<Bar>,
+/// }
+///
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// stack_try_pin_init!(let foo: Foo = try_pin_init!(Foo {
+/// a <- CMutex::new(42),
+/// b: Box::try_new(Bar {
+/// x: 64,
+/// })?,
+/// }? Error));
+/// let foo = foo.unwrap();
+/// println!("a: {}", &*foo.a.lock());
+/// ```
+///
+/// ```rust
+/// # #![expect(clippy::disallowed_names)]
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/error.rs"] mod error; use error::Error;
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # use pin_init::*;
+/// #[pin_data]
+/// struct Foo {
+/// #[pin]
+/// a: CMutex<usize>,
+/// b: Box<Bar>,
+/// }
+///
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// stack_try_pin_init!(let foo: Foo =? try_pin_init!(Foo {
+/// a <- CMutex::new(42),
+/// b: Box::try_new(Bar {
+/// x: 64,
+/// })?,
+/// }? Error));
+/// println!("a: {}", &*foo.a.lock());
+/// # Ok::<_, Error>(())
+/// ```
+///
+/// # Syntax
+///
+/// A normal `let` binding with optional type annotation. The expression is expected to implement
+/// [`PinInit`]/[`Init`]. This macro assigns a `Result` to the given variable; adding a `?` after
+/// the `=` will propagate the error.
+#[macro_export]
+macro_rules! stack_try_pin_init {
+ (let $var:ident $(: $t:ty)? = $val:expr) => {
+ let val = $val;
+ let mut $var = ::core::pin::pin!($crate::__internal::StackInit$(::<$t>)?::uninit());
+ let mut $var = $crate::__internal::StackInit::init($var, val);
+ };
+ (let $var:ident $(: $t:ty)? =? $val:expr) => {
+ let val = $val;
+ let mut $var = ::core::pin::pin!($crate::__internal::StackInit$(::<$t>)?::uninit());
+ let mut $var = $crate::__internal::StackInit::init($var, val)?;
+ };
+}
+
+/// Construct an in-place, pinned initializer for `struct`s.
+///
+/// This macro defaults the error to [`Infallible`]. If you need a different error, then use
+/// [`try_pin_init!`].
+///
+/// The syntax is almost identical to that of a normal `struct` initializer:
+///
+/// ```rust
+/// # use pin_init::*;
+/// # use core::pin::Pin;
+/// #[pin_data]
+/// struct Foo {
+/// a: usize,
+/// b: Bar,
+/// }
+///
+/// #[pin_data]
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// # fn demo() -> impl PinInit<Foo> {
+/// let a = 42;
+///
+/// let initializer = pin_init!(Foo {
+/// a,
+/// b: Bar {
+/// x: 64,
+/// },
+/// });
+/// # initializer }
+/// # Box::pin_init(demo()).unwrap();
+/// ```
+///
+/// Arbitrary Rust expressions can be used to set the value of a variable.
+///
+/// The fields are initialized in the order that they appear in the initializer, so it is possible
+/// to read already initialized fields using raw pointers.
+///
+/// IMPORTANT: You are not allowed to create references to fields of the struct inside of the
+/// initializer.
+///
+/// # Init-functions
+///
+/// When working with this library it is often desired to let others construct your types without
+/// giving access to all fields. This is where you would normally write a plain function `new` that
+/// would return a new instance of your type. With this library that is also possible. However,
+/// there are a few extra things to keep in mind.
+///
+/// To create an initializer function, simply declare it like this:
+///
+/// ```rust
+/// # use pin_init::*;
+/// # use core::pin::Pin;
+/// # #[pin_data]
+/// # struct Foo {
+/// # a: usize,
+/// # b: Bar,
+/// # }
+/// # #[pin_data]
+/// # struct Bar {
+/// # x: u32,
+/// # }
+/// impl Foo {
+/// fn new() -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// a: 42,
+/// b: Bar {
+/// x: 64,
+/// },
+/// })
+/// }
+/// }
+/// ```
+///
+/// Users of `Foo` can now create it like this:
+///
+/// ```rust
+/// # #![expect(clippy::disallowed_names)]
+/// # use pin_init::*;
+/// # use core::pin::Pin;
+/// # #[pin_data]
+/// # struct Foo {
+/// # a: usize,
+/// # b: Bar,
+/// # }
+/// # #[pin_data]
+/// # struct Bar {
+/// # x: u32,
+/// # }
+/// # impl Foo {
+/// # fn new() -> impl PinInit<Self> {
+/// # pin_init!(Self {
+/// # a: 42,
+/// # b: Bar {
+/// # x: 64,
+/// # },
+/// # })
+/// # }
+/// # }
+/// let foo = Box::pin_init(Foo::new());
+/// ```
+///
+/// They can also easily embed it into their own `struct`s:
+///
+/// ```rust
+/// # use pin_init::*;
+/// # use core::pin::Pin;
+/// # #[pin_data]
+/// # struct Foo {
+/// # a: usize,
+/// # b: Bar,
+/// # }
+/// # #[pin_data]
+/// # struct Bar {
+/// # x: u32,
+/// # }
+/// # impl Foo {
+/// # fn new() -> impl PinInit<Self> {
+/// # pin_init!(Self {
+/// # a: 42,
+/// # b: Bar {
+/// # x: 64,
+/// # },
+/// # })
+/// # }
+/// # }
+/// #[pin_data]
+/// struct FooContainer {
+/// #[pin]
+/// foo1: Foo,
+/// #[pin]
+/// foo2: Foo,
+/// other: u32,
+/// }
+///
+/// impl FooContainer {
+/// fn new(other: u32) -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// foo1 <- Foo::new(),
+/// foo2 <- Foo::new(),
+/// other,
+/// })
+/// }
+/// }
+/// ```
+///
+/// Here we see that when a field is itself initialized with a `PinInit`, one needs to write `<-`
+/// instead of `:`.
+/// This signifies that the given field is initialized in-place. As with `struct` initializers, just
+/// writing the field (in this case `other`) without `:` or `<-` means `other: other,`.
+///
+/// # Syntax
+///
+/// As already mentioned in the examples above, inside of `pin_init!` a `struct` initializer with
+/// the following modifications is expected:
+/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
+/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
+/// pointer named `this` inside of the initializer.
+/// - Using struct update syntax one can place `..Zeroable::zeroed()` at the very end of the
+/// struct; this initializes every field with 0 and then runs all initializers specified in the
+/// body. This can only be done if [`Zeroable`] is implemented for the struct.
+///
+/// For instance:
+///
+/// ```rust
+/// # use pin_init::*;
+/// # use core::{ptr::addr_of_mut, marker::PhantomPinned};
+/// #[pin_data]
+/// #[derive(Zeroable)]
+/// struct Buf {
+/// // `ptr` points into `buf`.
+/// ptr: *mut u8,
+/// buf: [u8; 64],
+/// #[pin]
+/// pin: PhantomPinned,
+/// }
+///
+/// let init = pin_init!(&this in Buf {
+/// buf: [0; 64],
+/// // SAFETY: `this` is a valid pointer to the `Buf` being initialized, so taking
+/// // the address of its `buf` field is sound and creates no intermediate reference.
+/// ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },
+/// pin: PhantomPinned,
+/// });
+/// let init = pin_init!(Buf {
+/// buf: [1; 64],
+/// ..Zeroable::zeroed()
+/// });
+/// ```
+///
+/// [`NonNull<Self>`]: core::ptr::NonNull
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `macros` inside of `macros.rs`.
+#[macro_export]
+macro_rules! pin_init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }) => {
+ $crate::try_pin_init!($(&$this in)? $t $(::<$($generics),*>)? {
+ $($fields)*
+ }? ::core::convert::Infallible)
+ };
+}
+
+/// Construct an in-place, fallible pinned initializer for `struct`s.
+///
+/// If the initialization can complete without error (or [`Infallible`]), then use [`pin_init!`].
+///
+/// You can use the `?` operator or use `return Err(err)` inside the initializer to stop
+/// initialization and return the error.
+///
+/// IMPORTANT: if you have `unsafe` code inside of the initializer you have to ensure that when
+/// initialization fails, the memory can be safely deallocated without any further modifications.
+///
+/// The syntax is identical to [`pin_init!`] with the following exception: you must append `? $type`
+/// after the `struct` initializer to specify the error type you want to use.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/error.rs"] mod error; use error::Error;
+/// use pin_init::{pin_data, try_pin_init, PinInit, InPlaceInit, zeroed};
+///
+/// #[pin_data]
+/// struct BigBuf {
+/// big: Box<[u8; 1024 * 1024 * 1024]>,
+/// small: [u8; 1024 * 1024],
+/// ptr: *mut u8,
+/// }
+///
+/// impl BigBuf {
+/// fn new() -> impl PinInit<Self, Error> {
+/// try_pin_init!(Self {
+/// big: Box::init(zeroed())?,
+/// small: [0; 1024 * 1024],
+/// ptr: core::ptr::null_mut(),
+/// }? Error)
+/// }
+/// }
+/// # let _ = Box::pin_init(BigBuf::new());
+/// ```
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `macros` inside of `macros.rs`.
+#[macro_export]
+macro_rules! try_pin_init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }? $err:ty) => {
+ $crate::__init_internal!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)? ),
+ @fields($($fields)*),
+ @error($err),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
+ )
+ }
+}
+
+/// Construct an in-place initializer for `struct`s.
+///
+/// This macro defaults the error to [`Infallible`]. If you need a different error, then use
+/// [`try_init!`].
+///
+/// The syntax is identical to [`pin_init!`] and its safety caveats also apply:
+/// - `unsafe` code must guarantee either full initialization or return an error and allow
+/// deallocation of the memory.
+/// - the fields are initialized in the order given in the initializer.
+/// - no references to fields are allowed to be created inside of the initializer.
+///
+/// This initializer is for initializing data in-place that might later be moved. If you want to
+/// pin-initialize, use [`pin_init!`].
+///
+/// # Examples
+///
+/// ```rust
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/error.rs"] mod error; use error::Error;
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # use pin_init::InPlaceInit;
+/// use pin_init::{init, Init, zeroed};
+///
+/// struct BigBuf {
+/// small: [u8; 1024 * 1024],
+/// }
+///
+/// impl BigBuf {
+/// fn new() -> impl Init<Self> {
+/// init!(Self {
+/// small <- zeroed(),
+/// })
+/// }
+/// }
+/// # let _ = Box::init(BigBuf::new());
+/// ```
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `macros` inside of `macros.rs`.
+#[macro_export]
+macro_rules! init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }) => {
+ $crate::try_init!($(&$this in)? $t $(::<$($generics),*>)? {
+ $($fields)*
+ }? ::core::convert::Infallible)
+ }
+}
+
+/// Construct an in-place fallible initializer for `struct`s.
+///
+/// If the initialization can complete without error (or [`Infallible`]), then use
+/// [`init!`].
+///
+/// The syntax is identical to [`try_pin_init!`]. You need to specify a custom error
+/// via `? $type` after the `struct` initializer.
+/// The safety caveats from [`try_pin_init!`] also apply:
+/// - `unsafe` code must guarantee either full initialization or return an error and allow
+/// deallocation of the memory.
+/// - the fields are initialized in the order given in the initializer.
+/// - no references to fields are allowed to be created inside of the initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![feature(allocator_api)]
+/// # use core::alloc::AllocError;
+/// # use pin_init::InPlaceInit;
+/// use pin_init::{try_init, Init, zeroed};
+///
+/// struct BigBuf {
+/// big: Box<[u8; 1024 * 1024 * 1024]>,
+/// small: [u8; 1024 * 1024],
+/// }
+///
+/// impl BigBuf {
+/// fn new() -> impl Init<Self, AllocError> {
+/// try_init!(Self {
+/// big: Box::init(zeroed())?,
+/// small: [0; 1024 * 1024],
+/// }? AllocError)
+/// }
+/// }
+/// # let _ = Box::init(BigBuf::new());
+/// ```
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `macros` inside of `macros.rs`.
+#[macro_export]
+macro_rules! try_init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }? $err:ty) => {
+ $crate::__init_internal!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)?),
+ @fields($($fields)*),
+ @error($err),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
+ )
+ }
+}
+
+/// Asserts that a field on a struct using `#[pin_data]` is marked with `#[pin]`, i.e. that it is
+/// structurally pinned.
+///
+/// # Example
+///
+/// This will succeed:
+/// ```
+/// use pin_init::{pin_data, assert_pinned};
+///
+/// #[pin_data]
+/// struct MyStruct {
+/// #[pin]
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// This will fail:
+/// ```compile_fail
+/// use pin_init::{pin_data, assert_pinned};
+///
+/// #[pin_data]
+/// struct MyStruct {
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// Some uses of the macro may trigger the `can't use generic parameters from outer item` error. To
+/// work around this, you may pass the `inline` parameter to the macro. The `inline` parameter can
+/// only be used when the macro is invoked from a function body.
+/// ```
+/// # use core::pin::Pin;
+/// use pin_init::{pin_data, assert_pinned};
+///
+/// #[pin_data]
+/// struct Foo<T> {
+/// #[pin]
+/// elem: T,
+/// }
+///
+/// impl<T> Foo<T> {
+/// fn project(self: Pin<&mut Self>) -> Pin<&mut T> {
+/// assert_pinned!(Foo<T>, elem, T, inline);
+///
+/// // SAFETY: The field is structurally pinned.
+/// unsafe { self.map_unchecked_mut(|me| &mut me.elem) }
+/// }
+/// }
+/// ```
+#[macro_export]
+macro_rules! assert_pinned {
+ ($ty:ty, $field:ident, $field_ty:ty, inline) => {
+ let _ = move |ptr: *mut $field_ty| {
+ // SAFETY: This code is unreachable.
+ let data = unsafe { <$ty as $crate::__internal::HasPinData>::__pin_data() };
+ let init = $crate::__internal::AlwaysFail::<$field_ty>::new();
+ // SAFETY: This code is unreachable.
+ unsafe { data.$field(ptr, init) }.ok();
+ };
+ };
+
+ ($ty:ty, $field:ident, $field_ty:ty) => {
+ const _: () = {
+ $crate::assert_pinned!($ty, $field, $field_ty, inline);
+ };
+ };
+}
+
+/// A pin-initializer for the type `T`.
+///
+/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
+/// be [`Box<T>`], [`Arc<T>`] or even the stack (see [`stack_pin_init!`]).
+///
+/// Also see the [module description](self).
+///
+/// # Safety
+///
+/// Implementing this trait manually requires great care and is rarely necessary: use
+/// [`pin_init_from_closure`] where possible.
+///
+/// The [`PinInit::__pinned_init`] function:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned up `slot`; this means:
+/// - `slot` can be deallocated without UB occurring,
+/// - `slot` does not need to be dropped,
+/// - `slot` is not partially initialized.
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
+///
+#[cfg_attr(
+ kernel,
+ doc = "[`Arc<T>`]: https://rust.docs.kernel.org/kernel/sync/struct.Arc.html"
+)]
+#[cfg_attr(
+ kernel,
+ doc = "[`Box<T>`]: https://rust.docs.kernel.org/kernel/alloc/kbox/struct.Box.html"
+)]
+#[cfg_attr(not(kernel), doc = "[`Arc<T>`]: alloc::alloc::sync::Arc")]
+#[cfg_attr(not(kernel), doc = "[`Box<T>`]: alloc::alloc::boxed::Box")]
+#[must_use = "An initializer must be used in order to create its value."]
+pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
+ /// Initializes `slot`.
+ ///
+ /// # Safety
+ ///
+ /// - `slot` is a valid pointer to uninitialized memory.
+ /// - the caller does not touch `slot` when `Err` is returned; they are only permitted to
+ ///   deallocate it.
+ /// - `slot` will not move until it is dropped, i.e. it will be pinned.
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E>;
+
+ /// First initializes the value using `self` then calls the function `f` with the initialized
+ /// value.
+ ///
+ /// If `f` returns an error, the value is dropped and the initializer will forward the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![feature(allocator_api)]
+ /// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+ /// # use pin_init::*;
+ /// let mtx_init = CMutex::new(42);
+ /// // Make the initializer print the value.
+ /// let mtx_init = mtx_init.pin_chain(|mtx| {
+ /// println!("{:?}", mtx.get_data_mut());
+ /// Ok(())
+ /// });
+ /// ```
+ fn pin_chain<F>(self, f: F) -> ChainPinInit<Self, F, T, E>
+ where
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+ {
+ ChainPinInit(self, f, PhantomData)
+ }
+}
+
+/// An initializer returned by [`PinInit::pin_chain`].
+pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, T)>);
+
+// SAFETY: The `__pinned_init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error and in this case `slot` will be dropped.
+// - considers `slot` pinned.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainPinInit<I, F, T, E>
+where
+ I: PinInit<T, E>,
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+{
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__pinned_init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ let val = unsafe { &mut *slot };
+ // SAFETY: `slot` is considered pinned.
+ let val = unsafe { Pin::new_unchecked(val) };
+ // SAFETY: `slot` was initialized above.
+ (self.1)(val).inspect_err(|_| unsafe { core::ptr::drop_in_place(slot) })
+ }
+}
+
+/// An initializer for `T`.
+///
+/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
+/// be [`Box<T>`], [`Arc<T>`] or even the stack (see [`stack_pin_init!`]). Because
+/// [`PinInit<T, E>`] is a supertrait, you can use every function that takes it as well.
+///
+/// Also see the [module description](self).
+///
+/// # Safety
+///
+/// Implementing this trait manually requires great care and is rarely necessary: use
+/// [`init_from_closure`] where possible.
+///
+/// The [`Init::__init`] function:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned up `slot`; this means:
+/// - `slot` can be deallocated without UB occurring,
+/// - `slot` does not need to be dropped,
+/// - `slot` is not partially initialized.
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
+///
+/// The `__pinned_init` function from the supertrait [`PinInit`] needs to execute the exact same
+/// code as `__init`.
+///
+/// Contrary to its supertrait [`PinInit<T, E>`], the caller is allowed to move the pointee after
+/// initialization.
+///
+#[cfg_attr(
+ kernel,
+ doc = "[`Arc<T>`]: https://rust.docs.kernel.org/kernel/sync/struct.Arc.html"
+)]
+#[cfg_attr(
+ kernel,
+ doc = "[`Box<T>`]: https://rust.docs.kernel.org/kernel/alloc/kbox/struct.Box.html"
+)]
+#[cfg_attr(not(kernel), doc = "[`Arc<T>`]: alloc::alloc::sync::Arc")]
+#[cfg_attr(not(kernel), doc = "[`Box<T>`]: alloc::alloc::boxed::Box")]
+#[must_use = "An initializer must be used in order to create its value."]
+pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
+ /// Initializes `slot`.
+ ///
+ /// # Safety
+ ///
+ /// - `slot` is a valid pointer to uninitialized memory.
+ /// - the caller does not touch `slot` when `Err` is returned; they are only permitted to
+ ///   deallocate it.
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E>;
+
+ /// First initializes the value using `self` then calls the function `f` with the initialized
+ /// value.
+ ///
+ /// If `f` returns an error, the value is dropped and the initializer will forward the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![expect(clippy::disallowed_names)]
+ /// use pin_init::{init, zeroed, Init};
+ ///
+ /// struct Foo {
+ /// buf: [u8; 1_000_000],
+ /// }
+ ///
+ /// impl Foo {
+ /// fn setup(&mut self) {
+ /// println!("Setting up foo");
+ /// }
+ /// }
+ ///
+ /// let foo = init!(Foo {
+ /// buf <- zeroed()
+ /// }).chain(|foo| {
+ /// foo.setup();
+ /// Ok(())
+ /// });
+ /// ```
+ fn chain<F>(self, f: F) -> ChainInit<Self, F, T, E>
+ where
+ F: FnOnce(&mut T) -> Result<(), E>,
+ {
+ ChainInit(self, f, PhantomData)
+ }
+}
+
+/// An initializer returned by [`Init::chain`].
+pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, T)>);
+
+// SAFETY: The `__init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error and in this case `slot` will be dropped.
+unsafe impl<T: ?Sized, E, I, F> Init<T, E> for ChainInit<I, F, T, E>
+where
+ I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
+{
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ (self.1)(unsafe { &mut *slot }).inspect_err(|_|
+ // SAFETY: `slot` was initialized above.
+ unsafe { core::ptr::drop_in_place(slot) })
+ }
+}
+
+// SAFETY: `__pinned_init` behaves exactly the same as `__init`.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainInit<I, F, T, E>
+where
+ I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
+{
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: `__init` has less strict requirements compared to `__pinned_init`.
+ unsafe { self.__init(slot) }
+ }
+}
+
+/// Creates a new [`PinInit<T, E>`] from the given closure.
+///
+/// # Safety
+///
+/// The closure:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned up `slot`; this means:
+/// - `slot` can be deallocated without UB occurring,
+/// - `slot` does not need to be dropped,
+/// - `slot` is not partially initialized.
+/// - may assume that the `slot` does not move if `T: !Unpin`,
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
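+///
+/// # Examples
+///
+/// A minimal sketch of a manual pin-initializer; the `Count` type and `count_init` function are
+/// illustrative, not part of this crate:
+///
+/// ```rust
+/// use core::convert::Infallible;
+/// use pin_init::{pin_init_from_closure, PinInit};
+///
+/// struct Count(u64);
+///
+/// fn count_init(start: u64) -> impl PinInit<Count> {
+///     let init = move |slot: *mut Count| {
+///         // SAFETY: `slot` is valid for writes by the contract of `__pinned_init`.
+///         unsafe { slot.write(Count(start)) };
+///         Ok::<(), Infallible>(())
+///     };
+///     // SAFETY: The closure fully initializes `slot` and always returns `Ok(())`.
+///     unsafe { pin_init_from_closure(init) }
+/// }
+/// ```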
+#[inline]
+pub const unsafe fn pin_init_from_closure<T: ?Sized, E>(
+ f: impl FnOnce(*mut T) -> Result<(), E>,
+) -> impl PinInit<T, E> {
+ __internal::InitClosure(f, PhantomData)
+}
+
+/// Creates a new [`Init<T, E>`] from the given closure.
+///
+/// # Safety
+///
+/// The closure:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned up `slot`; this means:
+/// - `slot` can be deallocated without UB occurring,
+/// - `slot` does not need to be dropped,
+/// - `slot` is not partially initialized.
+/// - the `slot` may move after initialization.
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
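+///
+/// # Examples
+///
+/// A sketch of a fallible initializer; the `Even` type and its error handling are illustrative,
+/// not part of this crate:
+///
+/// ```rust
+/// use pin_init::{init_from_closure, Init};
+///
+/// struct Even(u32);
+///
+/// fn even_init(n: u32) -> impl Init<Even, &'static str> {
+///     let init = move |slot: *mut Even| {
+///         if n % 2 != 0 {
+///             // Nothing has been written to `slot`, so the caller may simply deallocate it.
+///             return Err("odd value");
+///         }
+///         // SAFETY: `slot` is valid for writes by the contract of `__init`.
+///         unsafe { slot.write(Even(n)) };
+///         Ok(())
+///     };
+///     // SAFETY: The closure either fully initializes `slot` or returns `Err` without touching
+///     // `slot`.
+///     unsafe { init_from_closure(init) }
+/// }
+/// ```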
+#[inline]
+pub const unsafe fn init_from_closure<T: ?Sized, E>(
+ f: impl FnOnce(*mut T) -> Result<(), E>,
+) -> impl Init<T, E> {
+ __internal::InitClosure(f, PhantomData)
+}
+
+/// An initializer that leaves the memory uninitialized.
+///
+/// The initializer is a no-op. The `slot` memory is not changed.
+#[inline]
+pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
+ // SAFETY: The memory is allowed to be uninitialized.
+ unsafe { init_from_closure(|_| Ok(())) }
+}
+
+/// Initializes an array by initializing each element via the provided initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// # use pin_init::*;
+/// use pin_init::init_array_from_fn;
+/// let array: Box<[usize; 1_000]> = Box::init(init_array_from_fn(|i| i)).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl Init<[T; N], E>
+where
+ I: Init<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
+ // requirements.
+ if let Err(e) = unsafe { init.__init(ptr) } {
+ // SAFETY: The loop has initialized the elements `slot[0..i]` and since we return
+ // `Err` below, `slot` will be considered uninitialized memory.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ return Err(e);
+ }
+ }
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { init_from_closure(init) }
+}
+
+/// Initializes an array by initializing each element via the provided initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # use pin_init::*;
+/// # use core::pin::Pin;
+/// use pin_init::pin_init_array_from_fn;
+/// use std::sync::Arc;
+/// let array: Pin<Arc<[CMutex<usize>; 1_000]>> =
+/// Arc::pin_init(pin_init_array_from_fn(|i| CMutex::new(i))).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn pin_init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl PinInit<[T; N], E>
+where
+ I: PinInit<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__pinned_init`
+ // requirements.
+ if let Err(e) = unsafe { init.__pinned_init(ptr) } {
+ // SAFETY: The loop has initialized the elements `slot[0..i]` and since we return
+ // `Err` below, `slot` will be considered uninitialized memory.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ return Err(e);
+ }
+ }
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { pin_init_from_closure(init) }
+}
+
+// SAFETY: Every type can be initialized by-value.
+unsafe impl<T, E> Init<T, E> for T {
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: `slot` is a valid pointer to uninitialized memory for a `T` by the safety
+ // requirements of `__init`, so it is valid for this write.
+ unsafe { slot.write(self) };
+ Ok(())
+ }
+}
+
+// SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`.
+unsafe impl<T, E> PinInit<T, E> for T {
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: The safety requirements of `__pinned_init` are a superset of those of `__init`.
+ unsafe { self.__init(slot) }
+ }
+}
+
+/// Smart pointer containing uninitialized memory and that can write a value.
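+///
+/// # Examples
+///
+/// A sketch using the implementation for `Box<MaybeUninit<T>>` (assumed to be available here, as
+/// in the other `Box` examples; the `allocator_api` feature mirrors those examples):
+///
+/// ```rust
+/// # #![feature(allocator_api)]
+/// use pin_init::{zeroed, InPlaceWrite};
+///
+/// let uninit = Box::new_uninit();
+/// // Write a zeroed `u64` into the uninitialized allocation.
+/// let value: Box<u64> = uninit.write_init(zeroed()).unwrap();
+/// assert_eq!(*value, 0);
+/// ```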
+pub trait InPlaceWrite<T> {
+ /// The type `Self` turns into when the contents are initialized.
+ type Initialized;
+
+ /// Use the given initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and treats it as uninitialized memory.
+ fn write_init<E>(self, init: impl Init<T, E>) -> Result<Self::Initialized, E>;
+
+ /// Use the given pin-initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and treats it as uninitialized memory.
+ fn write_pin_init<E>(self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E>;
+}
+
+/// Trait facilitating pinned destruction.
+///
+/// Use [`pinned_drop`] to implement this trait safely:
+///
+/// ```rust
+/// # #![feature(allocator_api)]
+/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
+/// # use pin_init::*;
+/// use core::pin::Pin;
+/// #[pin_data(PinnedDrop)]
+/// struct Foo {
+/// #[pin]
+/// mtx: CMutex<usize>,
+/// }
+///
+/// #[pinned_drop]
+/// impl PinnedDrop for Foo {
+/// fn drop(self: Pin<&mut Self>) {
+/// println!("Foo is being dropped!");
+/// }
+/// }
+/// ```
+///
+/// # Safety
+///
+/// This trait must be implemented via the [`pinned_drop`] proc-macro attribute on the impl.
+pub unsafe trait PinnedDrop: __internal::HasPinData {
+ /// Executes the pinned destructor of this type.
+ ///
+ /// While this function is marked safe, it is actually unsafe to call it manually. For this
+ /// reason it takes an additional parameter whose type can only be constructed by `unsafe`
+ /// code, which prevents this function from being called where it should not be.
+ ///
+ /// This extra parameter will be generated by the `#[pinned_drop]` proc-macro attribute
+ /// automatically.
+ fn drop(self: Pin<&mut Self>, only_call_from_drop: __internal::OnlyCallFromDrop);
+}
+
+/// Marker trait for types that can be initialized by writing just zeroes.
+///
+/// # Safety
+///
+/// The bit pattern consisting of only zeroes is a valid bit pattern for this type. In other words,
+/// this is not UB:
+///
+/// ```rust,ignore
+/// let val: Self = unsafe { core::mem::zeroed() };
+/// ```
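+///
+/// # Examples
+///
+/// A sketch of a manual implementation for an illustrative plain-data struct (where available,
+/// the `Zeroable` derive macro is preferable to writing the `unsafe impl` by hand):
+///
+/// ```rust
+/// use pin_init::Zeroable;
+///
+/// struct Point {
+///     x: u64,
+///     y: u64,
+/// }
+///
+/// // SAFETY: `Point` contains only integers, for which all-zeroes is a valid bit pattern, and
+/// // padding bytes are allowed to be zero.
+/// unsafe impl Zeroable for Point {}
+/// ```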
+pub unsafe trait Zeroable {}
+
+/// Marker trait for types that allow `Option<Self>` to be set to all zeroes in order to write
+/// `None` to that location.
+///
+/// # Safety
+///
+/// The implementer needs to ensure that `unsafe impl Zeroable for Option<Self> {}` is sound.
+pub unsafe trait ZeroableOption {}
+
+// SAFETY: by the safety requirement of `ZeroableOption`, this is valid.
+unsafe impl<T: ZeroableOption> Zeroable for Option<T> {}
+
+/// Creates a new zeroed `T`.
+///
+/// The returned initializer will write `0x00` to every byte of the given `slot`.
+#[inline]
+pub fn zeroed<T: Zeroable>() -> impl Init<T> {
+ // SAFETY: Because `T: Zeroable`, all bytes zero is a valid bit pattern for `T`
+ // and because we write all zeroes, the memory is initialized.
+ unsafe {
+ init_from_closure(|slot: *mut T| {
+ slot.write_bytes(0, 1);
+ Ok(())
+ })
+ }
+}
+
+macro_rules! impl_zeroable {
+ ($($({$($generics:tt)*})? $t:ty, )*) => {
+ // SAFETY: Safety comments written in the macro invocation.
+ $(unsafe impl$($($generics)*)? Zeroable for $t {})*
+ };
+}
+
+impl_zeroable! {
+ // SAFETY: All primitives that are allowed to be zero.
+ bool,
+ char,
+ u8, u16, u32, u64, u128, usize,
+ i8, i16, i32, i64, i128, isize,
+ f32, f64,
+
+ // Note: do not add uninhabited types (such as `!` or `core::convert::Infallible`) to this list;
+ // creating an instance of an uninhabited type is immediate undefined behavior. For more on
+ // uninhabited/empty types, consult The Rustonomicon:
+ // <https://doc.rust-lang.org/stable/nomicon/exotic-sizes.html#empty-types>. The Rust Reference
+ // also has information on undefined behavior:
+ // <https://doc.rust-lang.org/stable/reference/behavior-considered-undefined.html>.
+ //
+ // SAFETY: These are inhabited ZSTs; there is nothing to zero and a valid value exists.
+ {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, (),
+
+ // SAFETY: Type is allowed to take any value, including all zeros.
+ {<T>} MaybeUninit<T>,
+
+ // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`.
+ {<T: ?Sized + Zeroable>} UnsafeCell<T>,
+
+ // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee:
+ // <https://doc.rust-lang.org/stable/std/option/index.html#representation>).
+ Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
+ Option<NonZeroU128>, Option<NonZeroUsize>,
+ Option<NonZeroI8>, Option<NonZeroI16>, Option<NonZeroI32>, Option<NonZeroI64>,
+ Option<NonZeroI128>, Option<NonZeroIsize>,
+ {<T>} Option<NonNull<T>>,
+
+ // SAFETY: `null` pointer is valid.
+ //
+ // We cannot use `T: ?Sized`, since the vtable pointer part of fat pointers is not allowed to
+ // be null.
+ //
+ // When `Pointee` gets stabilized, we could use
+ // `T: ?Sized where <T as Pointee>::Metadata: Zeroable`
+ {<T>} *mut T, {<T>} *const T,
+
+ // SAFETY: `null` pointer is valid and the metadata part of these fat pointers is allowed to be
+ // zero.
+ {<T>} *mut [T], {<T>} *const [T], *mut str, *const str,
+
+ // SAFETY: `T` is `Zeroable`.
+ {<const N: usize, T: Zeroable>} [T; N], {<T: Zeroable>} Wrapping<T>,
+}
+
+macro_rules! impl_tuple_zeroable {
+ ($(,)?) => {};
+ ($first:ident, $($t:ident),* $(,)?) => {
+ // SAFETY: All elements are zeroable and padding can be zero.
+ unsafe impl<$first: Zeroable, $($t: Zeroable),*> Zeroable for ($first, $($t),*) {}
+ impl_tuple_zeroable!($($t),* ,);
+ }
+}
+
+impl_tuple_zeroable!(A, B, C, D, E, F, G, H, I, J);
diff --git a/rust/kernel/init/macros.rs b/rust/pin-init/src/macros.rs
index cb6e61b6c50b..361623324d5c 100644
--- a/rust/kernel/init/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! This module provides the macros that actually implement the proc-macros `pin_data` and
-//! `pinned_drop`. It also contains `__init_internal` the implementation of the `{try_}{pin_}init!`
-//! macros.
+//! `pinned_drop`. It also contains `__init_internal`, the implementation of the
+//! `{try_}{pin_}init!` macros.
//!
//! These macros should never be called directly, since they expect their input to be
//! in a certain format which is internal. If used incorrectly, these macros can lead to UB even in
@@ -11,16 +11,17 @@
//! This architecture has been chosen because the kernel does not yet have access to `syn` which
//! would make matters a lot easier for implementing these as proc-macros.
//!
+//! Since this library and the kernel implementation should diverge as little as possible, the same
+//! approach has been taken here.
+//!
//! # Macro expansion example
//!
//! This section is intended for readers trying to understand the macros in this module and the
-//! `pin_init!` macros from `init.rs`.
+//! `[try_][pin_]init!` macros from `lib.rs`.
//!
//! We will look at the following example:
//!
//! ```rust,ignore
-//! # use kernel::init::*;
-//! # use core::pin::Pin;
//! #[pin_data]
//! #[repr(C)]
//! struct Bar<T> {
@@ -45,7 +46,7 @@
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! pr_info!("{self:p} is getting dropped.");
+//! println!("{self:p} is getting dropped.");
//! }
//! }
//!
@@ -75,7 +76,6 @@
//! Here is the definition of `Bar` from our example:
//!
//! ```rust,ignore
-//! # use kernel::init::*;
//! #[pin_data]
//! #[repr(C)]
//! struct Bar<T> {
@@ -121,22 +121,22 @@
//! self,
//! slot: *mut T,
//! // Since `t` is `#[pin]`, this is `PinInit`.
-//! init: impl ::kernel::init::PinInit<T, E>,
+//! init: impl ::pin_init::PinInit<T, E>,
//! ) -> ::core::result::Result<(), E> {
-//! unsafe { ::kernel::init::PinInit::__pinned_init(init, slot) }
+//! unsafe { ::pin_init::PinInit::__pinned_init(init, slot) }
//! }
//! pub unsafe fn x<E>(
//! self,
//! slot: *mut usize,
//! // Since `x` is not `#[pin]`, this is `Init`.
-//! init: impl ::kernel::init::Init<usize, E>,
+//! init: impl ::pin_init::Init<usize, E>,
//! ) -> ::core::result::Result<(), E> {
-//! unsafe { ::kernel::init::Init::__init(init, slot) }
+//! unsafe { ::pin_init::Init::__init(init, slot) }
//! }
//! }
//! // Implement the internal `HasPinData` trait that associates `Bar` with the pin-data struct
//! // that we constructed above.
-//! unsafe impl<T> ::kernel::init::__internal::HasPinData for Bar<T> {
+//! unsafe impl<T> ::pin_init::__internal::HasPinData for Bar<T> {
//! type PinData = __ThePinData<T>;
//! unsafe fn __pin_data() -> Self::PinData {
//! __ThePinData {
@@ -145,9 +145,9 @@
//! }
//! }
//! // Implement the internal `PinData` trait that marks the pin-data struct as a pin-data
-//! // struct. This is important to ensure that no user can implement a rouge `__pin_data`
+//! // struct. This is important to ensure that no user can implement a rogue `__pin_data`
//! // function without using `unsafe`.
-//! unsafe impl<T> ::kernel::init::__internal::PinData for __ThePinData<T> {
+//! unsafe impl<T> ::pin_init::__internal::PinData for __ThePinData<T> {
//! type Datee = Bar<T>;
//! }
//! // Now we only want to implement `Unpin` for `Bar` when every structurally pinned field is
@@ -156,7 +156,7 @@
//! // case no such fields exist, hence this is almost empty. The two phantomdata fields exist
//! // for two reasons:
//! // - `__phantom`: every generic must be used, since we cannot really know which generics
-//! // are used, we declere all and then use everything here once.
+//! // are used, we declare all and then use everything here once.
//! // - `__phantom_pin`: uses the `'__pin` lifetime and ensures that this struct is invariant
//! // over it. The lifetime is needed to work around the limitation that trait bounds must
//! // not be trivial, e.g. the user has a `#[pin] PhantomPinned` field -- this is
@@ -182,16 +182,16 @@
//! // Normally `Drop` bounds do not have the correct semantics, but for this purpose they do
//! // (normally people want to know if a type has any kind of drop glue at all, here we want
//! // to know if it has any kind of custom drop glue, which is exactly what this bound does).
-//! #[allow(drop_bounds)]
+//! #[expect(drop_bounds)]
//! impl<T: ::core::ops::Drop> MustNotImplDrop for T {}
//! impl<T> MustNotImplDrop for Bar<T> {}
//! // Here comes a convenience check, if one implemented `PinnedDrop`, but forgot to add it to
//! // `#[pin_data]`, then this will error with the same mechanic as above, this is not needed
//! // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`.
-//! #[allow(non_camel_case_types)]
+//! #[expect(non_camel_case_types)]
//! trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
//! impl<
-//! T: ::kernel::init::PinnedDrop,
+//! T: ::pin_init::PinnedDrop,
//! > UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
//! impl<T> UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for Bar<T> {}
//! };
@@ -227,11 +227,11 @@
//! // - we `use` the `HasPinData` trait in the block, it is only available in that
//! // scope.
//! let data = unsafe {
-//! use ::kernel::init::__internal::HasPinData;
+//! use ::pin_init::__internal::HasPinData;
//! Self::__pin_data()
//! };
//! // Ensure that `data` really is of type `PinData` and help with type inference:
-//! let init = ::kernel::init::__internal::PinData::make_closure::<
+//! let init = ::pin_init::__internal::PinData::make_closure::<
//! _,
//! __InitOk,
//! ::core::convert::Infallible,
@@ -250,8 +250,8 @@
//! // error type is `Infallible`) we will need to drop this field if there
//! // is an error later. This `DropGuard` will drop the field when it gets
//! // dropped and has not yet been forgotten.
-//! let t = unsafe {
-//! ::pinned_init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).t))
+//! let __t_guard = unsafe {
+//! ::pin_init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).t))
//! };
//! // Expansion of `x: 0,`:
//! // Since this can be an arbitrary expression we cannot place it inside
@@ -261,14 +261,14 @@
//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).x), x) };
//! }
//! // We again create a `DropGuard`.
-//! let x = unsafe {
-//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).x))
+//! let __x_guard = unsafe {
+//! ::pin_init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).x))
//! };
//! // Since initialization has successfully completed, we can now forget
//! // the guards. This is not `mem::forget`, since we only have
//! // `&DropGuard`.
-//! ::core::mem::forget(x);
-//! ::core::mem::forget(t);
+//! ::core::mem::forget(__x_guard);
+//! ::core::mem::forget(__t_guard);
//! // Here we use the type checker to ensure that every field has been
//! // initialized exactly once, since this is `if false` it will never get
//! // executed, but still type-checked.
@@ -303,7 +303,7 @@
//! };
//! // Construct the initializer.
//! let init = unsafe {
-//! ::kernel::init::pin_init_from_closure::<
+//! ::pin_init::pin_init_from_closure::<
//! _,
//! ::core::convert::Infallible,
//! >(init)
@@ -350,19 +350,19 @@
//! unsafe fn b<E>(
//! self,
//! slot: *mut Bar<u32>,
-//! init: impl ::kernel::init::PinInit<Bar<u32>, E>,
+//! init: impl ::pin_init::PinInit<Bar<u32>, E>,
//! ) -> ::core::result::Result<(), E> {
-//! unsafe { ::kernel::init::PinInit::__pinned_init(init, slot) }
+//! unsafe { ::pin_init::PinInit::__pinned_init(init, slot) }
//! }
//! unsafe fn a<E>(
//! self,
//! slot: *mut usize,
-//! init: impl ::kernel::init::Init<usize, E>,
+//! init: impl ::pin_init::Init<usize, E>,
//! ) -> ::core::result::Result<(), E> {
-//! unsafe { ::kernel::init::Init::__init(init, slot) }
+//! unsafe { ::pin_init::Init::__init(init, slot) }
//! }
//! }
-//! unsafe impl ::kernel::init::__internal::HasPinData for Foo {
+//! unsafe impl ::pin_init::__internal::HasPinData for Foo {
//! type PinData = __ThePinData;
//! unsafe fn __pin_data() -> Self::PinData {
//! __ThePinData {
@@ -370,7 +370,7 @@
//! }
//! }
//! }
-//! unsafe impl ::kernel::init::__internal::PinData for __ThePinData {
+//! unsafe impl ::pin_init::__internal::PinData for __ThePinData {
//! type Datee = Foo;
//! }
//! #[allow(dead_code)]
@@ -394,8 +394,8 @@
//! let pinned = unsafe { ::core::pin::Pin::new_unchecked(self) };
//! // Create the unsafe token that proves that we are inside of a destructor, this
//! // type is only allowed to be created in a destructor.
-//! let token = unsafe { ::kernel::init::__internal::OnlyCallFromDrop::new() };
-//! ::kernel::init::PinnedDrop::drop(pinned, token);
+//! let token = unsafe { ::pin_init::__internal::OnlyCallFromDrop::new() };
+//! ::pin_init::PinnedDrop::drop(pinned, token);
//! }
//! }
//! };
@@ -412,7 +412,7 @@
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! pr_info!("{self:p} is getting dropped.");
+//! println!("{self:p} is getting dropped.");
//! }
//! }
//! ```
@@ -421,9 +421,9 @@
//!
//! ```rust,ignore
//! // `unsafe`, full path and the token parameter are added, everything else stays the same.
-//! unsafe impl ::kernel::init::PinnedDrop for Foo {
-//! fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) {
-//! pr_info!("{self:p} is getting dropped.");
+//! unsafe impl ::pin_init::PinnedDrop for Foo {
+//! fn drop(self: Pin<&mut Self>, _: ::pin_init::__internal::OnlyCallFromDrop) {
+//! println!("{self:p} is getting dropped.");
//! }
//! }
//! ```
@@ -448,10 +448,10 @@
//! let initializer = {
//! struct __InitOk;
//! let data = unsafe {
-//! use ::kernel::init::__internal::HasPinData;
+//! use ::pin_init::__internal::HasPinData;
//! Foo::__pin_data()
//! };
-//! let init = ::kernel::init::__internal::PinData::make_closure::<
+//! let init = ::pin_init::__internal::PinData::make_closure::<
//! _,
//! __InitOk,
//! ::core::convert::Infallible,
@@ -461,16 +461,16 @@
//! {
//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).a), a) };
//! }
-//! let a = unsafe {
-//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).a))
+//! let __a_guard = unsafe {
+//! ::pin_init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).a))
//! };
//! let init = Bar::new(36);
//! unsafe { data.b(::core::addr_of_mut!((*slot).b), b)? };
-//! let b = unsafe {
-//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).b))
+//! let __b_guard = unsafe {
+//! ::pin_init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).b))
//! };
-//! ::core::mem::forget(b);
-//! ::core::mem::forget(a);
+//! ::core::mem::forget(__b_guard);
+//! ::core::mem::forget(__a_guard);
//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
//! let _ = || {
//! unsafe {
@@ -492,12 +492,17 @@
//! init(slot).map(|__InitOk| ())
//! };
//! let init = unsafe {
-//! ::kernel::init::pin_init_from_closure::<_, ::core::convert::Infallible>(init)
+//! ::pin_init::pin_init_from_closure::<_, ::core::convert::Infallible>(init)
//! };
//! init
//! };
//! ```
+#[cfg(kernel)]
+pub use ::macros::paste;
+#[cfg(not(kernel))]
+pub use ::paste::paste;
+
/// Creates a `unsafe impl<...> PinnedDrop for $type` block.
///
/// See [`PinnedDrop`] for more information.
@@ -513,10 +518,11 @@ macro_rules! __pinned_drop {
}
),
) => {
+ // SAFETY: TODO.
unsafe $($impl_sig)* {
// Inherit all attributes and the type/ident tokens for the signature.
$(#[$($attr)*])*
- fn drop($($sig)*, _: $crate::init::__internal::OnlyCallFromDrop) {
+ fn drop($($sig)*, _: $crate::__internal::OnlyCallFromDrop) {
$($inner)*
}
}
@@ -538,6 +544,7 @@ macro_rules! __pin_data {
),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@body({ $($fields:tt)* }),
) => {
// We now use token munching to iterate through all of the fields. While doing this we
@@ -560,6 +567,9 @@ macro_rules! __pin_data {
@impl_generics($($impl_generics)*),
// The 'ty generics', the generics that will need to be specified on the impl blocks.
@ty_generics($($ty_generics)*),
+ // The 'decl generics', the generics that need to be specified on the struct
+ // definition.
+ @decl_generics($($decl_generics)*),
// The where clause of any impl block and the declaration.
@where($($($whr)*)?),
// The remaining fields tokens that need to be processed.
@@ -585,6 +595,7 @@ macro_rules! __pin_data {
@name($name:ident),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@where($($whr:tt)*),
// We found a PhantomPinned field, this should generally be pinned!
@fields_munch($field:ident : $($($(::)?core::)?marker::)?PhantomPinned, $($rest:tt)*),
@@ -607,6 +618,7 @@ macro_rules! __pin_data {
@name($name),
@impl_generics($($impl_generics)*),
@ty_generics($($ty_generics)*),
+ @decl_generics($($decl_generics)*),
@where($($whr)*),
@fields_munch($($rest)*),
@pinned($($pinned)* $($accum)* $field: ::core::marker::PhantomPinned,),
@@ -623,6 +635,7 @@ macro_rules! __pin_data {
@name($name:ident),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@where($($whr:tt)*),
// We reached the field declaration.
@fields_munch($field:ident : $type:ty, $($rest:tt)*),
@@ -640,6 +653,7 @@ macro_rules! __pin_data {
@name($name),
@impl_generics($($impl_generics)*),
@ty_generics($($ty_generics)*),
+ @decl_generics($($decl_generics)*),
@where($($whr)*),
@fields_munch($($rest)*),
@pinned($($pinned)* $($accum)* $field: $type,),
@@ -656,6 +670,7 @@ macro_rules! __pin_data {
@name($name:ident),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@where($($whr:tt)*),
// We reached the field declaration.
@fields_munch($field:ident : $type:ty, $($rest:tt)*),
@@ -673,6 +688,7 @@ macro_rules! __pin_data {
@name($name),
@impl_generics($($impl_generics)*),
@ty_generics($($ty_generics)*),
+ @decl_generics($($decl_generics)*),
@where($($whr)*),
@fields_munch($($rest)*),
@pinned($($pinned)*),
@@ -689,6 +705,7 @@ macro_rules! __pin_data {
@name($name:ident),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@where($($whr:tt)*),
// We found the `#[pin]` attr.
@fields_munch(#[pin] $($rest:tt)*),
@@ -705,6 +722,7 @@ macro_rules! __pin_data {
@name($name),
@impl_generics($($impl_generics)*),
@ty_generics($($ty_generics)*),
+ @decl_generics($($decl_generics)*),
@where($($whr)*),
@fields_munch($($rest)*),
// We do not include `#[pin]` in the list of attributes, since it is not actually an
@@ -724,6 +742,7 @@ macro_rules! __pin_data {
@name($name:ident),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@where($($whr:tt)*),
// We reached the field declaration with visibility, for simplicity we only munch the
// visibility and put it into `$accum`.
@@ -741,6 +760,7 @@ macro_rules! __pin_data {
@name($name),
@impl_generics($($impl_generics)*),
@ty_generics($($ty_generics)*),
+ @decl_generics($($decl_generics)*),
@where($($whr)*),
@fields_munch($field $($rest)*),
@pinned($($pinned)*),
@@ -757,6 +777,7 @@ macro_rules! __pin_data {
@name($name:ident),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@where($($whr:tt)*),
// Some other attribute, just put it into `$accum`.
@fields_munch(#[$($attr:tt)*] $($rest:tt)*),
@@ -773,6 +794,7 @@ macro_rules! __pin_data {
@name($name),
@impl_generics($($impl_generics)*),
@ty_generics($($ty_generics)*),
+ @decl_generics($($decl_generics)*),
@where($($whr)*),
@fields_munch($($rest)*),
@pinned($($pinned)*),
@@ -789,6 +811,7 @@ macro_rules! __pin_data {
@name($name:ident),
@impl_generics($($impl_generics:tt)*),
@ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
@where($($whr:tt)*),
// We reached the end of the fields, plus an optional additional comma, since we added one
// before and the user is also allowed to put a trailing comma.
@@ -802,7 +825,7 @@ macro_rules! __pin_data {
) => {
// Declare the struct with all fields in the correct order.
$($struct_attrs)*
- $vis struct $name <$($impl_generics)*>
+ $vis struct $name <$($decl_generics)*>
where $($whr)*
{
$($fields)*
@@ -845,7 +868,7 @@ macro_rules! __pin_data {
// SAFETY: We have added the correct projection functions above to `__ThePinData` and
// we also use the least restrictive generics possible.
unsafe impl<$($impl_generics)*>
- $crate::init::__internal::HasPinData for $name<$($ty_generics)*>
+ $crate::__internal::HasPinData for $name<$($ty_generics)*>
where $($whr)*
{
type PinData = __ThePinData<$($ty_generics)*>;
@@ -855,8 +878,9 @@ macro_rules! __pin_data {
}
}
+ // SAFETY: TODO.
unsafe impl<$($impl_generics)*>
- $crate::init::__internal::PinData for __ThePinData<$($ty_generics)*>
+ $crate::__internal::PinData for __ThePinData<$($ty_generics)*>
where $($whr)*
{
type Datee = $name<$($ty_generics)*>;
@@ -906,16 +930,16 @@ macro_rules! __pin_data {
// `Drop`. Additionally we will implement this trait for the struct leading to a conflict,
// if it also implements `Drop`
trait MustNotImplDrop {}
- #[allow(drop_bounds)]
+ #[expect(drop_bounds)]
impl<T: ::core::ops::Drop> MustNotImplDrop for T {}
impl<$($impl_generics)*> MustNotImplDrop for $name<$($ty_generics)*>
where $($whr)* {}
// We also take care to prevent users from writing a useless `PinnedDrop` implementation.
// They might implement `PinnedDrop` correctly for the struct, but forget to give
// `PinnedDrop` as the parameter to `#[pin_data]`.
- #[allow(non_camel_case_types)]
+ #[expect(non_camel_case_types)]
trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
- impl<T: $crate::init::PinnedDrop>
+ impl<T: $crate::PinnedDrop>
UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
impl<$($impl_generics)*>
UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for $name<$($ty_generics)*>
@@ -938,8 +962,8 @@ macro_rules! __pin_data {
let pinned = unsafe { ::core::pin::Pin::new_unchecked(self) };
// SAFETY: Since this is a drop function, we can create this token to call the
// pinned destructor of this type.
- let token = unsafe { $crate::init::__internal::OnlyCallFromDrop::new() };
- $crate::init::PinnedDrop::drop(pinned, token);
+ let token = unsafe { $crate::__internal::OnlyCallFromDrop::new() };
+ $crate::PinnedDrop::drop(pinned, token);
}
}
};
@@ -970,6 +994,7 @@ macro_rules! __pin_data {
//
// The functions are `unsafe` to prevent accidentally calling them.
#[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
where $($whr)*
{
@@ -978,9 +1003,10 @@ macro_rules! __pin_data {
$pvis unsafe fn $p_field<E>(
self,
slot: *mut $p_type,
- init: impl $crate::init::PinInit<$p_type, E>,
+ init: impl $crate::PinInit<$p_type, E>,
) -> ::core::result::Result<(), E> {
- unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
}
)*
$(
@@ -988,9 +1014,10 @@ macro_rules! __pin_data {
$fvis unsafe fn $field<E>(
self,
slot: *mut $type,
- init: impl $crate::init::Init<$type, E>,
+ init: impl $crate::Init<$type, E>,
) -> ::core::result::Result<(), E> {
- unsafe { $crate::init::Init::__init(init, slot) }
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
}
)*
}
@@ -1104,16 +1131,18 @@ macro_rules! __init_internal {
// no possibility of returning without `unsafe`.
struct __InitOk;
// Get the data about fields from the supplied type.
+ //
+ // SAFETY: TODO.
let data = unsafe {
- use $crate::init::__internal::$has_data;
+ use $crate::__internal::$has_data;
// Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
// information that is associated to already parsed fragments, so a path fragment
// cannot be used in this position. Doing the retokenization results in valid rust
// code.
- ::kernel::macros::paste!($t::$get_data())
+ $crate::macros::paste!($t::$get_data())
};
// Ensure that `data` really is of type `$data` and help with type inference:
- let init = $crate::init::__internal::$data::make_closure::<_, __InitOk, $err>(
+ let init = $crate::__internal::$data::make_closure::<_, __InitOk, $err>(
data,
move |slot| {
{
@@ -1123,7 +1152,7 @@ macro_rules! __init_internal {
// error when fields are missing (since they will be zeroed). We also have to
// check that the type actually implements `Zeroable`.
$({
- fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
+ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// Ensure that the struct is indeed `Zeroable`.
assert_zeroable(slot);
// SAFETY: The type implements `Zeroable` by the check above.
@@ -1159,7 +1188,8 @@ macro_rules! __init_internal {
let init = move |slot| -> ::core::result::Result<(), $err> {
init(slot).map(|__InitOk| ())
};
- let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) };
+ // SAFETY: TODO.
+ let init = unsafe { $crate::$construct_closure::<_, $err>(init) };
init
}};
(init_slot($($use_data:ident)?):
@@ -1190,16 +1220,16 @@ macro_rules! __init_internal {
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
// We use `paste!` to create new hygiene for `$field`.
- ::kernel::macros::paste! {
+ $crate::macros::paste! {
// SAFETY: We forget the guard later when initialization has succeeded.
- let [<$field>] = unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
$crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
- @guards([<$field>], $($guards,)*),
+ @guards([< __ $field _guard >], $($guards,)*),
@munch_fields($($rest)*),
);
}
@@ -1216,21 +1246,21 @@ macro_rules! __init_internal {
//
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
- unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+ unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
// We use `paste!` to create new hygiene for `$field`.
- ::kernel::macros::paste! {
+ $crate::macros::paste! {
// SAFETY: We forget the guard later when initialization has succeeded.
- let [<$field>] = unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
$crate::__init_internal!(init_slot():
@data($data),
@slot($slot),
- @guards([<$field>], $($guards,)*),
+ @guards([< __ $field _guard >], $($guards,)*),
@munch_fields($($rest)*),
);
}
@@ -1253,16 +1283,16 @@ macro_rules! __init_internal {
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
// We use `paste!` to create new hygiene for `$field`.
- ::kernel::macros::paste! {
+ $crate::macros::paste! {
// SAFETY: We forget the guard later when initialization has succeeded.
- let [<$field>] = unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
$crate::__init_internal!(init_slot($($use_data)?):
@data($data),
@slot($slot),
- @guards([<$field>], $($guards,)*),
+ @guards([< __ $field _guard >], $($guards,)*),
@munch_fields($($rest)*),
);
}
@@ -1290,7 +1320,7 @@ macro_rules! __init_internal {
// information that is associated to already parsed fragments, so a path fragment
// cannot be used in this position. Doing the retokenization results in valid rust
// code.
- ::kernel::macros::paste!(
+ $crate::macros::paste!(
::core::ptr::write($slot, $t {
$($acc)*
..zeroed
@@ -1307,12 +1337,14 @@ macro_rules! __init_internal {
// Endpoint, nothing more to munch, create the initializer.
// Since we are in the closure that is never called, this will never get executed.
// We abuse `slot` to get the correct type inference here:
+ //
+ // SAFETY: TODO.
unsafe {
// Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
// information that is associated to already parsed fragments, so a path fragment
// cannot be used in this position. Doing the retokenization results in valid rust
// code.
- ::kernel::macros::paste!(
+ $crate::macros::paste!(
::core::ptr::write($slot, $t {
$($acc)*
});
@@ -1367,12 +1399,12 @@ macro_rules! __derive_zeroable {
) => {
// SAFETY: Every field type implements `Zeroable` and padding bytes may be zero.
#[automatically_derived]
- unsafe impl<$($impl_generics)*> $crate::init::Zeroable for $name<$($ty_generics)*>
+ unsafe impl<$($impl_generics)*> $crate::Zeroable for $name<$($ty_generics)*>
where
$($($whr)*)?
{}
const _: () = {
- fn assert_zeroable<T: ?::core::marker::Sized + $crate::init::Zeroable>() {}
+ fn assert_zeroable<T: ?::core::marker::Sized + $crate::Zeroable>() {}
fn ensure_zeroable<$($impl_generics)*>()
where $($($whr)*)?
{
diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
index 0caad902ba40..c98d7a8cde77 100644
--- a/rust/uapi/lib.rs
+++ b/rust/uapi/lib.rs
@@ -14,6 +14,8 @@
#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
#![allow(
clippy::all,
+ clippy::undocumented_unsafe_blocks,
+ dead_code,
missing_docs,
non_camel_case_types,
non_upper_case_globals,
@@ -22,5 +24,11 @@
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
+#![cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))]
+
+// Manual definition of blocklisted types.
+type __kernel_size_t = usize;
+type __kernel_ssize_t = isize;
+type __kernel_ptrdiff_t = isize;
include!(concat!(env!("OBJTREE"), "/rust/uapi/uapi_generated.rs"));
diff --git a/rust/uapi/uapi_helper.h b/rust/uapi/uapi_helper.h
index 08f5e9334c9e..1409441359f5 100644
--- a/rust/uapi/uapi_helper.h
+++ b/rust/uapi/uapi_helper.h
@@ -7,5 +7,8 @@
*/
#include <uapi/asm-generic/ioctl.h>
+#include <uapi/drm/drm.h>
+#include <uapi/drm/nova_drm.h>
+#include <uapi/linux/mdio.h>
#include <uapi/linux/mii.h>
#include <uapi/linux/ethtool.h>