-rw-r--r--  .mailmap                                              |   1
-rw-r--r--  Documentation/process/embargoed-hardware-issues.rst   |   1
-rw-r--r--  fs/smb/server/connection.c                            |   2
-rw-r--r--  fs/smb/server/connection.h                            |   1
-rw-r--r--  fs/smb/server/smb2pdu.c                               |  74
-rw-r--r--  fs/smb/server/transport_rdma.c                        |  10
-rw-r--r--  fs/smb/server/transport_tcp.c                         |   3
-rw-r--r--  fs/smb/server/vfs.c                                   |   5
-rw-r--r--  fs/smb/server/vfs_cache.h                             |   1
-rw-r--r--  kernel/cgroup/legacy_freezer.c                        |   3
-rw-r--r--  rust/bindings/bindings_helper.h                       |   1
-rw-r--r--  rust/helpers/completion.c                             |   8
-rw-r--r--  rust/helpers/helpers.c                                |   1
-rw-r--r--  rust/kernel/devres.rs                                 |  60
-rw-r--r--  rust/kernel/revocable.rs                              |  18
-rw-r--r--  rust/kernel/sync.rs                                   |   2
-rw-r--r--  rust/kernel/sync/completion.rs                        | 112
17 files changed, 256 insertions, 47 deletions
@@ -197,6 +197,7 @@ Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
 Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
+Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
 David Brownell <david-b@pacbell.net>
 David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
 David Heidelberg <david@ixit.cz> <d.okias@gmail.com>
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index da6bf0f6d01e..34e00848e0da 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -290,6 +290,7 @@ an involved disclosed party. The current ambassadors list:
   AMD           Tom Lendacky <thomas.lendacky@amd.com>
   Ampere        Darren Hart <darren@os.amperecomputing.com>
   ARM           Catalin Marinas <catalin.marinas@arm.com>
+  IBM Power     Madhavan Srinivasan <maddy@linux.ibm.com>
   IBM Z         Christian Borntraeger <borntraeger@de.ibm.com>
   Intel         Tony Luck <tony.luck@intel.com>
   Qualcomm      Trilok Soni <quic_tsoni@quicinc.com>
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index 83764c230e9d..3f04a2977ba8 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -40,7 +40,7 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
 	kvfree(conn->request_buf);
 	kfree(conn->preauth_info);
 	if (atomic_dec_and_test(&conn->refcnt)) {
-		ksmbd_free_transport(conn->transport);
+		conn->transport->ops->free_transport(conn->transport);
 		kfree(conn);
 	}
 }
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 6efed923bd68..dd3e0e3f7bf0 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -133,6 +133,7 @@ struct ksmbd_transport_ops {
 			  void *buf, unsigned int len,
 			  struct smb2_buffer_desc_v1 *desc,
 			  unsigned int desc_len);
+	void (*free_transport)(struct ksmbd_transport *kt);
 };
 
 struct ksmbd_transport {
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 1a308171b599..fafa86273f12 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1607,17 +1607,18 @@ static int krb5_authenticate(struct ksmbd_work *work,
 	out_len = work->response_sz -
 		(le16_to_cpu(rsp->SecurityBufferOffset) + 4);
 
-	/* Check previous session */
-	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
-	if (prev_sess_id && prev_sess_id != sess->id)
-		destroy_previous_session(conn, sess->user, prev_sess_id);
-
 	retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
 					 out_blob, &out_len);
 	if (retval) {
 		ksmbd_debug(SMB, "krb5 authentication failed\n");
 		return -EINVAL;
 	}
+
+	/* Check previous session */
+	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
+	if (prev_sess_id && prev_sess_id != sess->id)
+		destroy_previous_session(conn, sess->user, prev_sess_id);
+
 	rsp->SecurityBufferLength = cpu_to_le16(out_len);
 
 	if ((conn->sign || server_conf.enforced_signing) ||
@@ -4871,8 +4872,13 @@ static int get_file_standard_info(struct smb2_query_info_rsp *rsp,
 	sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
 	delete_pending = ksmbd_inode_pending_delete(fp);
 
-	sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9);
-	sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+	if (ksmbd_stream_fd(fp) == false) {
+		sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9);
+		sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+	} else {
+		sinfo->AllocationSize = cpu_to_le64(fp->stream.size);
+		sinfo->EndOfFile = cpu_to_le64(fp->stream.size);
+	}
 	sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
 	sinfo->DeletePending = delete_pending;
 	sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
@@ -4935,9 +4941,14 @@ static int get_file_all_info(struct ksmbd_work *work,
 	file_info->ChangeTime = cpu_to_le64(time);
 	file_info->Attributes = fp->f_ci->m_fattr;
 	file_info->Pad1 = 0;
-	file_info->AllocationSize =
-		cpu_to_le64(stat.blocks << 9);
-	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+	if (ksmbd_stream_fd(fp) == false) {
+		file_info->AllocationSize =
+			cpu_to_le64(stat.blocks << 9);
+		file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+	} else {
+		file_info->AllocationSize = cpu_to_le64(fp->stream.size);
+		file_info->EndOfFile = cpu_to_le64(fp->stream.size);
+	}
 	file_info->NumberOfLinks =
 			cpu_to_le32(get_nlink(&stat) - delete_pending);
 	file_info->DeletePending = delete_pending;
@@ -4946,7 +4957,10 @@ static int get_file_all_info(struct ksmbd_work *work,
 	file_info->IndexNumber = cpu_to_le64(stat.ino);
 	file_info->EASize = 0;
 	file_info->AccessFlags = fp->daccess;
-	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+	if (ksmbd_stream_fd(fp) == false)
+		file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+	else
+		file_info->CurrentByteOffset = cpu_to_le64(fp->stream.pos);
 	file_info->Mode = fp->coption;
 	file_info->AlignmentRequirement = 0;
 	conv_len = smbConvertToUTF16((__le16 *)file_info->FileName, filename,
@@ -5134,8 +5148,13 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
 	time = ksmbd_UnixTimeToNT(stat.ctime);
 	file_info->ChangeTime = cpu_to_le64(time);
 	file_info->Attributes = fp->f_ci->m_fattr;
-	file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
-	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+	if (ksmbd_stream_fd(fp) == false) {
+		file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+		file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+	} else {
+		file_info->AllocationSize = cpu_to_le64(fp->stream.size);
+		file_info->EndOfFile = cpu_to_le64(fp->stream.size);
+	}
 	file_info->Reserved = cpu_to_le32(0);
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_ntwrk_info));
@@ -5158,7 +5177,11 @@ static void get_file_position_info(struct smb2_query_info_rsp *rsp,
 	struct smb2_file_pos_info *file_info;
 
 	file_info = (struct smb2_file_pos_info *)rsp->Buffer;
-	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+	if (ksmbd_stream_fd(fp) == false)
+		file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+	else
+		file_info->CurrentByteOffset = cpu_to_le64(fp->stream.pos);
+
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_pos_info));
 }
@@ -5247,8 +5270,13 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
 	file_info->ChangeTime = cpu_to_le64(time);
 	file_info->DosAttributes = fp->f_ci->m_fattr;
 	file_info->Inode = cpu_to_le64(stat.ino);
-	file_info->EndOfFile = cpu_to_le64(stat.size);
-	file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+	if (ksmbd_stream_fd(fp) == false) {
+		file_info->EndOfFile = cpu_to_le64(stat.size);
+		file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+	} else {
+		file_info->EndOfFile = cpu_to_le64(fp->stream.size);
+		file_info->AllocationSize = cpu_to_le64(fp->stream.size);
+	}
 	file_info->HardLinks = cpu_to_le32(stat.nlink);
 	file_info->Mode = cpu_to_le32(stat.mode & 0777);
 	switch (stat.mode & S_IFMT) {
@@ -6190,6 +6218,9 @@ static int set_file_allocation_info(struct ksmbd_work *work,
 	if (!(fp->daccess & FILE_WRITE_DATA_LE))
 		return -EACCES;
 
+	if (ksmbd_stream_fd(fp) == true)
+		return 0;
+
 	rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
 			 AT_STATX_SYNC_AS_STAT);
 	if (rc)
@@ -6248,7 +6279,8 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
 	 * truncate of some filesystem like FAT32 fill zero data in
 	 * truncated range.
 	 */
-	if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
+	if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC &&
+	    ksmbd_stream_fd(fp) == false) {
 		ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize);
 		rc = ksmbd_vfs_truncate(work, fp, newsize);
 		if (rc) {
@@ -6321,7 +6353,13 @@ static int set_file_position_info(struct ksmbd_file *fp,
 		return -EINVAL;
 	}
 
-	fp->filp->f_pos = current_byte_offset;
+	if (ksmbd_stream_fd(fp) == false)
+		fp->filp->f_pos = current_byte_offset;
+	else {
+		if (current_byte_offset > XATTR_SIZE_MAX)
+			current_byte_offset = XATTR_SIZE_MAX;
+		fp->stream.pos = current_byte_offset;
+	}
 	return 0;
 }
 
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 4998df04ab95..64a428a06ace 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -159,7 +159,8 @@ struct smb_direct_transport {
 };
 
 #define KSMBD_TRANS(t)	((struct ksmbd_transport *)&((t)->transport))
-
+#define SMBD_TRANS(t)	((struct smb_direct_transport *)container_of(t, \
+				struct smb_direct_transport, transport))
 enum {
 	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
 	SMB_DIRECT_MSG_DATA_TRANSFER
@@ -410,6 +411,11 @@ err:
 	return NULL;
 }
 
+static void smb_direct_free_transport(struct ksmbd_transport *kt)
+{
+	kfree(SMBD_TRANS(kt));
+}
+
 static void free_transport(struct smb_direct_transport *t)
 {
 	struct smb_direct_recvmsg *recvmsg;
@@ -455,7 +461,6 @@ static void free_transport(struct smb_direct_transport *t)
 
 	smb_direct_destroy_pools(t);
 	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
-	kfree(t);
 }
 
 static struct smb_direct_sendmsg
@@ -2281,4 +2286,5 @@ static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
 	.read = smb_direct_read,
 	.rdma_read = smb_direct_rdma_read,
 	.rdma_write = smb_direct_rdma_write,
+	.free_transport = smb_direct_free_transport,
 };
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index abedf510899a..4e9f98db9ff4 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -93,7 +93,7 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
 	return t;
 }
 
-void ksmbd_free_transport(struct ksmbd_transport *kt)
+static void ksmbd_tcp_free_transport(struct ksmbd_transport *kt)
 {
 	struct tcp_transport *t = TCP_TRANS(kt);
 
@@ -656,4 +656,5 @@ static const struct ksmbd_transport_ops ksmbd_tcp_transport_ops = {
 	.read = ksmbd_tcp_read,
 	.writev = ksmbd_tcp_writev,
 	.disconnect = ksmbd_tcp_disconnect,
+	.free_transport = ksmbd_tcp_free_transport,
 };
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index ba45e809555a..0f3aad12e495 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -293,6 +293,7 @@ static int ksmbd_vfs_stream_read(struct ksmbd_file *fp, char *buf, loff_t *pos,
 	if (v_len - *pos < count)
 		count = v_len - *pos;
 
+	fp->stream.pos = v_len;
 	memcpy(buf, &stream_buf[*pos], count);
 
@@ -456,8 +457,8 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
 				 true);
 	if (err < 0)
 		goto out;
-
-	fp->filp->f_pos = *pos;
+	else
+		fp->stream.pos = size;
 	err = 0;
 out:
 	kvfree(stream_buf);
diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
index 5bbb179736c2..0708155b5caf 100644
--- a/fs/smb/server/vfs_cache.h
+++ b/fs/smb/server/vfs_cache.h
@@ -44,6 +44,7 @@ struct ksmbd_lock {
 struct stream {
 	char *name;
 	ssize_t size;
+	loff_t pos;
 };
 
 struct ksmbd_inode {
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index 039d1eb2f215..507b8f19a262 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -188,13 +188,12 @@ static void freezer_attach(struct cgroup_taskset *tset)
 		if (!(freezer->state & CGROUP_FREEZING)) {
 			__thaw_task(task);
 		} else {
-			freeze_task(task);
-
 			/* clear FROZEN and propagate upwards */
 			while (freezer && (freezer->state & CGROUP_FROZEN)) {
 				freezer->state &= ~CGROUP_FROZEN;
 				freezer = parent_freezer(freezer);
 			}
+			freeze_task(task);
 		}
 	}
 
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index bc494745f67b..8cbb660e2ec2 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -39,6 +39,7 @@
 #include <linux/blk_types.h>
 #include <linux/blkdev.h>
 #include <linux/clk.h>
+#include <linux/completion.h>
 #include <linux/configfs.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
diff --git a/rust/helpers/completion.c b/rust/helpers/completion.c
new file mode 100644
index 000000000000..b2443262a2ae
--- /dev/null
+++ b/rust/helpers/completion.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/completion.h>
+
+void rust_helper_init_completion(struct completion *x)
+{
+	init_completion(x);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 16fa9bca5949..b15b3cddad4e 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -13,6 +13,7 @@
 #include "build_assert.c"
 #include "build_bug.c"
 #include "clk.c"
+#include "completion.c"
 #include "cpu.c"
 #include "cpufreq.c"
 #include "cpumask.c"
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index 0f79a2ec9474..57502534d985 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -12,26 +12,28 @@ use crate::{
     error::{Error, Result},
     ffi::c_void,
     prelude::*,
-    revocable::Revocable,
-    sync::Arc,
+    revocable::{Revocable, RevocableGuard},
+    sync::{rcu, Arc, Completion},
     types::ARef,
 };
 
-use core::ops::Deref;
-
 #[pin_data]
 struct DevresInner<T> {
     dev: ARef<Device>,
     callback: unsafe extern "C" fn(*mut c_void),
     #[pin]
     data: Revocable<T>,
+    #[pin]
+    revoke: Completion,
 }
 
 /// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to
 /// manage their lifetime.
 ///
 /// [`Device`] bound resources should be freed when either the resource goes out of scope or the
-/// [`Device`] is unbound respectively, depending on what happens first.
+/// [`Device`] is unbound respectively, depending on what happens first. In any case, it is always
+/// guaranteed that revoking the device resource is completed before the corresponding [`Device`]
+/// is unbound.
 ///
 /// To achieve that [`Devres`] registers a devres callback on creation, which is called once the
 /// [`Device`] is unbound, revoking access to the encapsulated resource (see also [`Revocable`]).
@@ -102,6 +104,7 @@ impl<T> DevresInner<T> {
                 dev: dev.into(),
                 callback: Self::devres_callback,
                 data <- Revocable::new(data),
+                revoke <- Completion::new(),
             }),
             flags,
         )?;
@@ -130,26 +133,28 @@ impl<T> DevresInner<T> {
         self as _
     }
 
-    fn remove_action(this: &Arc<Self>) {
+    fn remove_action(this: &Arc<Self>) -> bool {
         // SAFETY:
         // - `self.inner.dev` is a valid `Device`,
         // - the `action` and `data` pointers are the exact same ones as given to devm_add_action()
         //   previously,
         // - `self` is always valid, even if the action has been released already.
-        let ret = unsafe {
+        let success = unsafe {
             bindings::devm_remove_action_nowarn(
                 this.dev.as_raw(),
                 Some(this.callback),
                 this.as_ptr() as _,
             )
-        };
+        } == 0;
 
-        if ret == 0 {
+        if success {
             // SAFETY: We leaked an `Arc` reference to devm_add_action() in `DevresInner::new`; if
             // devm_remove_action_nowarn() was successful we can (and have to) claim back ownership
             // of this reference.
             let _ = unsafe { Arc::from_raw(this.as_ptr()) };
         }
+
+        success
     }
 
     #[allow(clippy::missing_safety_doc)]
@@ -161,7 +166,12 @@ impl<T> DevresInner<T> {
         //   `DevresInner::new`.
         let inner = unsafe { Arc::from_raw(ptr) };
 
-        inner.data.revoke();
+        if !inner.data.revoke() {
+            // If `revoke()` returns false, it means that `Devres::drop` already started revoking
+            // `inner.data` for us. Hence we have to wait until `Devres::drop()` signals that it
+            // completed revoking `inner.data`.
+            inner.revoke.wait_for_completion();
+        }
     }
 }
 
@@ -218,20 +228,36 @@ impl<T> Devres<T> {
         // SAFETY: `dev` being the same device as the device this `Devres` has been created for
         // proves that `self.0.data` hasn't been revoked and is guaranteed to not be revoked as
        // long as `dev` lives; `dev` lives at least as long as `self`.
-        Ok(unsafe { self.deref().access() })
+        Ok(unsafe { self.0.data.access() })
     }
-}
 
-impl<T> Deref for Devres<T> {
-    type Target = Revocable<T>;
+    /// [`Devres`] accessor for [`Revocable::try_access`].
+    pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
+        self.0.data.try_access()
+    }
+
+    /// [`Devres`] accessor for [`Revocable::try_access_with`].
+    pub fn try_access_with<R, F: FnOnce(&T) -> R>(&self, f: F) -> Option<R> {
+        self.0.data.try_access_with(f)
+    }
 
-    fn deref(&self) -> &Self::Target {
-        &self.0.data
+    /// [`Devres`] accessor for [`Revocable::try_access_with_guard`].
+    pub fn try_access_with_guard<'a>(&'a self, guard: &'a rcu::Guard) -> Option<&'a T> {
+        self.0.data.try_access_with_guard(guard)
     }
 }
 
 impl<T> Drop for Devres<T> {
     fn drop(&mut self) {
-        DevresInner::remove_action(&self.0);
+        // SAFETY: When `drop` runs, it is guaranteed that nobody is accessing the revocable data
+        // anymore, hence it is safe not to wait for the grace period to finish.
+        if unsafe { self.0.data.revoke_nosync() } {
+            // We revoked `self.0.data` before the devres action did, hence try to remove it.
+            if !DevresInner::remove_action(&self.0) {
+                // We could not remove the devres action, which means that it now runs concurrently,
+                // hence signal that `self.0.data` has been revoked successfully.
+                self.0.revoke.complete_all();
+            }
+        }
     }
 }
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
index db4aa46bb121..06a3cdfce344 100644
--- a/rust/kernel/revocable.rs
+++ b/rust/kernel/revocable.rs
@@ -154,8 +154,10 @@ impl<T> Revocable<T> {
     /// # Safety
     ///
     /// Callers must ensure that there are no more concurrent users of the revocable object.
-    unsafe fn revoke_internal<const SYNC: bool>(&self) {
-        if self.is_available.swap(false, Ordering::Relaxed) {
+    unsafe fn revoke_internal<const SYNC: bool>(&self) -> bool {
+        let revoke = self.is_available.swap(false, Ordering::Relaxed);
+
+        if revoke {
             if SYNC {
                 // SAFETY: Just an FFI call, there are no further requirements.
                 unsafe { bindings::synchronize_rcu() };
@@ -165,6 +167,8 @@
             // `compare_exchange` above that takes `is_available` from `true` to `false`.
             unsafe { drop_in_place(self.data.get()) };
         }
+
+        revoke
     }
 
     /// Revokes access to and drops the wrapped object.
@@ -172,10 +176,13 @@ impl<T> Revocable<T> {
     /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`],
     /// expecting that there are no concurrent users of the object.
     ///
+    /// Returns `true` if `&self` has been revoked with this call, `false` if it was revoked
+    /// already.
+    ///
     /// # Safety
    ///
     /// Callers must ensure that there are no more concurrent users of the revocable object.
-    pub unsafe fn revoke_nosync(&self) {
+    pub unsafe fn revoke_nosync(&self) -> bool {
         // SAFETY: By the safety requirement of this function, the caller ensures that nobody is
         // accessing the data anymore and hence we don't have to wait for the grace period to
         // finish.
@@ -189,7 +196,10 @@ impl<T> Revocable<T> {
     /// If there are concurrent users of the object (i.e., ones that called
     /// [`Revocable::try_access`] beforehand and still haven't dropped the returned guard), this
     /// function waits for the concurrent access to complete before dropping the wrapped object.
-    pub fn revoke(&self) {
+    ///
+    /// Returns `true` if `&self` has been revoked with this call, `false` if it was revoked
+    /// already.
+    pub fn revoke(&self) -> bool {
         // SAFETY: By passing `true` we ask `revoke_internal` to wait for the grace period to
         // finish.
         unsafe { self.revoke_internal::<true>() }
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 36a719015583..c23a12639924 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -10,6 +10,7 @@ use crate::types::Opaque;
 use pin_init;
 
 mod arc;
+pub mod completion;
 mod condvar;
 pub mod lock;
 mod locked_by;
@@ -17,6 +18,7 @@ pub mod poll;
 pub mod rcu;
 
 pub use arc::{Arc, ArcBorrow, UniqueArc};
+pub use completion::Completion;
 pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
 pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
 pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
diff --git a/rust/kernel/sync/completion.rs b/rust/kernel/sync/completion.rs
new file mode 100644
index 000000000000..c50012a940a3
--- /dev/null
+++ b/rust/kernel/sync/completion.rs
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Completion support.
+//!
+//! Reference: <https://docs.kernel.org/scheduler/completion.html>
+//!
+//! C header: [`include/linux/completion.h`](srctree/include/linux/completion.h)
+
+use crate::{bindings, prelude::*, types::Opaque};
+
+/// Synchronization primitive to signal when a certain task has been completed.
+///
+/// The [`Completion`] synchronization primitive signals when a certain task has been completed by
+/// waking up other tasks that have been queued up to wait for the [`Completion`] to be completed.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::{Arc, Completion};
+/// use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
+///
+/// #[pin_data]
+/// struct MyTask {
+///     #[pin]
+///     work: Work<MyTask>,
+///     #[pin]
+///     done: Completion,
+/// }
+///
+/// impl_has_work! {
+///     impl HasWork<Self> for MyTask { self.work }
+/// }
+///
+/// impl MyTask {
+///     fn new() -> Result<Arc<Self>> {
+///         let this = Arc::pin_init(pin_init!(MyTask {
+///             work <- new_work!("MyTask::work"),
+///             done <- Completion::new(),
+///         }), GFP_KERNEL)?;
+///
+///         let _ = workqueue::system().enqueue(this.clone());
+///
+///         Ok(this)
+///     }
+///
+///     fn wait_for_completion(&self) {
+///         self.done.wait_for_completion();
+///
+///         pr_info!("Completion: task complete\n");
+///     }
+/// }
+///
+/// impl WorkItem for MyTask {
+///     type Pointer = Arc<MyTask>;
+///
+///     fn run(this: Arc<MyTask>) {
+///         // process this task
+///         this.done.complete_all();
+///     }
+/// }
+///
+/// let task = MyTask::new()?;
+/// task.wait_for_completion();
+/// # Ok::<(), Error>(())
+/// ```
+#[pin_data]
+pub struct Completion {
+    #[pin]
+    inner: Opaque<bindings::completion>,
+}
+
+// SAFETY: `Completion` is safe to be send to any task.
+unsafe impl Send for Completion {}
+
+// SAFETY: `Completion` is safe to be accessed concurrently.
+unsafe impl Sync for Completion {}
+
+impl Completion {
+    /// Create an initializer for a new [`Completion`].
+    pub fn new() -> impl PinInit<Self> {
+        pin_init!(Self {
+            inner <- Opaque::ffi_init(|slot: *mut bindings::completion| {
+                // SAFETY: `slot` is a valid pointer to an uninitialized `struct completion`.
+                unsafe { bindings::init_completion(slot) };
+            }),
+        })
+    }
+
+    fn as_raw(&self) -> *mut bindings::completion {
+        self.inner.get()
+    }
+
+    /// Signal all tasks waiting on this completion.
+    ///
+    /// This method wakes up all tasks waiting on this completion; after this operation the
+    /// completion is permanently done, i.e. signals all current and future waiters.
+    pub fn complete_all(&self) {
+        // SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
+        unsafe { bindings::complete_all(self.as_raw()) };
+    }
+
+    /// Wait for completion of a task.
+    ///
+    /// This method waits for the completion of a task; it is not interruptible and there is no
+    /// timeout.
+    ///
+    /// See also [`Completion::complete_all`].
+    pub fn wait_for_completion(&self) {
+        // SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
+        unsafe { bindings::wait_for_completion(self.as_raw()) };
+    }
+}