path: root/io_uring/msg_ring.c
author     Linus Torvalds <torvalds@linux-foundation.org>    2022-08-02 13:20:44 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2022-08-02 13:20:44 -0700
commit     b349b1181d24af1c151134a3c39725e94a5619dd (patch)
tree       7347cc4035de947c22e575ac7c649c0fa8658dd1 /io_uring/msg_ring.c
parent     efb2883060afc79638bb1eb19e2c30e7f6c5a178 (diff)
parent     f6b543fd03d347e8bf245cee4f2d54eb6ffd8fcb (diff)
Merge tag 'for-5.20/io_uring-2022-07-29' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:

 - As per (valid) complaint in the last merge window, fs/io_uring.c has
   grown quite large these days. io_uring isn't really tied to fs either,
   as it supports a wide variety of functionality outside of that.

   Move the code to io_uring/ and split it into files that each implement
   a specific request type, and split some code into helpers as well. The
   code is organized a lot better like this, and io_uring.c is now < 4K
   LOC (me).

 - Deprecate the epoll_ctl opcode. It'll still work, just trigger a
   warning once if used. If we don't get any complaints on this, and I
   don't expect any, then we can fully remove it in a future release (me).

 - Improve the cancel hash locking (Hao)

 - kbuf cleanups (Hao)

 - Efficiency improvements to the task_work handling (Dylan, Pavel)

 - Provided buffer improvements (Dylan)

 - Add multishot support for recv/recvmsg. This is similar to the
   multishot support we already have for accept (or poll), where a single
   SQE can trigger a completion every time data is received. For
   applications that expect to do more than a few receives on an
   instantiated socket, this greatly improves efficiency (Dylan).

 - Efficiency improvements for poll handling (Pavel)

 - Poll cancelation improvements (Pavel)

 - Allow specifying a range for direct descriptor allocations (Pavel)

 - Cleanup the cqe32 handling (Pavel)

 - Move io_uring types to greatly cleanup the tracing (Pavel)

 - Tons of great code cleanups and improvements (Pavel)

 - Add a way to do sync cancelations rather than through the sqe -> cqe
   interface, as that's a lot easier to use for some use cases (me).

 - Add support to IORING_OP_MSG_RING for sending direct descriptors to a
   different ring. This avoids the usually problematic SCM case, as we
   disallow those. (me)

 - Make the per-command alloc cache we use for apoll generic, place
   limits on it, and use it for netmsg as well (me).

 - Various cleanups (me, Michal, Gustavo, Uros)

* tag 'for-5.20/io_uring-2022-07-29' of git://git.kernel.dk/linux-block: (172 commits)
  io_uring: ensure REQ_F_ISREG is set async offload
  net: fix compat pointer in get_compat_msghdr()
  io_uring: Don't require reinitable percpu_ref
  io_uring: fix types in io_recvmsg_multishot_overflow
  io_uring: Use atomic_long_try_cmpxchg in __io_account_mem
  io_uring: support multishot in recvmsg
  net: copy from user before calling __get_compat_msghdr
  net: copy from user before calling __copy_msghdr
  io_uring: support 0 length iov in buffer select in compat
  io_uring: fix multishot ending when not polled
  io_uring: add netmsg cache
  io_uring: impose max limit on apoll cache
  io_uring: add abstraction around apoll cache
  io_uring: move apoll cache to poll.c
  io_uring: consolidate hash_locked io-wq handling
  io_uring: clear REQ_F_HASH_LOCKED on hash removal
  io_uring: don't race double poll setting REQ_F_ASYNC_DATA
  io_uring: don't miss setting REQ_F_DOUBLE_POLL
  io_uring: disable multishot recvmsg
  io_uring: only trace one of complete or overflow
  ...
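The IORING_OP_MSG_RING plumbing added in the file below is easiest to see from userspace. The following is a minimal sketch using the io_uring_prep_msg_ring() helper from recent liburing to post a plain IORING_MSG_DATA completion into another ring; the ring setup, the 0xcafe user_data value and the notify_other_ring() helper name are assumptions made for illustration, not part of this commit.

#include <errno.h>
#include <liburing.h>

/*
 * Sketch: assumes "self" and "target" were both set up with
 * io_uring_queue_init() and that another thread waits on the
 * target ring's CQ.
 */
static int notify_other_ring(struct io_uring *self, struct io_uring *target)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(self);
	if (!sqe)
		return -EAGAIN;

	/* post a CQE with user_data 0xcafe and res 0 (the len) into the target ring */
	io_uring_prep_msg_ring(sqe, target->ring_fd, 0, 0xcafe, 0);

	ret = io_uring_submit(self);
	if (ret < 0)
		return ret;

	/* the CQE on our own ring reports whether the post succeeded */
	ret = io_uring_wait_cqe(self, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(self, cqe);
	return ret;
}

The target ring consumes no SQE of its own; it simply observes a CQE carrying the sender's user_data and len, which maps onto io_msg_ring_data() in the diff below.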
Diffstat (limited to 'io_uring/msg_ring.c')
-rw-r--r--   io_uring/msg_ring.c   171
1 file changed, 171 insertions(+), 0 deletions(-)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
new file mode 100644
index 000000000000..753d16734319
--- /dev/null
+++ b/io_uring/msg_ring.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/nospec.h>
+#include <linux/io_uring.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "io_uring.h"
+#include "rsrc.h"
+#include "filetable.h"
+#include "msg_ring.h"
+
+struct io_msg {
+ struct file *file;
+ u64 user_data;
+ u32 len;
+ u32 cmd;
+ u32 src_fd;
+ u32 dst_fd;
+ u32 flags;
+};
+
+static int io_msg_ring_data(struct io_kiocb *req)
+{
+ struct io_ring_ctx *target_ctx = req->file->private_data;
+ struct io_msg *msg = io_kiocb_to_cmd(req);
+
+ if (msg->src_fd || msg->dst_fd || msg->flags)
+ return -EINVAL;
+
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+ return 0;
+
+ return -EOVERFLOW;
+}
+
+static void io_double_unlock_ctx(struct io_ring_ctx *ctx,
+ struct io_ring_ctx *octx,
+ unsigned int issue_flags)
+{
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ mutex_unlock(&ctx->uring_lock);
+ mutex_unlock(&octx->uring_lock);
+}
+
+static int io_double_lock_ctx(struct io_ring_ctx *ctx,
+ struct io_ring_ctx *octx,
+ unsigned int issue_flags)
+{
+ /*
+ * To ensure proper ordering between the two ctxs, we can only
+ * attempt a trylock on the target. If that fails and we already have
+ * the source ctx lock, punt to io-wq.
+ */
+ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ if (!mutex_trylock(&octx->uring_lock))
+ return -EAGAIN;
+ return 0;
+ }
+
+ /* Always grab smallest value ctx first. We know ctx != octx. */
+ if (ctx < octx) {
+ mutex_lock(&ctx->uring_lock);
+ mutex_lock(&octx->uring_lock);
+ } else {
+ mutex_lock(&octx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ }
+
+ return 0;
+}
+
+static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_ring_ctx *target_ctx = req->file->private_data;
+ struct io_msg *msg = io_kiocb_to_cmd(req);
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long file_ptr;
+ struct file *src_file;
+ int ret;
+
+ if (target_ctx == ctx)
+ return -EINVAL;
+
+ ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
+ if (unlikely(ret))
+ return ret;
+
+ ret = -EBADF;
+ if (unlikely(msg->src_fd >= ctx->nr_user_files))
+ goto out_unlock;
+
+ msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
+ file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
+ src_file = (struct file *) (file_ptr & FFS_MASK);
+ get_file(src_file);
+
+ ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
+ if (ret < 0) {
+ fput(src_file);
+ goto out_unlock;
+ }
+
+ if (msg->flags & IORING_MSG_RING_CQE_SKIP)
+ goto out_unlock;
+
+ /*
+ * If this fails, the target still received the file descriptor but
+ * wasn't notified of the fact. This means that if this request
+ * completes with -EOVERFLOW, then the sender must ensure that a
+ * later IORING_OP_MSG_RING delivers the message.
+ */
+ if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+ ret = -EOVERFLOW;
+out_unlock:
+ io_double_unlock_ctx(ctx, target_ctx, issue_flags);
+ return ret;
+}
+
+int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_msg *msg = io_kiocb_to_cmd(req);
+
+ if (unlikely(sqe->buf_index || sqe->personality))
+ return -EINVAL;
+
+ msg->user_data = READ_ONCE(sqe->off);
+ msg->len = READ_ONCE(sqe->len);
+ msg->cmd = READ_ONCE(sqe->addr);
+ msg->src_fd = READ_ONCE(sqe->addr3);
+ msg->dst_fd = READ_ONCE(sqe->file_index);
+ msg->flags = READ_ONCE(sqe->msg_ring_flags);
+ if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
+ return -EINVAL;
+
+ return 0;
+}
+
+int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_msg *msg = io_kiocb_to_cmd(req);
+ int ret;
+
+ ret = -EBADFD;
+ if (!io_is_uring_fops(req->file))
+ goto done;
+
+ switch (msg->cmd) {
+ case IORING_MSG_DATA:
+ ret = io_msg_ring_data(req);
+ break;
+ case IORING_MSG_SEND_FD:
+ ret = io_msg_send_fd(req, issue_flags);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ /* put file to avoid an attempt to IOPOLL the req */
+ io_put_file(req->file);
+ req->file = NULL;
+ return IOU_OK;
+}