Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--  io_uring/uring_cmd.c  257
1 file changed, 145 insertions(+), 112 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index c33fca585dde..929cad6ee326 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -7,12 +7,78 @@
#include <linux/nospec.h>
#include <uapi/linux/io_uring.h>
-#include <asm/ioctls.h>
#include "io_uring.h"
+#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"
+void io_cmd_cache_free(const void *entry)
+{
+ struct io_async_cmd *ac = (struct io_async_cmd *)entry;
+
+ io_vec_free(&ac->vec);
+ kfree(ac);
+}
+
+static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ struct io_async_cmd *ac = req->async_data;
+ struct io_uring_cmd_data *cache = &ac->data;
+
+ if (cache->op_data) {
+ kfree(cache->op_data);
+ cache->op_data = NULL;
+ }
+
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ return;
+
+ io_alloc_cache_vec_kasan(&ac->vec);
+ if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
+ io_vec_free(&ac->vec);
+
+ if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
+ ioucmd->sqe = NULL;
+ req->async_data = NULL;
+ req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
+ }
+}
+
+void io_uring_cmd_cleanup(struct io_kiocb *req)
+{
+ io_req_uring_cleanup(req, 0);
+}
+
+bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx, bool cancel_all)
+{
+ struct hlist_node *tmp;
+ struct io_kiocb *req;
+ bool ret = false;
+
+ lockdep_assert_held(&ctx->uring_lock);
+
+ hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
+ hash_node) {
+ struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
+ struct io_uring_cmd);
+ struct file *file = req->file;
+
+ if (!cancel_all && req->tctx != tctx)
+ continue;
+
+ if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
+ file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
+ IO_URING_F_COMPLETE_DEFER);
+ ret = true;
+ }
+ }
+ io_submit_flush_completions(ctx);
+ return ret;
+}
+
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -52,12 +118,16 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
-static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
+static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
+ unsigned int flags = IO_URING_F_COMPLETE_DEFER;
- ioucmd->task_work_cb(ioucmd, issue_flags);
+ if (io_should_terminate_tw())
+ flags |= IO_URING_F_TASK_DEAD;
+
+ /* task_work executor checks the deferred list completion */
+ ioucmd->task_work_cb(ioucmd, flags);
}
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
@@ -83,7 +153,7 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
* Called by consumers of io_uring_cmd, if they originally returned
* -EIOCBQUEUED upon receiving the command.
*/
-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
+void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
unsigned issue_flags)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
@@ -96,24 +166,44 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
io_req_set_res(req, ret, 0);
if (req->ctx->flags & IORING_SETUP_CQE32)
io_req_set_cqe32_extra(req, res2, 0);
+ io_req_uring_cleanup(req, issue_flags);
if (req->ctx->flags & IORING_SETUP_IOPOLL) {
/* order with io_iopoll_req_issued() checking ->iopoll_complete */
smp_store_release(&req->iopoll_completed, 1);
+ } else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
+ if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
+ return;
+ io_req_complete_defer(req);
} else {
- struct io_tw_state ts = {
- .locked = !(issue_flags & IO_URING_F_UNLOCKED),
- };
- io_req_task_complete(req, &ts);
+ req->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(req);
}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
-int io_uring_cmd_prep_async(struct io_kiocb *req)
+static int io_uring_cmd_prep_setup(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
-
- memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = req->async_data;
+ struct io_async_cmd *ac;
+
+ /* see io_uring_cmd_get_async_data() */
+ BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);
+
+ ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
+ if (!ac)
+ return -ENOMEM;
+ ac->data.op_data = NULL;
+
+ /*
+ * Unconditionally cache the SQE for now - caching is only needed
+ * for requests that go async, but prep handlers must ensure that
+ * any SQE data is stable beyond prep. Since uring_cmd doesn't read
+ * in per-op data at prep time, play it safe and cache the SQE for
+ * every request. This can later get relaxed.
+ */
+ memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = ac->sqes;
return 0;
}
@@ -128,20 +218,12 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (ioucmd->flags & ~IORING_URING_CMD_MASK)
return -EINVAL;
- if (ioucmd->flags & IORING_URING_CMD_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- u16 index;
-
+ if (ioucmd->flags & IORING_URING_CMD_FIXED)
req->buf_index = READ_ONCE(sqe->buf_index);
- if (unlikely(req->buf_index >= ctx->nr_user_bufs))
- return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
- req->imu = ctx->user_bufs[index];
- io_req_set_rsrc_node(req, ctx, 0);
- }
- ioucmd->sqe = sqe;
+
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
- return 0;
+
+ return io_uring_cmd_prep_setup(req, sqe);
}
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
@@ -162,118 +244,69 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
issue_flags |= IO_URING_F_SQE128;
if (ctx->flags & IORING_SETUP_CQE32)
issue_flags |= IO_URING_F_CQE32;
- if (ctx->compat)
+ if (io_is_compat(ctx))
issue_flags |= IO_URING_F_COMPAT;
if (ctx->flags & IORING_SETUP_IOPOLL) {
if (!file->f_op->uring_cmd_iopoll)
return -EOPNOTSUPP;
issue_flags |= IO_URING_F_IOPOLL;
req->iopoll_completed = 0;
- }
-
- ret = file->f_op->uring_cmd(ioucmd, issue_flags);
- if (ret == -EAGAIN) {
- if (!req_has_async_data(req)) {
- if (io_alloc_async_data(req))
- return -ENOMEM;
- io_uring_cmd_prep_async(req);
+ if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
+ /* make sure every req only blocks once */
+ req->flags &= ~REQ_F_IOPOLL_STATE;
+ req->iopoll_start = ktime_get_ns();
}
- return -EAGAIN;
}
- if (ret != -EIOCBQUEUED) {
- if (ret < 0)
- req_set_fail(req);
- io_req_set_res(req, ret, 0);
+ ret = file->f_op->uring_cmd(ioucmd, issue_flags);
+ if (ret == -EAGAIN || ret == -EIOCBQUEUED)
return ret;
- }
-
- return IOU_ISSUE_SKIP_COMPLETE;
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_uring_cleanup(req, issue_flags);
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
}
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
+ struct iov_iter *iter,
+ struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
- return io_import_fixed(rw, iter, req->imu, ubuf, len);
+ if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
+ return -EINVAL;
+
+ return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
-static inline int io_uring_cmd_getsockopt(struct socket *sock,
- struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags)
{
- bool compat = !!(issue_flags & IO_URING_F_COMPAT);
- int optlen, optname, level, err;
- void __user *optval;
-
- level = READ_ONCE(cmd->sqe->level);
- if (level != SOL_SOCKET)
- return -EOPNOTSUPP;
-
- optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
- optname = READ_ONCE(cmd->sqe->optname);
- optlen = READ_ONCE(cmd->sqe->optlen);
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ struct io_async_cmd *ac = req->async_data;
+ int ret;
- err = do_sock_getsockopt(sock, compat, level, optname,
- USER_SOCKPTR(optval),
- KERNEL_SOCKPTR(&optlen));
- if (err)
- return err;
+ if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
+ return -EINVAL;
- /* On success, return optlen */
- return optlen;
-}
+ ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
+ if (ret)
+ return ret;
-static inline int io_uring_cmd_setsockopt(struct socket *sock,
- struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- bool compat = !!(issue_flags & IO_URING_F_COMPAT);
- int optname, optlen, level;
- void __user *optval;
- sockptr_t optval_s;
-
- optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
- optname = READ_ONCE(cmd->sqe->optname);
- optlen = READ_ONCE(cmd->sqe->optlen);
- level = READ_ONCE(cmd->sqe->level);
- optval_s = USER_SOCKPTR(optval);
-
- return do_sock_setsockopt(sock, compat, level, optname, optval_s,
- optlen);
+ return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
+ issue_flags);
}
+EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);
-#if defined(CONFIG_NET)
-int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
- struct socket *sock = cmd->file->private_data;
- struct sock *sk = sock->sk;
- struct proto *prot = READ_ONCE(sk->sk_prot);
- int ret, arg = 0;
-
- if (!prot || !prot->ioctl)
- return -EOPNOTSUPP;
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
- switch (cmd->sqe->cmd_op) {
- case SOCKET_URING_OP_SIOCINQ:
- ret = prot->ioctl(sk, SIOCINQ, &arg);
- if (ret)
- return ret;
- return arg;
- case SOCKET_URING_OP_SIOCOUTQ:
- ret = prot->ioctl(sk, SIOCOUTQ, &arg);
- if (ret)
- return ret;
- return arg;
- case SOCKET_URING_OP_GETSOCKOPT:
- return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
- case SOCKET_URING_OP_SETSOCKOPT:
- return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
- default:
- return -EOPNOTSUPP;
- }
+ io_req_queue_iowq(req);
}
-EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
-#endif
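
Below is an illustrative, non-authoritative sketch of how a driver-side ->uring_cmd() handler might consume the interfaces as changed by this patch: the fixed-buffer import helper now takes the io_uring_cmd and issue_flags instead of an opaque pointer, and io_uring_cmd_done() takes a u64 res2. The driver name, opcode, and handler (my_drv_uring_cmd, MY_DRV_CMD_READ) are hypothetical; only the io_uring_cmd_* signatures come from this diff.

/*
 * Hypothetical driver-side sketch, not part of the patch above.
 */
#include <linux/io_uring/cmd.h>
#include <linux/uio.h>

#define MY_DRV_CMD_READ	0x01	/* made-up ioctl-style opcode */

static int my_drv_uring_cmd(struct io_uring_cmd *ioucmd,
			    unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = ioucmd->sqe;
	struct iov_iter iter;
	u64 ubuf;
	u32 len;
	int ret;

	switch (ioucmd->cmd_op) {
	case MY_DRV_CMD_READ:
		ubuf = READ_ONCE(sqe->addr);
		len = READ_ONCE(sqe->len);

		/*
		 * With this patch, importing a registered buffer takes the
		 * io_uring_cmd itself plus issue_flags; the registered
		 * buffer is looked up at import time rather than at prep.
		 */
		ret = io_uring_cmd_import_fixed(ubuf, len, ITER_DEST, &iter,
						ioucmd, issue_flags);
		if (ret)
			return ret;

		/* ... start device I/O against 'iter' here ... */

		/*
		 * Completion happens later, e.g. from task_work:
		 *   io_uring_cmd_done(ioucmd, res, res2, flags);
		 * where res2 is now a u64 carried in the CQE32 extra field.
		 */
		return -EIOCBQUEUED;
	default:
		return -EOPNOTSUPP;
	}
}

When the device I/O later completes, such a driver would typically punt to task context (for example via io_uring_cmd_do_in_task_lazy()) and call io_uring_cmd_done() there, passing any CQE32 extra value through the now-u64 res2 argument.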