Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--  io_uring/uring_cmd.c  93
1 file changed, 58 insertions(+), 35 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 929cad6ee326..053bac89b6c0 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -12,6 +12,7 @@
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"
+#include "poll.h"
void io_cmd_cache_free(const void *entry)
{
@@ -25,12 +26,6 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
struct io_async_cmd *ac = req->async_data;
- struct io_uring_cmd_data *cache = &ac->data;
-
- if (cache->op_data) {
- kfree(cache->op_data);
- cache->op_data = NULL;
- }
if (issue_flags & IO_URING_F_UNLOCKED)
return;
@@ -39,7 +34,7 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
io_vec_free(&ac->vec);
- if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
+ if (io_alloc_cache_put(&req->ctx->cmd_cache, ac)) {
ioucmd->sqe = NULL;
req->async_data = NULL;
req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
@@ -136,6 +131,9 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT))
+ return;
+
ioucmd->task_work_cb = task_work_cb;
req->io_task_work.func = io_uring_cmd_work;
__io_req_task_work_add(req, flags);
@@ -158,6 +156,9 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT))
+ return;
+
io_uring_cmd_del_cancelable(ioucmd, issue_flags);
if (ret < 0)
@@ -181,35 +182,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
-static int io_uring_cmd_prep_setup(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct io_async_cmd *ac;
-
- /* see io_uring_cmd_get_async_data() */
- BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);
-
- ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
- if (!ac)
- return -ENOMEM;
- ac->data.op_data = NULL;
-
- /*
- * Unconditionally cache the SQE for now - this is only needed for
- * requests that go async, but prep handlers must ensure that any
- * sqe data is stable beyond prep. Since uring_cmd is special in
- * that it doesn't read in per-op data, play it safe and ensure that
- * any SQE data is stable beyond prep. This can later get relaxed.
- */
- memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = ac->sqes;
- return 0;
-}
-
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ struct io_async_cmd *ac;
if (sqe->__pad1)
return -EINVAL;
@@ -223,7 +199,23 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
- return io_uring_cmd_prep_setup(req, sqe);
+ ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
+ if (!ac)
+ return -ENOMEM;
+ ioucmd->sqe = sqe;
+ return 0;
+}
+
+void io_uring_cmd_sqe_copy(struct io_kiocb *req)
+{
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ struct io_async_cmd *ac = req->async_data;
+
+ /* Should not happen, as REQ_F_SQE_COPIED covers this */
+ if (WARN_ON_ONCE(ioucmd->sqe == ac->sqes))
+ return;
+ memcpy(ac->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = ac->sqes;
}
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
@@ -259,7 +251,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
}
ret = file->f_op->uring_cmd(ioucmd, issue_flags);
- if (ret == -EAGAIN || ret == -EIOCBQUEUED)
+ if (ret == -EAGAIN) {
+ ioucmd->flags |= IORING_URING_CMD_REISSUE;
+ return ret;
+ }
+ if (ret == -EIOCBQUEUED)
return ret;
if (ret < 0)
req_set_fail(req);
@@ -310,3 +306,30 @@ void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
io_req_queue_iowq(req);
}
+
+int io_cmd_poll_multishot(struct io_uring_cmd *cmd,
+ unsigned int issue_flags, __poll_t mask)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+ int ret;
+
+ if (likely(req->flags & REQ_F_APOLL_MULTISHOT))
+ return 0;
+
+ req->flags |= REQ_F_APOLL_MULTISHOT;
+ mask &= ~EPOLLONESHOT;
+
+ ret = io_arm_apoll(req, issue_flags, mask);
+ return ret == IO_APOLL_OK ? -EIOCBQUEUED : -ECANCELED;
+}
+
+bool io_uring_cmd_post_mshot_cqe32(struct io_uring_cmd *cmd,
+ unsigned int issue_flags,
+ struct io_uring_cqe cqe[2])
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+
+ if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_MULTISHOT)))
+ return false;
+ return io_req_post_cqe32(req, cqe);
+}
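
A rough driver-side sketch (not part of this patch) of how a ->uring_cmd() handler could combine the two new multishot helpers: arm async poll on the first issue, then post a 32-byte CQE per event without completing the request. The handler name, the header path, the EPOLLIN mask and the per-event result are assumptions for illustration; only io_cmd_poll_multishot() and io_uring_cmd_post_mshot_cqe32() come from this diff.

/* Hypothetical sketch; the header location of the new helpers is assumed. */
#include <linux/io_uring/cmd.h>
#include <linux/poll.h>

static int demo_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct io_uring_cqe cqe[2] = { };
	int ret;

	/*
	 * First issue: arms async poll and returns -EIOCBQUEUED (or
	 * -ECANCELED on failure). Once the request is running in
	 * multishot mode the helper returns 0 and we fall through.
	 */
	ret = io_cmd_poll_multishot(cmd, issue_flags, EPOLLIN);
	if (ret)
		return ret;

	/* Hypothetical per-event result encoded into a 32-byte CQE. */
	cqe[0].res = 0;

	/* Post the CQE without terminating the request. */
	if (!io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe))
		return -ECANCELED;

	/* Stay armed and wait for the next poll wakeup. */
	return -EIOCBQUEUED;
}

Returning -EIOCBQUEUED after posting presumably keeps the command owned by the poll machinery; a terminal completion would instead go through io_uring_cmd_done(), which this patch now guards against being called from multishot context via the new WARN_ON_ONCE checks above.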