author	Jens Axboe <axboe@kernel.dk>	2024-03-20 15:23:47 -0600
committer	Jens Axboe <axboe@kernel.dk>	2024-04-15 08:10:25 -0600
commit	5eff57fa9f3aae3acbcaf196af507eec58955f3b (patch)
tree	dff943a3ebf093bbbdb17770217dbf4ff00d0fe9 /io_uring
parent	d10f19dff56eac5ae44dc270336b18071a8bd51c (diff)
io_uring/uring_cmd: defer SQE copying until it's needed
The previous commit turned on async data for uring_cmd, and did the basic
conversion of setting everything up on the prep side. However, for a lot of
use cases, -EIOCBQUEUED will get returned on issue, as the operation got
successfully queued. For that case, a persistent SQE isn't needed, as it's
just used for issue.

Unless execution goes async immediately, defer copying the double SQE until
it's necessary. This greatly reduces the overhead of such commands, as
evidenced by a perf diff from before and after this change:

    10.60%     -8.58%  [kernel.vmlinux]  [k] io_uring_cmd_prep

where the prep side drops from 10.60% to ~2%, which is more expected.
Performance also rises from ~113M IOPS to ~122M IOPS, bringing us back to
where it was before the async command prep.

Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
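For readers following along outside the kernel tree, the change implements a
generic "borrow now, copy only if the buffer must outlive its owner" pattern.
Below is a minimal userspace sketch of the prep-side half of that pattern
(the issue-side snapshot is sketched after the diff). All names in it
(struct sqe_buf, struct cmd, prep_cmd(), force_async) are hypothetical
stand-ins for the uring_cmd machinery, not the actual kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sqe_buf { unsigned char b[64]; };	/* stand-in for one SQE */

struct cmd {
	const struct sqe_buf *sqe;	/* what issue reads */
	struct sqe_buf *cache;		/* privately owned storage */
	int force_async;		/* stand-in for REQ_F_FORCE_ASYNC */
};

/* prep side: allocate the cache up front, but fill it only when we
 * already know execution will go async */
static int prep_cmd(struct cmd *c, const struct sqe_buf *sqe)
{
	c->cache = malloc(sizeof(*c->cache));
	if (!c->cache)
		return -1;
	if (!c->force_async) {
		c->sqe = sqe;	/* borrow; defer the memcpy */
		return 0;
	}
	memcpy(c->cache, sqe, sizeof(*c->cache));
	c->sqe = c->cache;
	return 0;
}

int main(void)
{
	struct sqe_buf ring_slot = { { 0x42 } };
	struct cmd c = { .force_async = 0 };

	if (prep_cmd(&c, &ring_slot) < 0)
		return 1;
	/* fast path: still borrowing, no memcpy happened */
	printf("borrowing after prep: %d\n", c.sqe == &ring_slot);
	free(c.cache);
	return 0;
}

Deferring the memcpy is safe here because, per the message above, the
borrowed SQE only has to stay valid for the duration of issue; only an
async continuation outlives it.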
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/uring_cmd.c	25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 9bd0ba87553f..92346b5d9f5b 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -182,12 +182,18 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 	struct uring_cache *cache;
 
 	cache = io_uring_async_get(req);
-	if (cache) {
-		memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
-		ioucmd->sqe = req->async_data;
+	if (unlikely(!cache))
+		return -ENOMEM;
+
+	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
+		/* defer memcpy until we need it */
+		ioucmd->sqe = sqe;
 		return 0;
 	}
-	return -ENOMEM;
+
+	memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
+	ioucmd->sqe = req->async_data;
+	return 0;
 }
 
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -245,8 +251,15 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
-		return ret;
+	if (ret == -EAGAIN) {
+		struct uring_cache *cache = req->async_data;
+
+		if (ioucmd->sqe != (void *) cache)
+			memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
+		return -EAGAIN;
+	} else if (ret == -EIOCBQUEUED) {
+		return -EIOCBQUEUED;
+	}
 
 	if (ret < 0)
 		req_set_fail(req);
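A note on the -EAGAIN branch above: it keys the copy off a pointer
comparison rather than a separate flag. ioucmd->sqe either still points into
the shared submission ring (the deferred case) or at req->async_data
(already copied), and only the former needs a snapshot before the request is
retried asynchronously, since the submitter may reuse that ring slot once
submission returns. Here is a standalone sketch of that idiom, with
hypothetical names (struct slot, snapshot_if_borrowed()) assumed purely for
illustration:

#include <assert.h>
#include <string.h>

struct slot { char data[64]; };

/* If p does not already point at owned storage, it still points at
 * borrowed memory that may be reused; snapshot it and repoint. */
static const struct slot *snapshot_if_borrowed(const struct slot *p,
					       struct slot *owned)
{
	if (p != owned) {
		memcpy(owned, p, sizeof(*owned));
		p = owned;
	}
	return p;
}

int main(void)
{
	struct slot ring = { "borrowed" }, cache;
	const struct slot *p = &ring;

	p = snapshot_if_borrowed(p, &cache);	/* copies: still borrowing */
	assert(p == &cache);
	p = snapshot_if_borrowed(p, &cache);	/* no-op: already owned */
	assert(p == &cache);
	return 0;
}

Pointing at the cache thus doubles as the "already copied" state, so the
fast path stays a single pointer assignment and no extra bookkeeping is
carried per request.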