author	Pavel Begunkov <asml.silence@gmail.com>	2020-06-08 21:08:20 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-06-08 13:47:37 -0600
commit	f5fa38c59cb0b40633dee5cdf7465801be3e4928 (patch)
tree	f654e1a37dcf8464493d76236f965aefe45c8a83 /fs/io_uring.c
parent	d4c81f38522f3e7f4be1b472ef9988d0ed7f3696 (diff)
io_wq: add per-wq work handler instead of per work
io_uring is the only user of io-wq, and now it uses only io-wq callback
for all its requests, namely io_wq_submit_work(). Instead of storing
work->runner callback in each instance of io_wq_work, keep it in io-wq
itself.

pros:
- reduces io_wq_work size
- more robust -- ->func won't be invalidated with mem{cpy,set}(req)
- helps other work

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
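For illustration, a minimal userspace sketch of the idea behind the change. Only io_wq_data.do_work, INIT_IO_WORK() and io_wq_submit_work() come from the patch itself; the simplified types, the handler signature and the stub names below are assumptions made for the sketch, not the real fs/io-wq.h definitions.

/* Sketch only: simplified stand-ins, not the real io-wq types. */
#include <stdio.h>

struct io_wq_work { int flags; /* ... no per-work ->func anymore ... */ };
typedef void (io_wq_work_fn)(struct io_wq_work **);  /* assumed handler shape */

/* After the patch the handler lives in the workqueue itself, supplied once
 * through io_wq_data at creation time instead of in every work item. */
struct io_wq_sketch {
	io_wq_work_fn *do_work;   /* io_uring passes io_wq_submit_work() here */
};

static void submit_work_stub(struct io_wq_work **workptr)
{
	printf("ran work %p\n", (void *)*workptr);
}

int main(void)
{
	struct io_wq_sketch wq = { .do_work = submit_work_stub };
	struct io_wq_work work = { 0 };
	struct io_wq_work *w = &work;

	/* Worker dispatch now goes through the per-wq pointer; before the
	 * patch it would have been a per-work pointer, i.e. w->func(&w). */
	wq.do_work(&w);
	return 0;
}

With the handler kept in one place per workqueue, a memcpy()/memset() over a request can no longer leave a stale ->func behind, which is the robustness point the commit message makes, and each io_wq_work gets one pointer smaller.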
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	3
1 file changed, 2 insertions, 1 deletion
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 35d96d2a4c8c..3ffe03194c1e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5776,7 +5776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	refcount_set(&req->refs, 2);
 	req->task = NULL;
 	req->result = 0;
-	INIT_IO_WORK(&req->work, io_wq_submit_work);
+	INIT_IO_WORK(&req->work);
 
 	if (unlikely(req->opcode >= IORING_OP_LAST))
 		return -EINVAL;
@@ -6796,6 +6796,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
 
 	data.user = ctx->user;
 	data.free_work = io_free_work;
+	data.do_work = io_wq_submit_work;
 
 	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
 		/* Do QD, or 4 * CPUS, whatever is smallest */