summary refs log tree commit diff
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2021-09-24 21:59:54 +0100
committerJens Axboe <axboe@kernel.dk>2021-10-19 05:49:53 -0600
commit1cce17aca621c38c657410dc278a48cda982dd2e (patch)
treef5b1eb6984dafdae01641d2bca50e516a837ee06
parentd4b7a5ef2b9c06def90d12db9b99bd12d75758fb (diff)
io_uring: don't pass tail into io_free_batch_list
io_free_batch_list() iterates all requests in the passed-in list, so we don't really need to know the tail but can keep iterating until we meet NULL. Just pass the first node into it and it will be enough. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/4a12c84b6d887d980e05f417ba4172d04c64acae.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--fs/io_uring.c12
1 files changed, 4 insertions, 8 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7229bf74c9e1..693ad31082ae 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2265,14 +2265,12 @@ static void io_free_req_work(struct io_kiocb *req, bool *locked)
}
static void io_free_batch_list(struct io_ring_ctx *ctx,
- struct io_wq_work_list *list)
+ struct io_wq_work_node *node)
__must_hold(&ctx->uring_lock)
{
- struct io_wq_work_node *node;
struct task_struct *task = NULL;
int task_refs = 0, ctx_refs = 0;
- node = list->first;
do {
struct io_kiocb *req = container_of(node, struct io_kiocb,
comp_list);
@@ -2319,7 +2317,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
- io_free_batch_list(ctx, &state->compl_reqs);
+ io_free_batch_list(ctx, state->compl_reqs.first);
INIT_WQ_LIST(&state->compl_reqs);
}
@@ -2403,7 +2401,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
struct io_wq_work_node *pos, *start, *prev;
unsigned int poll_flags = BLK_POLL_NOSLEEP;
- struct io_wq_work_list list;
DEFINE_IO_COMP_BATCH(iob);
int nr_events = 0;
@@ -2461,10 +2458,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
io_commit_cqring(ctx);
io_cqring_ev_posted_iopoll(ctx);
- list.first = start ? start->next : ctx->iopoll_list.first;
- list.last = prev;
+ pos = start ? start->next : ctx->iopoll_list.first;
wq_list_cut(&ctx->iopoll_list, prev, start);
- io_free_batch_list(ctx, &list);
+ io_free_batch_list(ctx, pos);
return nr_events;
}