author    Pavel Begunkov <asml.silence@gmail.com>    2022-04-12 15:09:46 +0100
committer Jens Axboe <axboe@kernel.dk>               2022-04-24 18:02:46 -0600
commit    23a5c43b2fc0cc210dcb36264fe3050468c79b17 (patch)
tree      2c5fc70cbdb41913f82b169a163dd5a1aa27dcab
parent    e126391c092014af7ea6786648c9f1d3fce512a3 (diff)
io_uring: inline io_flush_cached_reqs
io_flush_cached_reqs() isn't descriptive and has only one caller, inline
it into __io_alloc_req_refill().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ec38abe65a883d9fe6b169793119ce86806655a4.1649771823.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
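For orientation before reading the diff: a condensed sketch of the refill slow path once the helper is folded in. This is distilled from the patch below rather than copied verbatim from fs/io_uring.c, and the bulk slab allocation is reduced to a hypothetical placeholder (io_refill_from_slab) to keep it short.

static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;

	/*
	 * Requests completed from IRQ context are parked on a separate,
	 * completion_lock-protected free list.  Only take the lock and
	 * migrate them once a batch's worth has accumulated.
	 */
	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) {
		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
		if (state->free_list.next)
			return true;
	}

	/*
	 * Nothing cached anywhere: bulk-allocate a fresh batch from the slab
	 * cache and push it onto state->free_list (kmem_cache_alloc_bulk()
	 * plus io_preinit_req() in the real code).
	 */
	return io_refill_from_slab(ctx, state);	/* placeholder, not a real helper */
}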
-rw-r--r--  fs/io_uring.c | 30
1 file changed, 11 insertions(+), 19 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a3a3c80e2faa..7b83d6ed13ba 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2224,21 +2224,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 	spin_unlock(&ctx->completion_lock);
 }
 
-/* Returns true IFF there are requests in the cache */
-static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
-{
-	struct io_submit_state *state = &ctx->submit_state;
-
-	/*
-	 * If we have more than a batch's worth of requests in our IRQ side
-	 * locked cache, grab the lock and move them over to our submission
-	 * side cache.
-	 */
-	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
-		io_flush_cached_locked_reqs(ctx, state);
-	return !!state->free_list.next;
-}
-
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
@@ -2251,11 +2236,18 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	void *reqs[IO_REQ_ALLOC_BATCH];
-	struct io_kiocb *req;
 	int ret, i;
 
-	if (likely(state->free_list.next || io_flush_cached_reqs(ctx)))
-		return true;
+	/*
+	 * If we have more than a batch's worth of requests in our IRQ side
+	 * locked cache, grab the lock and move them over to our submission
+	 * side cache.
+	 */
+	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) {
+		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
+		if (state->free_list.next)
+			return true;
+	}
 
 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
@@ -2272,7 +2264,7 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 
 	percpu_ref_get_many(&ctx->refs, ret);
 	for (i = 0; i < ret; i++) {
-		req = reqs[i];
+		struct io_kiocb *req = reqs[i];
 
 		io_preinit_req(req, ctx);
 		wq_stack_add_head(&req->comp_list, &state->free_list);
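Worth noting about the dropped likely(state->free_list.next || io_flush_cached_reqs(ctx)) check: __io_alloc_req_refill() is a __cold slow path reached through an inline wrapper that already tests the submission-side free list, so rechecking it unconditionally at the top of the slow path buys little; after this patch the list is only rechecked when the locked cache was actually flushed. Roughly, the caller side looks like the sketch below (not part of this patch, shown for context; reconstructed from the surrounding fs/io_uring.c of this era, not verbatim).

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	/* Fast path: requests already sit in the submission-side cache. */
	if (likely(ctx->submit_state.free_list.next))
		return true;
	/* Otherwise take the cold path reshaped by this patch. */
	return __io_alloc_req_refill(ctx);
}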