author	Pavel Begunkov <asml.silence@gmail.com>	2021-03-19 17:22:39 +0000
committer	Jens Axboe <axboe@kernel.dk>	2021-04-11 17:41:59 -0600
commit	dac7a09864938a310eea08f26f5960d369680629 (patch)
tree	830ae7e0e31c4ed25c529bc78ebb1f8aaa0a2791
parent	a05432fb49b6439d0c5b803053dfdd875940116d (diff)
io_uring: add helper flushing locked_free_list
Add a new helper, io_flush_cached_locked_reqs(), that splices locked_free_list onto free_list under completion_lock and resets locked_free_nr, so both call sites do the synchronisation and invariant re-initialisation in one place.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
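For readers following the pattern outside the kernel tree, below is a minimal userspace sketch of what the helper factors out: take the lock, splice the shared "locked" free list into the private free list, and reset the counter. The names (struct comp_state, flush_cached_locked_reqs(), and so on) are illustrative stand-ins rather than the io_uring types, a pthread mutex replaces the kernel spinlock, and unlike list_splice_init() the hand-rolled splice reverses entry order, which does not matter for a free-object cache.

/*
 * Userspace sketch of the splice-under-lock pattern; hypothetical
 * names, not the io_uring implementation.
 */
#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *next;
};

struct comp_state {
	pthread_mutex_t lock;		/* stands in for ctx->completion_lock */
	struct req *locked_free_list;	/* filled by other threads under lock */
	int locked_free_nr;
	struct req *free_list;		/* consumed lock-free by the owner */
};

/* Drain the locked list into the private free list, reset the counter. */
static void flush_cached_locked_reqs(struct comp_state *cs)
{
	pthread_mutex_lock(&cs->lock);
	while (cs->locked_free_list) {
		struct req *req = cs->locked_free_list;

		cs->locked_free_list = req->next;
		req->next = cs->free_list;
		cs->free_list = req;
	}
	cs->locked_free_nr = 0;
	pthread_mutex_unlock(&cs->lock);
}

int main(void)
{
	struct req a = { .next = NULL }, b = { .next = &a };
	struct comp_state cs = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.locked_free_list = &b,	/* two cached requests: b -> a */
		.locked_free_nr = 2,
	};

	flush_cached_locked_reqs(&cs);
	return (cs.free_list != NULL && cs.locked_free_nr == 0) ? 0 : 1;
}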
-rw-r--r--	fs/io_uring.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 08ab7c4830d5..5eb12f45c6bc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1628,6 +1628,15 @@ static void io_req_complete_failed(struct io_kiocb *req, long res)
	io_req_complete_post(req, res, 0);
}

+static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
+					struct io_comp_state *cs)
+{
+	spin_lock_irq(&ctx->completion_lock);
+	list_splice_init(&cs->locked_free_list, &cs->free_list);
+	cs->locked_free_nr = 0;
+	spin_unlock_irq(&ctx->completion_lock);
+}
+
/* Returns true IFF there are requests in the cache */
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
@@ -1640,12 +1649,8 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
	 * locked cache, grab the lock and move them over to our submission
	 * side cache.
	 */
-	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
-		spin_lock_irq(&ctx->completion_lock);
-		list_splice_init(&cs->locked_free_list, &cs->free_list);
-		cs->locked_free_nr = 0;
-		spin_unlock_irq(&ctx->completion_lock);
-	}
+	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH)
+		io_flush_cached_locked_reqs(ctx, cs);

	nr = state->free_reqs;
	while (!list_empty(&cs->free_list)) {
@@ -8446,13 +8451,8 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
		submit_state->free_reqs = 0;
	}

-	spin_lock_irq(&ctx->completion_lock);
-	list_splice_init(&cs->locked_free_list, &cs->free_list);
-	cs->locked_free_nr = 0;
-	spin_unlock_irq(&ctx->completion_lock);
-
+	io_flush_cached_locked_reqs(ctx, cs);
	io_req_cache_free(&cs->free_list, NULL);
-
	mutex_unlock(&ctx->uring_lock);
}