author	Pavel Begunkov <asml.silence@gmail.com>	2023-06-23 12:23:27 +0100
committer	Jens Axboe <axboe@kernel.dk>	2023-06-23 08:19:39 -0600
commit	f432b76bcc93f36edb3d371f7b8d7881261dd6e7 (patch)
tree	bceda916d8409a203f3cad2afca6a44fb575308b /io_uring
parent	91c7884ac9a92ffbf78af7fc89603daf24f448a9 (diff)
io_uring: kill io_cq_unlock()
We're abusing ->completion_lock helpers. io_cq_unlock() is neither locking
conditionally nor doing CQE flushing, which means that callers must have some
other reason for taking the lock and should do it directly.

Open code io_cq_unlock() into io_cqring_overflow_kill() and clean it up.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7dabb36856db2b562e78780480396c52c29b2bf4.1687518903.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	| 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 776d1aa73d26..2f55abb676c0 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -644,12 +644,6 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)
 	spin_lock(&ctx->completion_lock);
 }
 
-static inline void io_cq_unlock(struct io_ring_ctx *ctx)
-	__releases(ctx->completion_lock)
-{
-	spin_unlock(&ctx->completion_lock);
-}
-
 /* keep it inlined for io_submit_flush_completions() */
 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
 	__releases(ctx->completion_lock)
@@ -694,10 +688,10 @@ static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
 	struct io_overflow_cqe *ocqe;
 	LIST_HEAD(list);
 
-	io_cq_lock(ctx);
+	spin_lock(&ctx->completion_lock);
 	list_splice_init(&ctx->cq_overflow_list, &list);
 	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
-	io_cq_unlock(ctx);
+	spin_unlock(&ctx->completion_lock);
 
 	while (!list_empty(&list)) {
 		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
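
For reference, the hunk above is truncated mid-loop. Below is a sketch of how
io_cqring_overflow_kill() reads with io_cq_unlock() open-coded. The tail of the
while loop is not part of the patch text shown here; the list_del()/kfree()
cleanup is an assumption about how the loop continues.

static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
	struct io_overflow_cqe *ocqe;
	LIST_HEAD(list);

	/*
	 * This path only detaches the overflow list; it neither posts nor
	 * flushes CQEs, so taking ->completion_lock directly is enough and
	 * the io_cq_lock() helper pair is not needed.
	 */
	spin_lock(&ctx->completion_lock);
	list_splice_init(&ctx->cq_overflow_list, &list);
	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
	spin_unlock(&ctx->completion_lock);

	/* Assumed loop tail: unlink and free each stashed overflow CQE. */
	while (!list_empty(&list)) {
		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
		list_del(&ocqe->list);
		kfree(ocqe);
	}
}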