author		Pavel Begunkov <asml.silence@gmail.com>	2023-01-04 01:34:57 +0000
committer	Jens Axboe <axboe@kernel.dk>	2023-01-03 19:05:41 -0700
commit		f26cc9593581bd734c846bf827401350b36dc3c9
tree		8d3da3597311050876afa42e23207deb939ab2fc
parent		9ffa13ff78a0a55df968a72d6f0ebffccee5c9f4
io_uring: lockdep annotate CQ locking
Locking around CQE posting is complex and depends on options the ring is
created with; add more thorough lockdep annotations checking all invariants.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/aa3770b4eacae3915d782cc2ab2f395a99b4b232.1672795976.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	io_uring/io_uring.c	 5
-rw-r--r--	io_uring/io_uring.h	15
2 files changed, 17 insertions, 3 deletions
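Before the diff itself, here is a small, self-contained userspace model of the decision tree that the new io_lockdep_assert_cq_locked() macro (added to io_uring/io_uring.h below) encodes. It is only an illustration, not kernel code: the enum, the classify_cq_locking() helper and main() are invented for this sketch, and in the kernel the branches are lockdep assertions rather than return values; only the IORING_SETUP_IOPOLL bit value is taken from the uapi definition.

/* Illustrative model only -- not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

#define IORING_SETUP_IOPOLL	(1U << 0)	/* matches include/uapi/linux/io_uring.h */

enum cq_lock_rule {
	RULE_URING_LOCK,	/* IOPOLL rings: CQ posting runs under ->uring_lock */
	RULE_COMPLETION_LOCK,	/* default rings: ->completion_lock must be held */
	RULE_CURRENT_WORK,	/* task_complete ring, submitter exiting: fallback work */
	RULE_SUBMITTER_TASK,	/* task_complete ring: only the submitter task posts */
};

static enum cq_lock_rule classify_cq_locking(unsigned int setup_flags,
					     bool task_complete,
					     bool submitter_exiting)
{
	if (setup_flags & IORING_SETUP_IOPOLL)
		return RULE_URING_LOCK;
	if (!task_complete)
		return RULE_COMPLETION_LOCK;
	if (submitter_exiting)
		return RULE_CURRENT_WORK;
	return RULE_SUBMITTER_TASK;
}

int main(void)
{
	/* plain ring: whoever posts a CQE must hold ->completion_lock */
	printf("plain ring -> rule %d\n", classify_cq_locking(0, false, false));
	/* IOPOLL ring: ->uring_lock serialises CQ posting */
	printf("iopoll ring -> rule %d\n",
	       classify_cq_locking(IORING_SETUP_IOPOLL, false, false));
	/* task_complete ring with a live submitter: only that task may post */
	printf("task_complete ring -> rule %d\n", classify_cq_locking(0, true, false));
	return 0;
}

The actual macro in the io_uring.h hunk below performs lockdep_assert_held()/lockdep_assert() checks for these same four cases instead of returning a value.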
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6bed44855679..472574192dd6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -731,6 +731,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	size_t ocq_size = sizeof(struct io_overflow_cqe);
 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
 
+	lockdep_assert_held(&ctx->completion_lock);
+
 	if (is_cqe32)
 		ocq_size += sizeof(struct io_uring_cqe);
@@ -820,9 +822,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 {
 	struct io_uring_cqe *cqe;
 
-	if (!ctx->task_complete)
-		lockdep_assert_held(&ctx->completion_lock);
-
 	ctx->cq_extra++;
 
 	/*
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index e9f0d41ebb99..ab4b2a1c3b7e 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -79,6 +79,19 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			bool cancel_all);
 
+#define io_lockdep_assert_cq_locked(ctx)				\
+	do {								\
+		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
+			lockdep_assert_held(&ctx->uring_lock);		\
+		} else if (!ctx->task_complete) {			\
+			lockdep_assert_held(&ctx->completion_lock);	\
+		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
+			lockdep_assert(current_work());			\
+		} else {						\
+			lockdep_assert(current == ctx->submitter_task);	\
+		}							\
+	} while (0)
+
 static inline void io_req_task_work_add(struct io_kiocb *req)
 {
 	__io_req_task_work_add(req, true);
@@ -92,6 +105,8 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx);
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
+	io_lockdep_assert_cq_locked(ctx);
+
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
 		struct io_uring_cqe *cqe = ctx->cqe_cached;