author     Pavel Begunkov <asml.silence@gmail.com>    2023-01-09 14:46:08 +0000
committer  Jens Axboe <axboe@kernel.dk>               2023-01-29 15:17:40 -0700
commit     7b235dd82ad32c1626e51303d94ec5ef4d7bc994 (patch)
tree       7d3cbc691bd6edce85f2b35ecafa7107fe871b79
parent     360173ab9e1a8a50bc9092ae8c741f0a05d499b7 (diff)
io_uring: separate wq for ring polling
Don't use ->cq_wait for ring polling but add a separate wait queue for
it. We need it for following patches.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dea0be0bf990503443c5c6c337fc66824af7d590.1673274244.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  include/linux/io_uring_types.h  2
-rw-r--r--  io_uring/io_uring.c             3
-rw-r--r--  io_uring/io_uring.h             9
3 files changed, 12 insertions, 2 deletions
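For context, "ring polling" here means the file-level poll on the io_uring fd itself, served by io_uring_poll() in the second hunk below. A minimal userspace sketch of that usage, assuming liburing is available (error handling trimmed; this is an illustration, not part of the patch):

	#include <liburing.h>
	#include <sys/epoll.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct epoll_event ev = { .events = EPOLLIN };
		int epfd;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		/* Watch the ring fd itself: EPOLLIN means CQEs are ready. */
		epfd = epoll_create1(0);
		ev.data.fd = ring.ring_fd;
		epoll_ctl(epfd, EPOLL_CTL_ADD, ring.ring_fd, &ev);

		/* Queue a no-op so a completion eventually arrives. */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		io_uring_submit(&ring);

		/* The kernel-side waiter this registers sits on ctx->poll_wq
		 * after this patch, instead of sharing ctx->cq_wait. */
		epoll_wait(epfd, &ev, 1, -1);

		if (!io_uring_peek_cqe(&ring, &cqe)) {
			printf("cqe res=%d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return 0;
	}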
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 8dfb6c4a35d9..0d94ee191c15 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -319,7 +319,7 @@ struct io_ring_ctx {
 	} ____cacheline_aligned_in_smp;
 
 	/* Keep this last, we don't need it for the fast path */
-
+	struct wait_queue_head		poll_wq;
 	struct io_restriction		restrictions;
 
 	/* slow path rsrc auxilary data, used by update/register */
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 1e79f37b071b..2ee2bcaeadfd 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -316,6 +316,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
 	init_waitqueue_head(&ctx->cq_wait);
+	init_waitqueue_head(&ctx->poll_wq);
 	spin_lock_init(&ctx->completion_lock);
 	spin_lock_init(&ctx->timeout_lock);
 	INIT_WQ_LIST(&ctx->iopoll_list);
@@ -2786,7 +2787,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	struct io_ring_ctx *ctx = file->private_data;
 	__poll_t mask = 0;
 
-	poll_wait(file, &ctx->cq_wait, wait);
+	poll_wait(file, &ctx->poll_wq, wait);
 	/*
 	 * synchronizes with barrier from wq_has_sleeper call in
 	 * io_commit_cqring
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index b5975e353aa1..c75bbb94703c 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -220,9 +220,18 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
 
+static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
+{
+	if (waitqueue_active(&ctx->poll_wq))
+		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
+				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+}
+
 /* requires smb_mb() prior, see wq_has_sleeper() */
 static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
+	io_poll_wq_wake(ctx);
+
 	/*
 	 * Trigger waitqueue handler on all waiters on our waitqueue. This
 	 * won't necessarily wake up all the tasks, io_should_wake() will make
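The "requires smb_mb() prior" comment (i.e. smp_mb(); the misspelling is in the source) is what makes the lockless waitqueue_active() check in io_poll_wq_wake() safe: the completer must order its CQ-tail store before inspecting the wait queue, while the poller orders its queue registration before re-reading the tail. A rough sketch of that pairing, as an illustration only rather than the exact kernel code paths:

	/*
	 * Completion side (simplified):          Poll side (simplified):
	 *
	 *   store CQ tail (CQE visible)            add waiter to poll_wq
	 *   smp_mb()                               smp_mb()
	 *   if (waitqueue_active(&poll_wq))        re-check CQ tail
	 *           wake poll_wq waiters           sleep only if still empty
	 *
	 * With both barriers in place, at least one side observes the
	 * other: either the completer sees the waiter and wakes it, or
	 * the waiter sees the new tail and never sleeps on a non-empty CQ.
	 */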