author    Bui Quang Minh <minhquangbui99@gmail.com>    2025-01-13 23:03:31 +0700
committer Jens Axboe <axboe@kernel.dk>    2025-01-13 15:29:44 -0700
commit    a13030fd194c88961be4679f87a1380f1bda0ebe (patch)
tree      9b87d4be9b0e1805f8e22fb842dc1ded4793441b /io_uring
parent    94d57442e56d2ad2ca20d096040b8ae6f216a921 (diff)
io_uring: simplify the SQPOLL thread check when cancelling requests
In io_uring_try_cancel_requests, we check whether sq_data->thread == current to determine if the function is called by the SQPOLL thread to do iopoll when IORING_SETUP_SQPOLL is set. This check can race with the SQPOLL thread termination.

io_uring_try_cancel_requests is called in 2 places: io_uring_cancel_generic and io_ring_exit_work. In io_uring_cancel_generic, we already know whether the current task is the SQPOLL thread, and the SQPOLL thread never reaches io_ring_exit_work. So, to avoid the racy check, this commit adds a boolean flag to io_uring_try_cancel_requests to indicate whether the caller is the SQPOLL thread.

Reported-by: syzbot+3c750be01dab672c513d@syzkaller.appspotmail.com
Reported-by: Li Zetao <lizetao1@huawei.com>
Reviewed-by: Li Zetao <lizetao1@huawei.com>
Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/20250113160331.44057-1-minhquangbui99@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
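For illustration, a minimal userspace C sketch of the pattern the patch applies (hypothetical stand-ins only: sq_data, is_sqpoll_thread_racy, and try_cancel_requests here are not the kernel symbols). The point is that the callee stops re-deriving "am I the SQPOLL thread?" from shared state that teardown may be rewriting, and instead takes the answer as a parameter from callers that already know it:

/* Build: cc -pthread sketch.c -o sketch  (hypothetical example, not kernel code) */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct io_sq_data. */
struct sq_data {
	pthread_t thread;	/* rewritten while the poll thread terminates */
};

/* Racy shape: reads shared state that another thread may be tearing down. */
static bool is_sqpoll_thread_racy(struct sq_data *sqd)
{
	return sqd && pthread_equal(sqd->thread, pthread_self());
}

/* Fixed shape: the caller passes down what it already knows statically. */
static bool try_cancel_requests(bool cancel_all, bool is_sqpoll_thread)
{
	if (cancel_all || is_sqpoll_thread)
		return true;	/* would reap iopoll events here */
	return false;
}

int main(void)
{
	struct sq_data sqd = { .thread = pthread_self() };

	printf("racy check: %d\n", is_sqpoll_thread_racy(&sqd));
	/* exit-work path: by construction never the SQPOLL thread */
	printf("exit work:  %d\n", try_cancel_requests(true, false));
	/* SQPOLL cancellation path: the caller is that thread */
	printf("sqpoll:     %d\n", try_cancel_requests(false, true));
	return 0;
}

This mirrors the patch's two call sites below: io_ring_exit_work always passes false, while the sqd branch of io_uring_cancel_generic always passes true.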
Diffstat (limited to 'io_uring')
-rw-r--r--    io_uring/io_uring.c    17
1 file changed, 11 insertions, 6 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index af03e9973b58..20a46bc671ea 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -143,7 +143,8 @@ struct io_defer_entry {
static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct io_uring_task *tctx,
- bool cancel_all);
+ bool cancel_all,
+ bool is_sqpoll_thread);
static void io_queue_sqe(struct io_kiocb *req);
@@ -2869,7 +2870,8 @@ static __cold void io_ring_exit_work(struct work_struct *work)
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
io_move_task_work_from_local(ctx);
- while (io_uring_try_cancel_requests(ctx, NULL, true))
+ /* The SQPOLL thread never reaches this path */
+ while (io_uring_try_cancel_requests(ctx, NULL, true, false))
cond_resched();
if (ctx->sq_data) {
@@ -3037,7 +3039,8 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct io_uring_task *tctx,
- bool cancel_all)
+ bool cancel_all,
+ bool is_sqpoll_thread)
{
struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
enum io_wq_cancel cret;
@@ -3067,7 +3070,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
/* SQPOLL thread does its own polling */
if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
- (ctx->sq_data && ctx->sq_data->thread == current)) {
+ is_sqpoll_thread) {
while (!wq_list_empty(&ctx->iopoll_list)) {
io_iopoll_try_reap_events(ctx);
ret = true;
@@ -3140,13 +3143,15 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
continue;
loop |= io_uring_try_cancel_requests(node->ctx,
current->io_uring,
- cancel_all);
+ cancel_all,
+ false);
}
} else {
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
loop |= io_uring_try_cancel_requests(ctx,
current->io_uring,
- cancel_all);
+ cancel_all,
+ true);
}
if (loop) {