author     Jens Axboe <axboe@kernel.dk>  2021-01-16 11:52:11 -0700
committer  Jens Axboe <axboe@kernel.dk>  2021-01-16 12:13:59 -0700
commit     c93cc9e16d88e0f5ea95d2d65d58a8a4dab258bc (patch)
tree       475e92df72996c3077abb8c0ab41e243dc2b85bc /fs
parent     a8d13dbccb137c46fead2ec1a4f1fbc8cfc9ea91 (diff)
io_uring: iopoll requests should also wake task ->in_idle state
If we're freeing/finishing iopoll requests, ensure we check whether the task is idling and waiting for cancelation. Otherwise we could end up waiting forever in __io_uring_task_cancel() if the task has active iopoll requests that need cancelation.

Cc: stable@vger.kernel.org # 5.9+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
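The hang the patch addresses is easy to model outside the kernel: a canceling task marks itself idle and then sleeps until its inflight count drains, so every path that drops inflight must also wake it. Below is a minimal userspace C sketch of that pattern, assuming pthreads; task_ctx, req_complete() and task_cancel_wait() are illustrative stand-ins for io_uring_task, io_req_free_batch_finish() and __io_uring_task_cancel(), not the kernel implementation.

#include <pthread.h>
#include <stdatomic.h>

struct task_ctx {
	atomic_int	inflight;	/* batched requests not yet freed */
	atomic_int	in_idle;	/* set while draining for cancelation */
	pthread_mutex_t	lock;
	pthread_cond_t	wait;
};

/* free/completion path: the role io_req_free_batch_finish() plays */
static void req_complete(struct task_ctx *tctx, int nr)
{
	atomic_fetch_sub(&tctx->inflight, nr);
	/* without this wakeup the waiter below can sleep forever */
	if (atomic_load(&tctx->in_idle)) {
		pthread_mutex_lock(&tctx->lock);
		pthread_cond_broadcast(&tctx->wait);
		pthread_mutex_unlock(&tctx->lock);
	}
}

/* cancelation path: the role __io_uring_task_cancel() plays */
static void task_cancel_wait(struct task_ctx *tctx)
{
	atomic_store(&tctx->in_idle, 1);
	pthread_mutex_lock(&tctx->lock);
	while (atomic_load(&tctx->inflight) > 0)
		pthread_cond_wait(&tctx->wait, &tctx->lock);
	pthread_mutex_unlock(&tctx->lock);
	atomic_store(&tctx->in_idle, 0);
}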
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 985a9e3f976d..5cda878b69cf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2270,6 +2270,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 		struct io_uring_task *tctx = rb->task->io_uring;
 
 		percpu_counter_sub(&tctx->inflight, rb->task_refs);
+		if (atomic_read(&tctx->in_idle))
+			wake_up(&tctx->wait);
 		put_task_struct_many(rb->task, rb->task_refs);
 		rb->task = NULL;
 	}
@@ -2288,6 +2290,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 			struct io_uring_task *tctx = rb->task->io_uring;
 
 			percpu_counter_sub(&tctx->inflight, rb->task_refs);
+			if (atomic_read(&tctx->in_idle))
+				wake_up(&tctx->wait);
 			put_task_struct_many(rb->task, rb->task_refs);
 		}
 		rb->task = req->task;
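Continuing the sketch above, a short driver shows the ordering the patch guarantees: one thread drops the last inflight reference while the other is already idling, and the explicit wakeup is what lets the wait return. This assumes the definitions from the earlier sketch and is illustrative only; build with gcc -pthread.

#include <stdio.h>

static void *completer(void *arg)
{
	req_complete(arg, 1);	/* frees the last outstanding request */
	return NULL;
}

int main(void)
{
	struct task_ctx tctx = {
		.inflight = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};
	pthread_t thread;

	pthread_create(&thread, NULL, completer, &tctx);
	task_cancel_wait(&tctx);	/* relies on req_complete()'s wakeup if it has to sleep */
	pthread_join(thread, NULL);
	puts("inflight drained");
	return 0;
}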