summaryrefslogtreecommitdiff
path: root/io_uring/timeout.c
diff options
context:
space:
mode:
author: Pavel Begunkov <asml.silence@gmail.com> 2022-06-20 01:25:53 +0100
committer: Jens Axboe <axboe@kernel.dk> 2022-07-24 18:39:14 -0600
commit: ba3cdb6fbb6e8eb525c868c60e103c5711edc068 (patch)
tree: 884b7152b56ddc722a24d9c74c4c25d2492ba711 /io_uring/timeout.c
parent: affa87db90108d9f017f927bcdab536e32c3915e (diff)
io_uring: improve task exit timeout cancellations
Don't spin trying to cancel timeouts that are reachable but not cancellable, e.g. already executing. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/ab8a7440a60bbdf69ae514f672ad050e43dd1b03.1655684496.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/timeout.c')
-rw-r--r-- io_uring/timeout.c | 9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 557c637af158..a79a7d6ef1b3 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -49,7 +49,7 @@ static inline void io_put_req(struct io_kiocb *req)
}
}
-static void io_kill_timeout(struct io_kiocb *req, int status)
+static bool io_kill_timeout(struct io_kiocb *req, int status)
__must_hold(&req->ctx->completion_lock)
__must_hold(&req->ctx->timeout_lock)
{
@@ -64,7 +64,9 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&timeout->list);
io_req_tw_post_queue(req, status, 0);
+ return true;
}
+ return false;
}
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
@@ -620,10 +622,9 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
- if (io_match_task(req, tsk, cancel_all)) {
- io_kill_timeout(req, -ECANCELED);
+ if (io_match_task(req, tsk, cancel_all) &&
+ io_kill_timeout(req, -ECANCELED))
canceled++;
- }
}
spin_unlock_irq(&ctx->timeout_lock);
io_commit_cqring(ctx);