summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2025-10-09 10:55:08 -0600
committerJens Axboe <axboe@kernel.dk>2025-10-20 10:37:48 -0600
commitab673c1bcaf20ac70352eeb6bf5b828462676693 (patch)
treeb9b0a63968c3a4e7b2fd20c4e308640c3b78fd63
parenta48c0cbf28c03f6c590a14ceb31bf6e619c2f6da (diff)
io_uring/waitid: use io_waitid_remove_wq() consistently
Use it everywhere that the wait_queue_entry is removed from the head, and be a bit more cautious in zeroing out iw->head whenever the entry is removed from the list.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- io_uring/waitid.c | 15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index ebe3769c54dc..c5e0d979903a 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -179,18 +179,18 @@ bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
- struct io_waitid_async *iwa = req->async_data;
if (!atomic_sub_return(1, &iw->refs))
return false;
+ io_waitid_remove_wq(req);
+
/*
* Wakeup triggered, racing with us. It was prevented from
* completing because of that, queue up the tw to do that.
*/
req->io_task_work.func = io_waitid_cb;
io_req_task_work_add(req);
- remove_wait_queue(iw->head, &iwa->wo.child_wait);
return true;
}
@@ -245,6 +245,7 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
return 0;
list_del_init(&wait->entry);
+ iw->head = NULL;
/* cancel is in progress */
if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
@@ -271,6 +272,7 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
iw->which = READ_ONCE(sqe->len);
iw->upid = READ_ONCE(sqe->fd);
iw->options = READ_ONCE(sqe->file_index);
+ iw->head = NULL;
iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
return 0;
}
@@ -301,11 +303,16 @@ int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
* callback.
*/
io_ring_submit_lock(ctx, issue_flags);
+
+ /*
+ * iw->head is valid under the ring lock, and as long as the request
+ * is on the waitid_list where cancelations may find it.
+ */
+ iw->head = &current->signal->wait_chldexit;
hlist_add_head(&req->hash_node, &ctx->waitid_list);
init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
iwa->wo.child_wait.private = req->tctx->task;
- iw->head = &current->signal->wait_chldexit;
add_wait_queue(iw->head, &iwa->wo.child_wait);
ret = __do_wait(&iwa->wo);
@@ -328,7 +335,7 @@ int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
}
hlist_del_init(&req->hash_node);
- remove_wait_queue(iw->head, &iwa->wo.child_wait);
+ io_waitid_remove_wq(req);
ret = io_waitid_finish(req, ret);
io_ring_submit_unlock(ctx, issue_flags);