| author | Jens Axboe <axboe@kernel.dk> | 2025-12-05 10:20:47 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2025-12-05 10:23:28 -0700 |
| commit | 55d57b3bcc7efcab812a8179e2dc17d781302997 (patch) | |
| tree | f373ea85ab946ecb5bc1f70af87d0b5cce327281 | |
| parent | a4c694bfc2455e82b7caf6045ca893d123e0ed11 (diff) | |
io_uring/poll: unify poll waitqueue entry and list removal
For some cases, the order in which the waitq entry is unlinked from its list and
the head pointer is cleared matters; for others it doesn't really. Either way,
it's somewhat confusing to have the same open-coded sequence spread out over the file.
Abstract out the nicely documented code in io_pollfree_wake() and
move it into a helper, and use that helper consistently rather than
having other call sites manually do the same thing. While at it,
correct a comment function name as well.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| -rw-r--r-- | io_uring/poll.c | 43 |
1 file changed, 22 insertions, 21 deletions
```diff
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 3f1d716dcfab..aac4b3b881fb 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -138,14 +138,32 @@ static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 }
 
+static void io_poll_remove_waitq(struct io_poll *poll)
+{
+	/*
+	 * If the waitqueue is being freed early but someone is already holds
+	 * ownership over it, we have to tear down the request as best we can.
+	 * That means immediately removing the request from its waitqueue and
+	 * preventing all further accesses to the waitqueue via the request.
+	 */
+	list_del_init(&poll->wait.entry);
+
+	/*
+	 * Careful: this *must* be the last step, since as soon as req->head is
+	 * NULL'ed out, the request can be completed and freed, since
+	 * io_poll_remove_entry() will no longer need to take the waitqueue
+	 * lock.
+	 */
+	smp_store_release(&poll->head, NULL);
+}
+
 static inline void io_poll_remove_entry(struct io_poll *poll)
 {
 	struct wait_queue_head *head = smp_load_acquire(&poll->head);
 
 	if (head) {
 		spin_lock_irq(&head->lock);
-		list_del_init(&poll->wait.entry);
-		poll->head = NULL;
+		io_poll_remove_waitq(poll);
 		spin_unlock_irq(&head->lock);
 	}
 }
@@ -368,23 +386,7 @@ static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
 	io_poll_mark_cancelled(req);
 	/* we have to kick tw in case it's not already */
 	io_poll_execute(req, 0);
-
-	/*
-	 * If the waitqueue is being freed early but someone is already
-	 * holds ownership over it, we have to tear down the request as
-	 * best we can. That means immediately removing the request from
-	 * its waitqueue and preventing all further accesses to the
-	 * waitqueue via the request.
-	 */
-	list_del_init(&poll->wait.entry);
-
-	/*
-	 * Careful: this *must* be the last step, since as soon
-	 * as req->head is NULL'ed out, the request can be
-	 * completed and freed, since aio_poll_complete_work()
-	 * will no longer need to take the waitqueue lock.
-	 */
-	smp_store_release(&poll->head, NULL);
+	io_poll_remove_waitq(poll);
 
 	return 1;
 }
@@ -413,8 +415,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 
 		/* optional, saves extra locking for removal in tw handler */
 		if (mask && poll->events & EPOLLONESHOT) {
-			list_del_init(&poll->wait.entry);
-			poll->head = NULL;
+			io_poll_remove_waitq(poll);
 			if (wqe_is_double(wait))
 				req->flags &= ~REQ_F_DOUBLE_POLL;
 			else
```
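For readers less familiar with the release/acquire pairing the new helper relies on, here is a small userspace model of the two sides. It is only an illustrative sketch: the names (`fake_poll`, `fake_waitq`, `remove_waitq`, `remove_entry`) are made up, and C11 atomics plus a pthread mutex stand in for the kernel's smp_store_release()/smp_load_acquire() and the waitqueue spinlock.

```c
/*
 * Illustrative userspace model (not kernel code) of the ordering that
 * io_poll_remove_waitq() documents: the entry must be unlinked before
 * the head pointer is published as NULL with release semantics, because
 * the other side (modelled after io_poll_remove_entry()) only takes the
 * lock when its acquire load still sees a non-NULL head.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_waitq {
	pthread_mutex_t lock;		/* stands in for head->lock */
	int entry_linked;		/* stands in for poll->wait.entry */
};

struct fake_poll {
	_Atomic(struct fake_waitq *) head;	/* stands in for poll->head */
};

/* writer side: mirrors the order enforced in io_poll_remove_waitq() */
static void remove_waitq(struct fake_poll *poll, struct fake_waitq *wq)
{
	wq->entry_linked = 0;		/* 1) unlink the entry first */
	/* 2) only then publish head == NULL with release semantics */
	atomic_store_explicit(&poll->head, NULL, memory_order_release);
}

/* reader side: mirrors io_poll_remove_entry() */
static void remove_entry(struct fake_poll *poll)
{
	struct fake_waitq *head =
		atomic_load_explicit(&poll->head, memory_order_acquire);

	if (head) {
		/* non-NULL head: we own the removal, do it under the lock */
		pthread_mutex_lock(&head->lock);
		head->entry_linked = 0;
		atomic_store_explicit(&poll->head, NULL, memory_order_release);
		pthread_mutex_unlock(&head->lock);
	}
	/* NULL head: the waitqueue may already be gone, don't touch it */
}

int main(void)
{
	struct fake_waitq wq = { .lock = PTHREAD_MUTEX_INITIALIZER,
				 .entry_linked = 1 };
	struct fake_poll poll;

	atomic_init(&poll.head, &wq);
	remove_waitq(&poll, &wq);	/* early teardown publishes NULL */
	remove_entry(&poll);		/* later removal sees NULL and skips */
	printf("entry_linked=%d head=%p\n", wq.entry_linked,
	       (void *)atomic_load(&poll.head));
	return 0;
}
```

The point the kernel comment makes carries over directly: once the head pointer is published as NULL, the other side skips the lock entirely, so the entry must already be unlinked by then.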
