path: root/io_uring/poll.c
author		Jens Axboe <axboe@kernel.dk>	2022-11-29 12:08:37 -0700
committer	Jens Axboe <axboe@kernel.dk>	2022-11-29 12:08:37 -0700
commit		b2cf789f6cb6d449f2b457ee3fb055b7f431481f (patch)
tree		617b605a62024f37832e51f97ca4246ad86eb4cd /io_uring/poll.c
parent		b7b275e60bcd5f89771e865a8239325f86d9927d (diff)
parent		5d772916855f593672de55c437925daccc8ecd73 (diff)
Merge branch 'for-6.2/io_uring' into for-6.2/io_uring-next
* for-6.2/io_uring: (41 commits)
  io_uring: keep unlock_post inlined in hot path
  io_uring: don't use complete_post in kbuf
  io_uring: spelling fix
  io_uring: remove io_req_complete_post_tw
  io_uring: allow multishot polled reqs to defer completion
  io_uring: remove overflow param from io_post_aux_cqe
  io_uring: add lockdep assertion in io_fill_cqe_aux
  io_uring: make io_fill_cqe_aux static
  io_uring: add io_aux_cqe which allows deferred completion
  io_uring: allow defer completion for aux posted cqes
  io_uring: defer all io_req_complete_failed
  io_uring: always lock in io_apoll_task_func
  io_uring: remove iopoll spinlock
  io_uring: iopoll protect complete_post
  io_uring: inline __io_req_complete_put()
  io_uring: remove io_req_tw_post_queue
  io_uring: use io_req_task_complete() in timeout
  io_uring: hold locks for io_req_complete_failed
  io_uring: add completion locking for iopoll
  io_uring: kill io_cqring_ev_posted() and __io_cq_unlock_post()
  ...
Diffstat (limited to 'io_uring/poll.c')
-rw-r--r--	io_uring/poll.c	33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d9bf1767867e..599ba28c89b2 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -280,16 +280,14 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 			continue;
 		if (req->apoll_events & EPOLLONESHOT)
 			return IOU_POLL_DONE;
-		if (io_is_uring_fops(req->file))
-			return IOU_POLL_DONE;
 
 		/* multishot, just fill a CQE and proceed */
 		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
 			__poll_t mask = mangle_poll(req->cqe.res &
 						    req->apoll_events);
 
-			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
-					     mask, IORING_CQE_F_MORE, false)) {
+			if (!io_aux_cqe(ctx, *locked, req->cqe.user_data,
+					mask, IORING_CQE_F_MORE, false)) {
 				io_req_set_res(req, mask, 0);
 				return IOU_POLL_REMOVE_POLL_USE_RES;
 			}
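From userspace, the effect of this hunk is the multishot poll contract: each event arrives as a CQE with IORING_CQE_F_MORE set, and a CQE without that flag means the kernel ended the multishot (for instance when the aux CQE above could not be posted) and the poll must be re-armed. A minimal consumer sketch, assuming liburing provides io_uring_prep_poll_multishot (the watch_fd helper and its error handling are illustrative, not part of this patch):

#include <liburing.h>
#include <poll.h>
#include <stdio.h>

static int watch_fd(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	if (!sqe)
		return -1;
	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe) < 0)
			return -1;
		printf("poll events: 0x%x\n", cqe->res);
		/* no F_MORE: the multishot ended; re-arm if still needed */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			io_uring_cqe_seen(ring, cqe);
			return 0;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}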
@@ -345,26 +343,22 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	if (ret == IOU_POLL_NO_ACTION)
 		return;
 
+	io_tw_lock(req->ctx, locked);
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, locked);
 
 	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 	else if (ret == IOU_POLL_DONE)
 		io_req_task_submit(req, locked);
 	else
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 }
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
 {
 	io_req_set_res(req, mask, 0);
-	/*
-	 * This is useful for poll that is armed on behalf of another
-	 * request, and where the wakeup path could be on a different
-	 * CPU. We want to avoid pulling in req->apoll->events for that
-	 * case.
-	 */
+
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
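The locking change above hoists io_tw_lock() ahead of the teardown so all three completion branches run with the ring locked, which is what lets io_req_task_complete() and io_req_defer_failed() defer their work instead of posting immediately. A rough userspace stand-in for that shape, with hypothetical names and a pthread mutex in place of the ring's uring_lock:

#include <pthread.h>
#include <stdbool.h>

struct ring_ctx {
	pthread_mutex_t lock;
};

/* idempotent, like io_tw_lock(): only the first caller in the
 * task-work batch actually takes the lock */
static void tw_lock(struct ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		pthread_mutex_lock(&ctx->lock);
		*locked = true;
	}
}

static void task_func(struct ring_ctx *ctx, bool *locked)
{
	tw_lock(ctx, locked);
	/* teardown and every completion path below now run under
	 * ctx->lock; the task-work runner unlocks once per batch */
}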
@@ -429,6 +423,14 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		return 0;
 
 	if (io_poll_get_ownership(req)) {
+		/*
+		 * If we trigger a multishot poll off our own wakeup path,
+		 * disable multishot as there is a circular dependency between
+		 * CQ posting and triggering the event.
+		 */
+		if (mask & EPOLL_URING_WAKE)
+			poll->events |= EPOLLONESHOT;
+
 		/* optional, saves extra locking for removal in tw handler */
 		if (mask && poll->events & EPOLLONESHOT) {
 			list_del_init(&poll->wait.entry);
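A wakeup flagged with EPOLL_URING_WAKE originates from io_uring's own CQE posting, so letting a multishot poll re-arm off it could post, wake, and post again indefinitely; downgrading to EPOLLONESHOT breaks that cycle. One way such a wakeup arises is polling one ring's fd from another ring; a hedged liburing sketch of that setup (the helper name is illustrative):

#include <liburing.h>
#include <poll.h>

static int arm_cross_ring_poll(struct io_uring *a, struct io_uring *b)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(a);

	if (!sqe)
		return -1;
	/* multishot poll on ring b's fd: if posting a CQE to b is itself
	 * what wakes this poll, the kernel flips it to oneshot */
	io_uring_prep_poll_multishot(sqe, b->ring_fd, POLLIN);
	return io_uring_submit(a);
}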
@@ -648,10 +650,13 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 	if (req->flags & REQ_F_POLLED) {
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
-	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
-		apoll = container_of(entry, struct async_poll, cache);
+	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		if (entry == NULL)
+			goto alloc_apoll;
+		apoll = container_of(entry, struct async_poll, cache);
 	} else {
+alloc_apoll:
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
 			return NULL;
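The rewrite above replaces an assignment buried inside the else-if condition with an explicit cache lookup, and reuses the kmalloc() branch for cache misses via the new alloc_apoll label. A self-contained userspace stand-in for that control flow, using illustrative types rather than the kernel's io_alloc_cache API:

#include <stdbool.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
};

static struct obj *get_obj(struct obj **cache, bool locked)
{
	struct obj *o;

	if (locked) {
		/* pop from the free-list cache, which is only safe to
		 * touch while the lock is held */
		o = *cache;
		if (o == NULL)
			goto alloc;	/* cache miss */
		*cache = o->next;
		return o;
	}
alloc:
	/* unlocked callers and cache misses share the allocator path */
	return malloc(sizeof(struct obj));
}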