Diffstat (limited to 'io_uring/poll.c')
-rw-r--r--  io_uring/poll.c  141
1 file changed, 77 insertions(+), 64 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index bced9edd5233..aac4b3b881fb 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -138,14 +138,32 @@ static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}
+static void io_poll_remove_waitq(struct io_poll *poll)
+{
+ /*
+ * If the waitqueue is being freed early but someone already holds
+ * ownership over it, we have to tear down the request as best we can.
+ * That means immediately removing the request from its waitqueue and
+ * preventing all further accesses to the waitqueue via the request.
+ */
+ list_del_init(&poll->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step, since as soon as req->head is
+ * NULL'ed out, the request can be completed and freed, since
+ * io_poll_remove_entry() will no longer need to take the waitqueue
+ * lock.
+ */
+ smp_store_release(&poll->head, NULL);
+}
+
static inline void io_poll_remove_entry(struct io_poll *poll)
{
struct wait_queue_head *head = smp_load_acquire(&poll->head);
if (head) {
spin_lock_irq(&head->lock);
- list_del_init(&poll->wait.entry);
- poll->head = NULL;
+ io_poll_remove_waitq(poll);
spin_unlock_irq(&head->lock);
}
}
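
The ordering here is the whole point of the new helper: the wait entry is unlinked first, and poll->head is cleared last with a release store, which pairs with the smp_load_acquire() in io_poll_remove_entry() so that once the acquire side sees NULL the entry is guaranteed to be off the waitqueue. A minimal userspace sketch of the same release/acquire pairing, using C11 atomics in place of the kernel primitives (all names below are illustrative, not part of this patch):

/* Userspace sketch of the release/acquire pairing used above; C11
 * atomics stand in for smp_store_release()/smp_load_acquire(). */
#include <stdatomic.h>
#include <stddef.h>

struct fake_poll {
        _Atomic(void *) head;   /* plays the role of poll->head */
        int linked;             /* plays the role of the wait entry */
};

static void fake_remove_waitq(struct fake_poll *p)
{
        p->linked = 0;          /* unlink first */
        /* release: the unlink above is visible before head reads NULL */
        atomic_store_explicit(&p->head, NULL, memory_order_release);
}

static int fake_remove_entry(struct fake_poll *p)
{
        /* acquire: pairs with the release store in fake_remove_waitq() */
        void *head = atomic_load_explicit(&p->head, memory_order_acquire);

        return head != NULL;    /* only touch the waitqueue if still live */
}
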
@@ -220,11 +238,11 @@ static inline void io_poll_execute(struct io_kiocb *req, int res)
* req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
* poll and that the result is stored in req->cqe.
*/
-static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
{
int v;
- if (unlikely(io_should_terminate_tw()))
+ if (unlikely(tw.cancel))
return -ECANCELED;
do {
@@ -286,12 +304,13 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
return IOU_POLL_REMOVE_POLL_USE_RES;
}
} else {
- int ret = io_poll_issue(req, ts);
- if (ret == IOU_STOP_MULTISHOT)
+ int ret = io_poll_issue(req, tw);
+
+ if (ret == IOU_COMPLETE)
return IOU_POLL_REMOVE_POLL_USE_RES;
else if (ret == IOU_REQUEUE)
return IOU_POLL_REQUEUE;
- if (ret < 0)
+ if (ret != IOU_RETRY && ret < 0)
return ret;
}
@@ -309,11 +328,12 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
return IOU_POLL_NO_ACTION;
}
-void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
+void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_kiocb *req = tw_req.req;
int ret;
- ret = io_poll_check_events(req, ts);
+ ret = io_poll_check_events(req, tw);
if (ret == IOU_POLL_NO_ACTION) {
return;
} else if (ret == IOU_POLL_REQUEUE) {
@@ -331,7 +351,7 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
poll = io_kiocb_to_cmd(req, struct io_poll);
req->cqe.res = mangle_poll(req->cqe.res & poll->events);
} else if (ret == IOU_POLL_REISSUE) {
- io_req_task_submit(req, ts);
+ io_req_task_submit(tw_req, tw);
return;
} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
req->cqe.res = ret;
@@ -339,14 +359,14 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
}
io_req_set_res(req, req->cqe.res, 0);
- io_req_task_complete(req, ts);
+ io_req_task_complete(tw_req, tw);
} else {
- io_tw_lock(req->ctx, ts);
+ io_tw_lock(req->ctx, tw);
if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
- io_req_task_complete(req, ts);
+ io_req_task_complete(tw_req, tw);
else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
- io_req_task_submit(req, ts);
+ io_req_task_submit(tw_req, tw);
else
io_req_defer_failed(req, ret);
}
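
The task-work callbacks now take an io_tw_req wrapper plus an io_tw_token_t token instead of the raw request and io_tw_state pointer. Their definitions are not part of this file; from the call sites above one can only infer that the wrapper exposes the request as tw_req.req and that the token exposes a cancellation indicator as tw.cancel. A rough sketch of shapes consistent with that usage, stated purely as an assumption:

/* Assumed shapes only, inferred from tw_req.req and tw.cancel above;
 * the real definitions live in the io_uring headers, not in poll.c. */
struct io_tw_req {
        struct io_kiocb *req;   /* the request being run from task work */
};

typedef struct {
        bool cancel;            /* set when task work should terminate early */
} io_tw_token_t;
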
@@ -366,23 +386,7 @@ static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
io_poll_mark_cancelled(req);
/* we have to kick tw in case it's not already */
io_poll_execute(req, 0);
-
- /*
- * If the waitqueue is being freed early but someone is already
- * holds ownership over it, we have to tear down the request as
- * best we can. That means immediately removing the request from
- * its waitqueue and preventing all further accesses to the
- * waitqueue via the request.
- */
- list_del_init(&poll->wait.entry);
-
- /*
- * Careful: this *must* be the last step, since as soon
- * as req->head is NULL'ed out, the request can be
- * completed and freed, since aio_poll_complete_work()
- * will no longer need to take the waitqueue lock.
- */
- smp_store_release(&poll->head, NULL);
+ io_poll_remove_waitq(poll);
return 1;
}
@@ -411,8 +415,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
/* optional, saves extra locking for removal in tw handler */
if (mask && poll->events & EPOLLONESHOT) {
- list_del_init(&poll->wait.entry);
- poll->head = NULL;
+ io_poll_remove_waitq(poll);
if (wqe_is_double(wait))
req->flags &= ~REQ_F_DOUBLE_POLL;
else
@@ -648,15 +651,12 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
if (req->flags & REQ_F_POLLED) {
apoll = req->apoll;
kfree(apoll->double_poll);
- } else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- apoll = io_alloc_cache_get(&ctx->apoll_cache);
- if (!apoll)
- goto alloc_apoll;
- apoll->poll.retries = APOLL_MAX_RETRY;
} else {
-alloc_apoll:
- apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
- if (unlikely(!apoll))
+ if (!(issue_flags & IO_URING_F_UNLOCKED))
+ apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
+ else
+ apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ if (!apoll)
return NULL;
apoll->poll.retries = APOLL_MAX_RETRY;
}
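
The allocation now collapses into a single branch: reuse an entry from the per-ring apoll cache when the submission lock is held, otherwise fall back to an atomic kmalloc(), and return NULL on failure either way before the retry counter is reinitialised. A standalone userspace sketch of that cache-or-allocate fallback (illustrative names, not kernel API):

/* Userspace sketch of the cache-or-allocate fallback used above;
 * free_list/locked stand in for ctx->apoll_cache and the ring lock. */
#include <stdlib.h>

struct entry {
        struct entry *next;
};

struct cache {
        struct entry *free_list;
};

static struct entry *cache_or_alloc(struct cache *c, int locked)
{
        struct entry *e;

        if (locked && c->free_list) {   /* fast path: reuse a cached entry */
                e = c->free_list;
                c->free_list = e->next;
        } else {                        /* slow path: plain allocation */
                e = malloc(sizeof(*e));
        }
        return e;                       /* may be NULL, caller must check */
}
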
@@ -667,33 +667,18 @@ alloc_apoll:
return apoll;
}
-int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask)
{
- const struct io_issue_def *def = &io_issue_defs[req->opcode];
struct async_poll *apoll;
struct io_poll_table ipt;
- __poll_t mask = POLLPRI | POLLERR | EPOLLET;
int ret;
- if (!def->pollin && !def->pollout)
- return IO_APOLL_ABORTED;
+ mask |= EPOLLET;
if (!io_file_can_poll(req))
return IO_APOLL_ABORTED;
if (!(req->flags & REQ_F_APOLL_MULTISHOT))
mask |= EPOLLONESHOT;
- if (def->pollin) {
- mask |= EPOLLIN | EPOLLRDNORM;
-
- /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
- if (req->flags & REQ_F_CLEAR_POLLIN)
- mask &= ~EPOLLIN;
- } else {
- mask |= EPOLLOUT | EPOLLWRNORM;
- }
- if (def->poll_exclusive)
- mask |= EPOLLEXCLUSIVE;
-
apoll = io_req_alloc_apoll(req, issue_flags);
if (!apoll)
return IO_APOLL_ABORTED;
@@ -701,8 +686,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
req->flags |= REQ_F_POLLED;
ipt.pt._qproc = io_async_queue_proc;
- io_kbuf_recycle(req, issue_flags);
-
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
if (ret)
return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
@@ -710,6 +693,31 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
return IO_APOLL_OK;
}
+int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+{
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ __poll_t mask = POLLPRI | POLLERR;
+
+ if (!def->pollin && !def->pollout)
+ return IO_APOLL_ABORTED;
+ if (!io_file_can_poll(req))
+ return IO_APOLL_ABORTED;
+
+ if (def->pollin) {
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
+ if (req->flags & REQ_F_CLEAR_POLLIN)
+ mask &= ~EPOLLIN;
+ } else {
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
+ if (def->poll_exclusive)
+ mask |= EPOLLEXCLUSIVE;
+
+ return io_arm_apoll(req, issue_flags, mask);
+}
+
/*
* Returns true if we found and killed one or more poll requests
*/
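
With the split, io_arm_poll_handler() keeps only the opcode-driven mask selection and then defers to io_arm_apoll(), which ORs in EPOLLET unconditionally and EPOLLONESHOT for non-multishot requests. As a hedged illustration, a hypothetical caller that already knows the events it cares about could arm async poll directly with its own mask (this caller does not exist in the patch):

/* Hypothetical caller, not part of this patch: arm async poll for a
 * request that is only interested in readability and errors. */
static int example_arm_read_poll(struct io_kiocb *req, unsigned issue_flags)
{
        __poll_t mask = POLLPRI | POLLERR | EPOLLIN | EPOLLRDNORM;

        /* io_arm_apoll() will still add EPOLLET, and EPOLLONESHOT
         * unless REQ_F_APOLL_MULTISHOT is set on the request. */
        return io_arm_apoll(req, issue_flags, mask);
}
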
@@ -891,7 +899,7 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
if (ret > 0) {
io_req_set_res(req, ipt.result_mask, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}
@@ -930,12 +938,17 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
/* successfully updated, don't complete poll request */
- if (!ret2 || ret2 == -EIOCBQUEUED)
+ if (ret2 == IOU_ISSUE_SKIP_COMPLETE)
goto out;
+ /* request completed as part of the update, complete it */
+ else if (ret2 == IOU_COMPLETE)
+ goto complete;
}
- req_set_fail(preq);
io_req_set_res(preq, -ECANCELED, 0);
+complete:
+ if (preq->cqe.res < 0)
+ req_set_fail(preq);
preq->io_task_work.func = io_req_task_complete;
io_req_task_work_add(preq);
out:
@@ -946,5 +959,5 @@ out:
}
/* complete update request, we're done with it */
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}