path: root/io_uring/waitid.c
Diffstat (limited to 'io_uring/waitid.c')
-rw-r--r-- io_uring/waitid.c | 31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index 6f851978606d..15a7daf3ff4f 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -118,26 +118,18 @@ static int io_waitid_finish(struct io_kiocb *req, int ret)
static void io_waitid_complete(struct io_kiocb *req, int ret)
{
struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
- struct io_tw_state ts = { .locked = true };
/* anyone completing better be holding a reference */
WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));
lockdep_assert_held(&req->ctx->uring_lock);
- /*
- * Did cancel find it meanwhile?
- */
- if (hlist_unhashed(&req->hash_node))
- return;
-
hlist_del_init(&req->hash_node);
ret = io_waitid_finish(req, ret);
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- io_req_task_complete(req, &ts);
}
static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
@@ -159,6 +151,7 @@ static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
list_del_init(&iwa->wo.child_wait.entry);
spin_unlock_irq(&iw->head->lock);
io_waitid_complete(req, -ECANCELED);
+ io_req_queue_tw_complete(req, -ECANCELED);
return true;
}
@@ -190,7 +183,7 @@ int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
return -ENOENT;
}
-bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
bool cancel_all)
{
struct hlist_node *tmp;
@@ -200,8 +193,9 @@ bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
lockdep_assert_held(&ctx->uring_lock);
hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
- if (!io_match_task_safe(req, task, cancel_all))
+ if (!io_match_task_safe(req, tctx, cancel_all))
continue;
+ hlist_del_init(&req->hash_node);
__io_waitid_cancel(ctx, req);
found = true;
}
@@ -263,6 +257,7 @@ static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
}
io_waitid_complete(req, ret);
+ io_req_task_complete(req, ts);
}
static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
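For readers following the completion split above, here is a minimal sketch, reconstructed from these hunks (surrounding bodies partially elided, not verbatim tree contents), of how io_waitid_complete() and its two callers divide the work after this change: io_waitid_complete() only unhashes the request and sets the result, the cancel path defers the CQE posting through io_req_queue_tw_complete(), and the task_work callback, which already runs with a tw state, completes inline via io_req_task_complete().

/* Sketch reconstructed from the hunks above; elided parts marked. */
static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));
	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_del_init(&req->hash_node);
	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	/* no terminal completion here any more; each caller finishes the request */
}

static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	/* ... reference grab and wait-queue detach elided, see the hunk above ... */
	io_waitid_complete(req, -ECANCELED);
	/* cancel holds the ring lock, so queue the final completion via task_work */
	io_req_queue_tw_complete(req, -ECANCELED);
	return true;
}

static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
{
	int ret = 0;	/* actual value computed earlier in the callback (elided) */

	io_waitid_complete(req, ret);
	/* already in task_work context: complete directly with the tw state */
	io_req_task_complete(req, ts);
}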
@@ -290,10 +285,16 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+ struct io_waitid_async *iwa;
if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
return -EINVAL;
+ iwa = io_uring_alloc_async_data(NULL, req);
+ if (unlikely(!iwa))
+ return -ENOMEM;
+ iwa->req = req;
+
iw->which = READ_ONCE(sqe->len);
iw->upid = READ_ONCE(sqe->fd);
iw->options = READ_ONCE(sqe->file_index);
@@ -304,16 +305,10 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+ struct io_waitid_async *iwa = req->async_data;
struct io_ring_ctx *ctx = req->ctx;
- struct io_waitid_async *iwa;
int ret;
- if (io_alloc_async_data(req))
- return -ENOMEM;
-
- iwa = req->async_data;
- iwa->req = req;
-
ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
iw->options, NULL);
if (ret)
@@ -336,7 +331,7 @@ int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
hlist_add_head(&req->hash_node, &ctx->waitid_list);
init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
- iwa->wo.child_wait.private = req->task;
+ iwa->wo.child_wait.private = req->tctx->task;
iw->head = &current->signal->wait_chldexit;
add_wait_queue(iw->head, &iwa->wo.child_wait);
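
A sketch of the end state of the allocation move, reconstructed from the last three hunks and assuming the allocation-failure check reads unlikely(!iwa): the async data is now allocated in io_waitid_prep() via io_uring_alloc_async_data(), so io_waitid() simply picks it up from req->async_data and no longer needs an -ENOMEM path of its own. Field reads and the rest of the issue path are elided, so this is illustrative rather than verbatim tree contents.

int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa;

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	/* async data now set up at prep time, for the lifetime of the request */
	iwa = io_uring_alloc_async_data(NULL, req);
	if (unlikely(!iwa))
		return -ENOMEM;
	iwa->req = req;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	/* remaining sqe field reads unchanged and elided here */
	return 0;
}

int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;	/* allocated at prep time */
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	/* no allocation (and no -ENOMEM handling) needed here any more */
	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
				    iw->options, NULL);
	/* ... hashing, wait-queue setup (child_wait.private = req->tctx->task)
	 *     and the rest of the issue path elided; see the final hunks above ... */
}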