summary | refs | log | tree | commit | diff
path: root/fs
diff options
context:
space:
mode:
author: Jens Axboe <axboe@kernel.dk> 2019-12-17 18:45:56 -0700
committer: Jens Axboe <axboe@kernel.dk> 2019-12-17 19:57:27 -0700
commit: fbf23849b1724d3ea362e346d0877a8d87978fe6 (patch)
tree: beb6e22672284ce899befd7ce487c8533b5258ce /fs
parent: 0969e783e3a8913f79df27286501a6c21e961524 (diff)
io_uring: make IORING_OP_CANCEL_ASYNC deferrable
If we defer this command as part of a link, we have to make sure that the SQE data has been read upfront. Integrate the async cancel op into the prep handling to make it safe for SQE reuse. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c | 32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b0411406c50a..1d6a5083f37f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -321,6 +321,11 @@ struct io_sync {
int flags;
};
+struct io_cancel {
+ struct file *file;
+ u64 addr;
+};
+
struct io_async_connect {
struct sockaddr_storage address;
};
@@ -362,6 +367,7 @@ struct io_kiocb {
struct io_poll_iocb poll;
struct io_accept accept;
struct io_sync sync;
+ struct io_cancel cancel;
};
const struct io_uring_sqe *sqe;
@@ -3018,18 +3024,33 @@ done:
io_put_req_find_next(req, nxt);
}
-static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
+static int io_async_cancel_prep(struct io_kiocb *req)
{
const struct io_uring_sqe *sqe = req->sqe;
- struct io_ring_ctx *ctx = req->ctx;
- if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ if (req->flags & REQ_F_PREPPED)
+ return 0;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
sqe->cancel_flags)
return -EINVAL;
- io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0);
+ req->flags |= REQ_F_PREPPED;
+ req->cancel.addr = READ_ONCE(sqe->addr);
+ return 0;
+}
+
+static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret;
+
+ ret = io_async_cancel_prep(req);
+ if (ret)
+ return ret;
+
+ io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
return 0;
}
@@ -3087,6 +3108,9 @@ static int io_req_defer_prep(struct io_kiocb *req)
case IORING_OP_TIMEOUT:
ret = io_timeout_prep(req, io, false);
break;
+ case IORING_OP_ASYNC_CANCEL:
+ ret = io_async_cancel_prep(req);
+ break;
case IORING_OP_LINK_TIMEOUT:
ret = io_timeout_prep(req, io, true);
break;