author    Linus Torvalds <torvalds@linux-foundation.org>  2020-09-06 12:10:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-09-06 12:10:27 -0700
commit    a8205e310011f09cc73cd577d7b0074c57b9bb54 (patch)
tree      ec0fdc69c45110921b98c6e0664a970f1e193849
parent    2ccdd9f8b2ce7290aad6f0a34200ad394e61f940 (diff)
parent    c127a2a1b7baa5eb40a7e2de4b7f0c51ccbbb2ef (diff)
Merge tag 'io_uring-5.9-2020-09-06' of git://git.kernel.dk/linux-block
Pull more io_uring fixes from Jens Axboe:
 "Two followup fixes. One is fixing a regression from this merge
  window, the other is two commits fixing cancelation of deferred
  requests. Both have gone through full testing, and both spawned a
  few new regression test additions to liburing.

   - Don't play games with const, properly store the output iovec and
     assign it as needed.

   - Deferred request cancelation fix (Pavel)"

* tag 'io_uring-5.9-2020-09-06' of git://git.kernel.dk/linux-block:
  io_uring: fix linked deferred ->files cancellation
  io_uring: fix cancel of deferred reqs with ->files
  io_uring: fix explicit async read/write mapping for large segments
-rw-r--r--	fs/io_uring.c	55
1 file changed, 52 insertions(+), 3 deletions(-)
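The "explicit async read/write mapping for large segments" fix is easiest to exercise from userspace: a readv submitted with IOSQE_ASYNC and more than UIO_FASTIOV (8) segments goes through io_rw_prep_async(), where the imported iovec has to be stored back correctly. The sketch below is illustrative only and is not one of the liburing regression tests mentioned above; it assumes liburing is installed, and the file path, segment count and buffer size are arbitrary.

/*
 * Illustrative sketch, not the actual liburing regression test: submit an
 * explicitly-async readv with more segments than fit in the inline fast_iov
 * array, which is the path the first hunk below corrects.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

#define NR_SEGS  16	/* larger than UIO_FASTIOV (8), so a heap iovec is used */
#define SEG_SIZE 4096

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov[NR_SEGS];
	int fd, i;

	fd = open("/etc/services", O_RDONLY);	/* any readable file will do */
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	for (i = 0; i < NR_SEGS; i++) {
		iov[i].iov_base = malloc(SEG_SIZE);
		iov[i].iov_len = SEG_SIZE;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, iov, NR_SEGS, 0);
	sqe->flags |= IOSQE_ASYNC;	/* force the explicit async prep path */

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("readv returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}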
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0d7be2e9d005..3790c7fe9fee 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2980,14 +2980,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
bool force_nonblock)
{
struct io_async_rw *iorw = &req->io->rw;
+ struct iovec *iov;
ssize_t ret;
- iorw->iter.iov = iorw->fast_iov;
- ret = __io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
- &iorw->iter, !force_nonblock);
+ iorw->iter.iov = iov = iorw->fast_iov;
+ ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
if (unlikely(ret < 0))
return ret;
+ iorw->iter.iov = iov;
io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
return 0;
}
@@ -8023,6 +8024,28 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
return false;
}
+static inline bool io_match_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files;
+}
+
+static bool io_match_link_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ struct io_kiocb *link;
+
+ if (io_match_files(req, files))
+ return true;
+ if (req->flags & REQ_F_LINK_HEAD) {
+ list_for_each_entry(link, &req->link_list, link_list) {
+ if (io_match_files(link, files))
+ return true;
+ }
+ }
+ return false;
+}
+
/*
* We're looking to cancel 'req' because it's holding on to our files, but
* 'req' could be a link to another request. See if it is, and cancel that
@@ -8097,12 +8120,38 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
io_timeout_remove_link(ctx, req);
}
+static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_defer_entry *de = NULL;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_link_files(de->req, files)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+ req_set_fail_links(de->req);
+ io_put_req(de->req);
+ io_req_complete(de->req, -ECANCELED);
+ kfree(de);
+ }
+}
+
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
if (list_empty_careful(&ctx->inflight_list))
return;
+ io_cancel_defer_files(ctx, files);
/* cancel all at once, should be faster than doing it one by one*/
io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
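For reference, the two-phase shape of io_cancel_defer_files() above (detach the targeted entries while holding the completion lock, then complete them with -ECANCELED after dropping it) translates to userspace roughly as below. This is a simplified stand-in, not kernel code: the list, lock and "files" key are plain C substitutes, and the per-entry matching glosses over the list_cut_position() prefix cut the kernel does to preserve drain ordering.

/*
 * Userspace sketch of the detach-under-lock, complete-outside-lock pattern.
 * All types here are stand-ins for the kernel structures, not real APIs.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct defer_entry {
	struct defer_entry *next;
	void *files;			/* owner key, mirrors req->work.files */
	int id;
};

static struct defer_entry *defer_list;
static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;

static void cancel_defer_files(void *files)
{
	struct defer_entry **pp, *victims = NULL, *de;

	/* Phase 1: unlink matching entries while holding the lock; keep it short. */
	pthread_mutex_lock(&completion_lock);
	for (pp = &defer_list; (de = *pp) != NULL; ) {
		if (de->files == files) {
			*pp = de->next;
			de->next = victims;
			victims = de;
		} else {
			pp = &de->next;
		}
	}
	pthread_mutex_unlock(&completion_lock);

	/* Phase 2: "complete" the cancelled entries outside the lock. */
	while ((de = victims) != NULL) {
		victims = de->next;
		printf("request %d completed with -ECANCELED\n", de->id);
		free(de);
	}
}

int main(void)
{
	void *files_a = (void *)0x1, *files_b = (void *)0x2;

	for (int i = 0; i < 4; i++) {
		struct defer_entry *de = malloc(sizeof(*de));
		de->id = i;
		de->files = (i % 2) ? files_a : files_b;
		de->next = defer_list;
		defer_list = de;
	}
	cancel_defer_files(files_a);	/* cancels entries 1 and 3 only */
	return 0;
}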