From 74566df3a71c1b92da608868cca787557d8be7b2 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 13 Jan 2020 19:23:24 -0700
Subject: io_uring: don't setup async context for read/write fixed

We don't need it, and if we have it, then the retry handler will attempt
to copy the non-existent iovec with the inline iovec, with a segment
count that doesn't make sense.

Fixes: f67676d160c6 ("io_uring: ensure async punted read/write requests copy iovec")
Reported-by: Jonathan Lemon
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 38b54051facd..8321c2f5589b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1786,6 +1786,9 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
 			     struct iovec *iovec, struct iovec *fast_iov,
 			     struct iov_iter *iter)
 {
+	if (req->opcode == IORING_OP_READ_FIXED ||
+	    req->opcode == IORING_OP_WRITE_FIXED)
+		return 0;
 	if (!req->io && io_alloc_async_ctx(req))
 		return -ENOMEM;
--
cgit

From e0bbb3461ae000baec13e8ec5b5063202df228df Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 14 Jan 2020 22:06:11 -0700
Subject: io-wq: cancel work if we fail getting a mm reference

If we require mm and user context, mark the request for cancellation
if we fail to acquire the desired mm.

Signed-off-by: Jens Axboe
---
 fs/io-wq.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index 541c8a3e0bbb..5147d2213b01 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -445,10 +445,14 @@ next:
 		task_unlock(current);
 	}
 	if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
-	    wq->mm && mmget_not_zero(wq->mm)) {
-		use_mm(wq->mm);
-		set_fs(USER_DS);
-		worker->mm = wq->mm;
+	    wq->mm) {
+		if (mmget_not_zero(wq->mm)) {
+			use_mm(wq->mm);
+			set_fs(USER_DS);
+			worker->mm = wq->mm;
+		} else {
+			work->flags |= IO_WQ_WORK_CANCEL;
+		}
 	}
 	if (!worker->creds)
 		worker->creds = override_creds(wq->creds);
--
cgit
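For reference, the fixed-buffer path that the first patch short-circuits never carries an iovec at all: the request points into a buffer table registered up front, so there is nothing for the async context to copy. Below is a minimal userspace sketch of that path, not part of the series above; it assumes liburing, and the "testfile" name and 4KB buffer size are arbitrary choices for illustration.

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/uio.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct iovec iov;
		int fd;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		/* register one 4KB buffer; READ_FIXED refers to it by index */
		if (posix_memalign(&iov.iov_base, 4096, 4096))
			return 1;
		iov.iov_len = 4096;
		if (io_uring_register_buffers(&ring, &iov, 1) < 0)
			return 1;

		fd = open("testfile", O_RDONLY);
		if (fd < 0)
			return 1;

		/* IORING_OP_READ_FIXED: data lands in the registered buffer,
		   so no iovec travels with the request */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read_fixed(sqe, fd, iov.iov_base, 4096, 0, 0);
		io_uring_submit(&ring);

		if (!io_uring_wait_cqe(&ring, &cqe)) {
			printf("read: %d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return 0;
	}

If such a read is punted and retried, io_setup_async_rw() now returns early instead of copying an iovec that was never there.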
From 78912934f4f7dd7a424159c69bf9bdd46e823781 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 14 Jan 2020 22:09:06 -0700
Subject: io_uring: be consistent in assigning next work from handler

If we pass back dependent work in case of links, we need to always
ensure that we call the link setup and work prep handler. If not, we
might be missing some setup for the next work item.

Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 52 ++++++++++++++++++++++++++++------------------------
 1 file changed, 28 insertions(+), 24 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8321c2f5589b..e32268ce38a5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2037,6 +2037,28 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	return false;
 }
 
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+	struct io_wq_work *work = *workptr;
+	struct io_kiocb *link = work->data;
+
+	io_queue_linked_timeout(link);
+	work->func = io_wq_submit_work;
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+	struct io_kiocb *link;
+
+	io_prep_async_work(nxt, &link);
+	*workptr = &nxt->work;
+	if (link) {
+		nxt->work.flags |= IO_WQ_WORK_CB;
+		nxt->work.func = io_link_work_cb;
+		nxt->work.data = link;
+	}
+}
+
 static void io_fsync_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
@@ -2055,7 +2077,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2111,7 +2133,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2377,7 +2399,7 @@ static void io_accept_finish(struct io_wq_work **workptr)
 		return;
 	__io_accept(req, &nxt, false);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 #endif
 
@@ -2608,7 +2630,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 		req_set_fail_links(req);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -3271,15 +3293,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
-	struct io_wq_work *work = *workptr;
-	struct io_kiocb *link = work->data;
-
-	io_queue_linked_timeout(link);
-	work->func = io_wq_submit_work;
-}
-
 static void io_wq_submit_work(struct io_wq_work **workptr)
 {
 	struct io_wq_work *work = *workptr;
@@ -3316,17 +3329,8 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 	}
 
 	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt) {
-		struct io_kiocb *link;
-
-		io_prep_async_work(nxt, &link);
-		*workptr = &nxt->work;
-		if (link) {
-			nxt->work.flags |= IO_WQ_WORK_CB;
-			nxt->work.func = io_link_work_cb;
-			nxt->work.data = link;
-		}
-	}
+	if (!ret && nxt)
+		io_wq_assign_next(workptr, nxt);
 }
 
 static bool io_req_op_valid(int op)
--
cgit
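The consolidation above only matters for linked requests, where a completed work item hands its dependent back to io-wq. From userspace such a chain is built with IOSQE_IO_LINK; the sketch below shows a write linked to an fsync. The helper name queue_linked_write_fsync is invented for illustration and is not liburing API; fd is assumed open for writing.

	#include <sys/uio.h>
	#include <liburing.h>

	static int queue_linked_write_fsync(struct io_uring *ring, int fd,
					    struct iovec *iov)
	{
		struct io_uring_sqe *sqe;

		sqe = io_uring_get_sqe(ring);
		if (!sqe)
			return -1;
		io_uring_prep_writev(sqe, fd, iov, 1, 0);
		/* the fsync below may not start until this write completes */
		io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

		sqe = io_uring_get_sqe(ring);
		if (!sqe)
			return -1;
		io_uring_prep_fsync(sqe, fd, 0);

		/* two SQEs submitted as one dependency chain */
		return io_uring_submit(ring);
	}

If the write is punted to an io-wq worker, its completion handler finds the fsync as dependent work and, with the patch above, runs the same prep path (io_wq_assign_next()) as the initial submission, including arming any linked timeout via io_link_work_cb().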
From 797f3f535d59f05ad12c629338beef6cb801d19e Mon Sep 17 00:00:00 2001
From: Bijan Mottahedeh
Date: Wed, 15 Jan 2020 18:37:45 -0800
Subject: io_uring: clear req->result always before issuing a read/write request

req->result is cleared when io_issue_sqe() calls io_read/write_pre()
routines. Those routines, however, are not called when the sqe argument
is NULL, which is the case when io_issue_sqe() is called from
io_wq_submit_work(). io_issue_sqe() may then examine a stale result if
a polled request had previously failed with -EAGAIN:

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (req->result == -EAGAIN)
			return -EAGAIN;

		io_iopoll_req_issued(req);
	}

and in turn cause a subsequently completed request to be re-issued in
io_wq_submit_work().

Signed-off-by: Bijan Mottahedeh
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index e32268ce38a5..3130ed16456e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1843,6 +1843,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 	if (!force_nonblock)
 		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
+	req->result = 0;
 	io_size = ret;
 	if (req->flags & REQ_F_LINK)
 		req->result = io_size;
@@ -1930,6 +1931,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 	if (!force_nonblock)
 		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
+	req->result = 0;
 	io_size = ret;
 	if (req->flags & REQ_F_LINK)
 		req->result = io_size;
--
cgit

From 11ba820bf163e224bf5dd44e545a66a44a5b1d7a Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 15 Jan 2020 21:51:17 -0700
Subject: io_uring: ensure workqueue offload grabs ring mutex for poll list

A previous commit moved the locking for the async sqthread, but didn't
take into account that the io-wq workers still need it. We can't use
req->in_async for this anymore, as both the sqthread and io-wq workers
set it; gate the need for locking on io_wq_current_is_worker() instead.

Fixes: 8a4955ff1cca ("io_uring: sqthread should grab ctx->uring_lock for submissions")
Reported-by: Bijan Mottahedeh
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3130ed16456e..52e5764540e4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3286,10 +3286,19 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		return ret;
 
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
+		const bool in_async = io_wq_current_is_worker();
+
 		if (req->result == -EAGAIN)
 			return -EAGAIN;
 
+		/* workqueue context doesn't hold uring_lock, grab it now */
+		if (in_async)
+			mutex_lock(&ctx->uring_lock);
+
 		io_iopoll_req_issued(req);
+
+		if (in_async)
+			mutex_unlock(&ctx->uring_lock);
 	}
 
 	return 0;
--
cgit

From 44d282796f81eb1debc1d7cb53245b4cb3214cb5 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 16 Jan 2020 19:00:24 -0700
Subject: io_uring: only allow submit from owning task

If the credentials or the mm doesn't match, don't allow the task to
submit anything on behalf of this ring. The task that owns the ring can
pass the file descriptor to another task, but we don't want to allow
that task to submit an SQE that then assumes the ring mm and creds if
it needs to go async.

Cc: stable@vger.kernel.org
Suggested-by: Stefan Metzmacher
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 52e5764540e4..187dd94fd6b1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5159,6 +5159,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	} else if (to_submit) {
 		struct mm_struct *cur_mm;
 
+		if (current->mm != ctx->sqo_mm ||
+		    current_cred() != ctx->creds) {
+			ret = -EPERM;
+			goto out;
+		}
+
 		to_submit = min(to_submit, ctx->sq_entries);
 		mutex_lock(&ctx->uring_lock);
 		/* already have mm, so io_submit_sqes() won't try to grab it */
--
cgit
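The owner check in the final patch is directly observable from userspace: a fork()ed child inherits the ring fd and its shared mappings, and initially even the same creds, but its mm_struct is a copy rather than ctx->sqo_mm, so submission is rejected. A rough sketch of that behavior, assuming liburing, with error handling trimmed for brevity:

	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		int ret;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		if (fork() == 0) {
			/*
			 * Child: same ring fd and (shared) ring mappings,
			 * same creds, but current->mm is a copy of the
			 * parent's and not ctx->sqo_mm.
			 */
			sqe = io_uring_get_sqe(&ring);
			io_uring_prep_nop(sqe);
			ret = io_uring_submit(&ring);
			printf("child submit: %d (expect -EPERM)\n", ret);
			_exit(0);
		}
		wait(NULL);

		io_uring_queue_exit(&ring);
		return 0;
	}

The parent, as the ring owner, can keep submitting normally; only foreign tasks that merely hold the fd are blocked from assuming the ring's mm and creds.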