author    Pavel Begunkov <asml.silence@gmail.com>    2020-03-03 21:33:13 +0300
committer Jens Axboe <axboe@kernel.dk>               2020-03-03 20:02:49 -0700
commit    7a743e225b2a9da772b28a50031e1ccd8a8ce404 (patch)
tree      93040486f98e7ace0000767e50d7afe38a9d073c /fs/io_uring.c
parent    014db0073cc6a12e1f421b9231d6f3aa35735823 (diff)
io_uring: get next work with submission ref drop
If, after dropping the submission reference, req->refs == 1, the request is done, because that remaining reference belongs to io_put_work() and will be dropped synchronously shortly after. In this case it's safe to steal the next work from the request.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
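[Editor's note] To make the reference-count reasoning above concrete, here is a minimal userspace sketch. It is illustrative only: it uses C11 atomics rather than the kernel's refcount_t, and the struct and values are hypothetical, not taken from io_uring.

    #include <stdatomic.h>
    #include <stdio.h>

    struct req { atomic_int refs; };

    int main(void)
    {
            /* two refs: the submission ref and the one io_put_work() drops */
            struct req r = { .refs = 2 };

            atomic_fetch_sub(&r.refs, 1);   /* drop the submission reference */
            if (atomic_load(&r.refs) == 1) {
                    /* only io_put_work()'s ref remains: no asynchronous
                     * users are left, so the next work can be stolen */
                    puts("safe to steal next work");
            }

            atomic_fetch_sub(&r.refs, 1);   /* what io_put_work() does later */
            return 0;
    }

The key point is ordering: the dec-then-read happens inside the io-wq handler, before io_put_work() runs, so a count of exactly 1 can only mean the worker's own reference.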
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  89
1 file changed, 48 insertions, 41 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f4faaa2a9a3f..40ca9e6a5ace 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1518,6 +1518,27 @@ static void io_free_req(struct io_kiocb *req)
io_queue_async_work(nxt);
}
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+ struct io_wq_work *work = *workptr;
+ struct io_kiocb *link = work->data;
+
+ io_queue_linked_timeout(link);
+ io_wq_submit_work(workptr);
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+ struct io_kiocb *link;
+
+ *workptr = &nxt->work;
+ link = io_prep_linked_timeout(nxt);
+ if (link) {
+ nxt->work.func = io_link_work_cb;
+ nxt->work.data = link;
+ }
+}
+
/*
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
@@ -1537,6 +1558,27 @@ static void io_put_req(struct io_kiocb *req)
io_free_req(req);
}
+static void io_put_req_async_completion(struct io_kiocb *req,
+ struct io_wq_work **workptr)
+{
+ /*
+ * It's in an io-wq worker, so there should always be at least
+ * one reference, which will be dropped in io_put_work() just
+ * after the current handler returns.
+ *
+ * It also means that if the counter dropped to 1, then there are
+ * no asynchronous users left, so it's safe to steal the next work.
+ */
+ refcount_dec(&req->refs);
+ if (refcount_read(&req->refs) == 1) {
+ struct io_kiocb *nxt = NULL;
+
+ io_req_find_next(req, &nxt);
+ if (nxt)
+ io_wq_assign_next(workptr, nxt);
+ }
+}
+
/*
* Must only be used if we don't need to care about links, usually from
* within the completion handling itself.
@@ -2543,27 +2585,6 @@ static bool io_req_cancelled(struct io_kiocb *req)
return false;
}
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
- struct io_wq_work *work = *workptr;
- struct io_kiocb *link = work->data;
-
- io_queue_linked_timeout(link);
- io_wq_submit_work(workptr);
-}
-
-static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
-{
- struct io_kiocb *link;
-
- *workptr = &nxt->work;
- link = io_prep_linked_timeout(nxt);
- if (link) {
- nxt->work.func = io_link_work_cb;
- nxt->work.data = link;
- }
-}
-
static void __io_fsync(struct io_kiocb *req)
{
loff_t end = req->sync.off + req->sync.len;
@@ -2581,14 +2602,11 @@ static void __io_fsync(struct io_kiocb *req)
static void io_fsync_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
if (io_req_cancelled(req))
return;
__io_fsync(req);
- io_put_req(req); /* drop submission reference */
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ io_put_req_async_completion(req, workptr);
}
static int io_fsync(struct io_kiocb *req, bool force_nonblock)
@@ -2617,14 +2635,11 @@ static void __io_fallocate(struct io_kiocb *req)
static void io_fallocate_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
if (io_req_cancelled(req))
return;
__io_fallocate(req);
- io_put_req(req); /* drop submission reference */
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ io_put_req_async_completion(req, workptr);
}
static int io_fallocate_prep(struct io_kiocb *req,
@@ -2988,13 +3003,10 @@ static void __io_close_finish(struct io_kiocb *req)
static void io_close_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
/* not cancellable, don't do io_req_cancelled() */
__io_close_finish(req);
- io_put_req(req); /* drop submission reference */
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ io_put_req_async_completion(req, workptr);
}
static int io_close(struct io_kiocb *req, bool force_nonblock)
@@ -3436,14 +3448,11 @@ static int __io_accept(struct io_kiocb *req, bool force_nonblock)
static void io_accept_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
if (io_req_cancelled(req))
return;
__io_accept(req, false);
- io_put_req(req); /* drop submission reference */
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ io_put_req_async_completion(req, workptr);
}
#endif
@@ -4682,7 +4691,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
int ret = 0;
/* if NO_CANCEL is set, we must still run the work */
@@ -4711,9 +4719,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
io_put_req(req);
}
- io_put_req(req); /* drop submission reference */
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ io_put_req_async_completion(req, workptr);
}
static int io_req_needs_file(struct io_kiocb *req, int fd)
@@ -6103,6 +6109,7 @@ static void io_put_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ /* Consider that io_put_req_async_completion() relies on this ref */
io_put_req(req);
}