-rw-r--r--	fs/io-wq.c	16
-rw-r--r--	fs/io-wq.h	4
-rw-r--r--	fs/io_uring.c	26
3 files changed, 12 insertions, 34 deletions
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 2e2f14f42bf2..63ef195b1acb 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -555,23 +555,21 @@ get_next:
 
 		/* handle a whole dependent link */
 		do {
-			struct io_wq_work *old_work, *next_hashed, *linked;
+			struct io_wq_work *next_hashed, *linked;
 			unsigned int hash = io_get_work_hash(work);
 
 			next_hashed = wq_next_work(work);
 			io_impersonate_work(worker, work);
+			wq->do_work(work);
+			io_assign_current_work(worker, NULL);
 
-			old_work = work;
-			linked = wq->do_work(work);
-
+			linked = wq->free_work(work);
 			work = next_hashed;
 			if (!work && linked && !io_wq_is_hashed(linked)) {
 				work = linked;
 				linked = NULL;
 			}
 			io_assign_current_work(worker, work);
-			wq->free_work(old_work);
-
 			if (linked)
 				io_wqe_enqueue(wqe, linked);
 
@@ -850,11 +848,9 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
 	struct io_wq *wq = wqe->wq;
 
 	do {
-		struct io_wq_work *old_work = work;
-
 		work->flags |= IO_WQ_WORK_CANCEL;
-		work = wq->do_work(work);
-		wq->free_work(old_work);
+		wq->do_work(work);
+		work = wq->free_work(work);
 	} while (work);
 }
 
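
Note on the fs/io-wq.c hunks above: the worker now runs the request first (wq->do_work() no longer returns a next work), clears its current-work assignment, and then calls wq->free_work(), which both releases the request and returns the next linked work to continue with. Below is a minimal stand-alone C sketch of that run-then-free-and-chain convention; it is an illustration only, not kernel code, and every name in it is made up.

/*
 * Model of the two-step scheme: do_work() executes a work item and returns
 * nothing; free_work() releases it and returns the next linked item (or
 * NULL). The driver loop mirrors the reworked io_run_cancel() above.
 */
#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;	/* next item in the dependent link */
	int id;
};

static void do_work(struct work *w)
{
	printf("running work %d\n", w->id);
}

static struct work *free_work(struct work *w)
{
	struct work *next = w->next;

	free(w);		/* release the finished item */
	return next;		/* hand back the next linked item, if any */
}

int main(void)
{
	struct work *head = NULL, *w;

	/* build a short dependent chain: 2 -> 1 -> 0 */
	for (int i = 0; i < 3; i++) {
		w = malloc(sizeof(*w));
		if (!w)
			return 1;
		w->id = i;
		w->next = head;
		head = w;
	}

	/* same loop shape as io_run_cancel() after this patch */
	w = head;
	do {
		do_work(w);
		w = free_work(w);
	} while (w);

	return 0;
}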
diff --git a/fs/io-wq.h b/fs/io-wq.h
index e1ffb80a4a1d..e37a0f217cc8 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -106,8 +106,8 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
 	return container_of(work->list.next, struct io_wq_work, list);
 }
 
-typedef void (free_work_fn)(struct io_wq_work *);
-typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);
+typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
+typedef void (io_wq_work_fn)(struct io_wq_work *);
 
 struct io_wq_data {
 	struct user_struct *user;
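
Note on the fs/io-wq.h hunk above: the two callback typedefs swap return types, so io_wq_work_fn (the ->do_work callback) now returns void while free_work_fn (the ->free_work callback) returns the next io_wq_work to run, or NULL. The sketch below shows a conforming callback pair under the new typedefs; the struct definition is a reduced stand-in so the snippet compiles on its own (the real io_wq_work chains works through an io_wq_work_node list), and the my_* names are hypothetical.

/* Stand-in type, kept minimal so this sketch is self-contained. */
struct io_wq_work {
	struct io_wq_work *next;
};

/* The typedefs as they read after this patch. */
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

/* Runs a work item; under the new scheme it returns nothing. */
static void my_do_work(struct io_wq_work *work)
{
	(void)work;
}

/* Releases a work item and hands back the next linked one, if any. */
static struct io_wq_work *my_free_work(struct io_wq_work *work)
{
	return work->next;
}

/* Consumers wire the pair up through function pointers, as io_wq_data does. */
static io_wq_work_fn *do_work_cb = my_do_work;
static free_work_fn *free_work_cb = my_free_work;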
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5ee6a9273fca..b740a39110d6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2379,22 +2379,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
 		io_free_req_deferred(req);
 }
 
-static struct io_wq_work *io_steal_work(struct io_kiocb *req)
-{
-	struct io_kiocb *nxt;
-
-	/*
-	 * A ref is owned by io-wq in which context we're. So, if that's the
-	 * last one, it's safe to steal next work. False negatives are Ok,
-	 * it just will be re-punted async in io_put_work()
-	 */
-	if (refcount_read(&req->refs) != 1)
-		return NULL;
-
-	nxt = io_req_find_next(req);
-	return nxt ? &nxt->work : NULL;
-}
-
 static void io_double_put_req(struct io_kiocb *req)
 {
 	/* drop both submit and complete references */
@@ -6343,7 +6327,7 @@ static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
 	return 0;
 }
 
-static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+static void io_wq_submit_work(struct io_wq_work *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_kiocb *timeout;
@@ -6394,8 +6378,6 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
 		if (lock_ctx)
 			mutex_unlock(&lock_ctx->uring_lock);
 	}
-
-	return io_steal_work(req);
 }
 
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
@@ -8067,12 +8049,12 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
 	return __io_sqe_files_update(ctx, &up, nr_args);
 }
 
-static void io_free_work(struct io_wq_work *work)
+static struct io_wq_work *io_free_work(struct io_wq_work *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 
-	/* Consider that io_steal_work() relies on this ref */
-	io_put_req(req);
+	req = io_put_req_find_next(req);
+	return req ? &req->work : NULL;
 }
 
 static int io_init_wq_offload(struct io_ring_ctx *ctx,
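
Note on the fs/io_uring.c hunks above: io_wq_submit_work() no longer returns a next work, and the io_steal_work() heuristic (only steal the linked request when io-wq holds the last reference) is gone. Its job moves into io_free_work(), which drops the io-wq reference through io_put_req_find_next() and, if that was the last reference, returns the linked request's work for the worker to run next. The sketch below models only that put-and-find-next step in user space; the refcounting and names are simplified stand-ins, not the kernel's io_kiocb handling.

/*
 * Model of the consumer-side free_work: drop our reference and, only when it
 * was the last one, hand back the next request in the chain.
 */
#include <stdlib.h>

struct request {
	struct request *link;	/* next request in the chain, if any */
	int refs;		/* simplified reference count */
};

/* Plays the role of io_put_req_find_next() in this model. */
static struct request *put_req_find_next(struct request *req)
{
	struct request *nxt = NULL;

	if (--req->refs == 0) {
		nxt = req->link;
		free(req);
	}
	return nxt;
}

/* Shape of the new io_free_work(): return the next work to run, or NULL. */
static struct request *model_free_work(struct request *req)
{
	return put_req_find_next(req);
}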