author    Pavel Begunkov <asml.silence@gmail.com>    2023-06-23 12:23:26 +0100
committer Jens Axboe <axboe@kernel.dk>               2023-06-23 08:19:39 -0600
commit    91c7884ac9a92ffbf78af7fc89603daf24f448a9 (patch)
tree      28e371d107750ca053e4a4c8e814245136061026 /io_uring
parent    2fdd6fb5ff958a0f6b403e3f3ffd645b60b2a2b2 (diff)
io_uring: remove IOU_F_TWQ_FORCE_NORMAL
Extract a function for non-local task_work_add, and use it directly from
io_move_task_work_from_local(). Now we don't use IOU_F_TWQ_FORCE_NORMAL and
it can be killed.

As a small positive side effect, task->io_uring is now grabbed only in
io_req_normal_work_add(); it is not needed for io_req_local_work_add().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2e55571e8ff2927ae3cc12da606d204e2485525b.1687518903.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
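For reference, the queueing path after this patch, condensed from the io_uring.c hunks below: DEFER_TASKRUN rings take the ctx-local path under RCU, while everything else goes through the new io_req_normal_work_add() helper.

    void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
    {
    	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
    		/* queue onto the ring-local task_work list, under RCU */
    		rcu_read_lock();
    		io_req_local_work_add(req, flags);
    		rcu_read_unlock();
    	} else {
    		/* plain per-task task_work; only this path needs task->io_uring */
    		io_req_normal_work_add(req);
    	}
    }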
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c  25
-rw-r--r--  io_uring/io_uring.h   5
2 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3eec5c761d0a..776d1aa73d26 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1317,7 +1317,7 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx)
}
}
-static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned nr_wait, nr_tw, nr_tw_prev;
@@ -1368,19 +1368,11 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}
-void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+static void io_req_normal_work_add(struct io_kiocb *req)
{
struct io_uring_task *tctx = req->task->io_uring;
struct io_ring_ctx *ctx = req->ctx;
- if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
- (ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
- rcu_read_lock();
- io_req_local_work_add(req, flags);
- rcu_read_unlock();
- return;
- }
-
/* task_work already pending, we're done */
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
return;
@@ -1394,6 +1386,17 @@ void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
io_fallback_tw(tctx);
}
+void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+{
+ if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ rcu_read_lock();
+ io_req_local_work_add(req, flags);
+ rcu_read_unlock();
+ } else {
+ io_req_normal_work_add(req);
+ }
+}
+
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
struct llist_node *node;
@@ -1404,7 +1407,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
io_task_work.node);
node = node->next;
- __io_req_task_work_add(req, IOU_F_TWQ_FORCE_NORMAL);
+ io_req_normal_work_add(req);
}
}
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 9718897133db..20ba6df49b1f 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -16,9 +16,6 @@
#endif
enum {
- /* don't use deferred task_work */
- IOU_F_TWQ_FORCE_NORMAL = 1,
-
/*
* A hint to not wake right away but delay until there are enough of
* tw's queued to match the number of CQEs the task is waiting for.
@@ -26,7 +23,7 @@ enum {
* Must not be used with requests generating more than one CQE.
* It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
*/
- IOU_F_TWQ_LAZY_WAKE = 2,
+ IOU_F_TWQ_LAZY_WAKE = 1,
};
enum {
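With IOU_F_TWQ_FORCE_NORMAL gone, IOU_F_TWQ_LAZY_WAKE is the only flag left in the enum, which is why its value moves from 2 to 1. A minimal sketch of a caller passing the hint (illustrative only; the surrounding request setup is assumed and not part of this patch):

    /* Hint: don't wake the waiter right away; batch until enough
     * task_work is queued to satisfy the CQEs it is waiting for.
     * Only valid for requests posting a single CQE, and ignored
     * unless the ring was set up with IORING_SETUP_DEFER_TASKRUN.
     */
    __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);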