author		Linus Torvalds <torvalds@linux-foundation.org>	2023-07-28 10:19:44 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-07-28 10:19:44 -0700
commit		9c65505826395e1193495ad73087bcdaa4347813 (patch)
tree		c3fc5c28a2942c676fe7b9347495d43b6e0cace8 /io_uring
parent		0299a13af0be43f2679047c7236e3a58e587f3f8 (diff)
parent		7b72d661f1f2f950ab8c12de7e2bc48bdac8ed69 (diff)
Merge tag 'io_uring-6.5-2023-07-28' of git://git.kernel.dk/linux

Pull io_uring fix from Jens Axboe:

 "Just a single tweak to a patch from last week, to avoid having idle
  cqring waits be attributed as iowait"

* tag 'io_uring-6.5-2023-07-28' of git://git.kernel.dk/linux:
  io_uring: gate iowait schedule on having pending requests
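The "cqring waits" in question are tasks blocked waiting for completions, e.g. via io_uring_enter(2) or liburing's io_uring_wait_cqe(). As a minimal sketch of such a waiter (the file path, queue depth, and error handling below are illustrative assumptions, not taken from this commit):

	/* Sketch: a task blocking in a cqring wait via liburing.
	 * Build with: gcc -o wait_cqe wait_cqe.c -luring
	 * The path and queue depth are illustrative assumptions. */
	#include <liburing.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		char buf[4096];
		int fd, ret;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		fd = open("/etc/hostname", O_RDONLY);
		if (fd < 0)
			return 1;

		/* Queue one read: the task now has a request in flight. */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
		io_uring_submit(&ring);

		/*
		 * Block until a completion arrives. With this fix, the
		 * schedule inside the kernel counts as iowait only while
		 * requests are actually pending; a task parked here with
		 * nothing in flight no longer inflates the iowait statistic.
		 */
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret) {
			printf("read %d bytes\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}

		close(fd);
		io_uring_queue_exit(&ring);
		return 0;
	}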
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 89a611541bc4..f4591b912ea8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2493,11 +2493,20 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 	return 0;
 }
 
+static bool current_pending_io(void)
+{
+	struct io_uring_task *tctx = current->io_uring;
+
+	if (!tctx)
+		return false;
+	return percpu_counter_read_positive(&tctx->inflight);
+}
+
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq)
 {
-	int token, ret;
+	int io_wait, ret;
 
 	if (unlikely(READ_ONCE(ctx->check_cq)))
 		return 1;
@@ -2511,17 +2520,19 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 		return 0;
 
 	/*
-	 * Use io_schedule_prepare/finish, so cpufreq can take into account
-	 * that the task is waiting for IO - turns out to be important for low
-	 * QD IO.
+	 * Mark us as being in io_wait if we have pending requests, so cpufreq
+	 * can take into account that the task is waiting for IO - turns out
+	 * to be important for low QD IO.
 	 */
-	token = io_schedule_prepare();
+	io_wait = current->in_iowait;
+	if (current_pending_io())
+		current->in_iowait = 1;
 	ret = 0;
 	if (iowq->timeout == KTIME_MAX)
 		schedule();
 	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
 		ret = -ETIME;
-	io_schedule_finish(token);
+	current->in_iowait = io_wait;
 	return ret;
 }
 
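The change boils down to saving current->in_iowait, setting it only when current_pending_io() reports requests in flight, and restoring it after the schedule. One way to observe the effect from userspace is to sample the iowait field of /proc/stat while a task idles in a cqring wait; per the commit message, such idle waits were previously attributed as iowait. A small sampling sketch (illustrative only, not part of the commit):

	/* Sketch: sample the system-wide iowait counter, the 5th value on
	 * the "cpu" line of /proc/stat, measured in USER_HZ ticks. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long user, nice, system, idle, iowait;
		FILE *f = fopen("/proc/stat", "r");

		if (!f)
			return 1;
		if (fscanf(f, "cpu %llu %llu %llu %llu %llu",
			   &user, &nice, &system, &idle, &iowait) == 5)
			printf("iowait ticks: %llu\n", iowait);
		fclose(f);
		return 0;
	}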