author	Linus Torvalds <torvalds@linux-foundation.org>	2023-10-27 14:10:32 -1000
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-10-27 14:10:32 -1000
commit	56567a20b22bdbf85c3e55eee3bf2bd23fa2f108 (patch)
tree	4399adee6ec6ba2a43b1a57cf573cac5c231e6ee
parent	2dc4e0f45593abc53d58d0073b3b314f864c522b (diff)
parent	838b35bb6a89c36da07ca39520ec071d9250334d (diff)
Merge tag 'io_uring-6.6-2023-10-27' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 "Fix for an issue reported where reading fdinfo could find a NULL
  thread as we didn't properly synchronize, and then a disable for the
  IOCB_DIO_CALLER_COMP optimization as a recent report highlighted how
  that could lead to deadlocks if the task issued async O_DIRECT writes
  and then proceeded to do sync fallocate() calls"

* tag 'io_uring-6.6-2023-10-27' of git://git.kernel.dk/linux:
  io_uring/rw: disable IOCB_DIO_CALLER_COMP
  io_uring/fdinfo: lock SQ thread while retrieving thread cpu/pid
-rw-r--r--	io_uring/fdinfo.c	18
-rw-r--r--	io_uring/rw.c	9
2 files changed, 12 insertions, 15 deletions
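
For context, here is a minimal userspace sketch (not part of the commit) that opens an SQPOLL ring with liburing and dumps its /proc fdinfo, which is where the SqThread:/SqThreadCpu: fields touched by the fdinfo fix below are reported. The queue depth, buffer sizes and error handling are illustrative assumptions.

/*
 * Sketch only: create an SQPOLL io_uring and print its fdinfo, which
 * contains the SqThread:/SqThreadCpu: lines shown in the diff below.
 */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p = { .flags = IORING_SETUP_SQPOLL };
	char path[64], buf[4096];
	ssize_t n;
	int fd;

	if (io_uring_queue_init_params(8, &ring, &p))
		return 1;	/* SQPOLL setup typically needs privileges */

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", ring.ring_fd);
	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* includes SqThread:/SqThreadCpu: */
		close(fd);
	}

	io_uring_queue_exit(&ring);
	return 0;
}
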
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index c53678875416..f04a43044d91 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -53,7 +53,6 @@ static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
struct io_ring_ctx *ctx = f->private_data;
- struct io_sq_data *sq = NULL;
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
@@ -64,6 +63,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
unsigned int cq_shift = 0;
unsigned int sq_shift = 0;
unsigned int sq_entries, cq_entries;
+ int sq_pid = -1, sq_cpu = -1;
bool has_lock;
unsigned int i;
@@ -143,13 +143,19 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
has_lock = mutex_trylock(&ctx->uring_lock);
if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
- sq = ctx->sq_data;
- if (!sq->thread)
- sq = NULL;
+ struct io_sq_data *sq = ctx->sq_data;
+
+ if (mutex_trylock(&sq->lock)) {
+ if (sq->thread) {
+ sq_pid = task_pid_nr(sq->thread);
+ sq_cpu = task_cpu(sq->thread);
+ }
+ mutex_unlock(&sq->lock);
+ }
}
- seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
- seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+ seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = io_file_from_index(&ctx->file_table, i);
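
The change above replaces "stash a pointer to sq and dereference sq->thread after the check" with "take sq->lock, snapshot the pid and cpu while the thread is pinned, then report the copies". A hypothetical userspace analogue of that snapshot-under-trylock pattern, purely illustrative and not kernel code:

/*
 * Hypothetical sketch: copy the values you need while the lock keeps
 * the thread stable, instead of keeping a pointer and dereferencing
 * it after the lock (and possibly the thread) is gone.
 */
#include <pthread.h>

struct worker {
	pthread_mutex_t lock;
	int tid;		/* valid only while attached != 0 */
	int attached;
};

/* Return the worker's tid, or -1 if it is gone or the lock is busy. */
static int worker_tid_snapshot(struct worker *w)
{
	int tid = -1;

	if (pthread_mutex_trylock(&w->lock) == 0) {
		if (w->attached)
			tid = w->tid;	/* snapshot under the lock */
		pthread_mutex_unlock(&w->lock);
	}
	return tid;			/* safe to report after unlock */
}
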
diff --git a/io_uring/rw.c b/io_uring/rw.c
index c8c822fa7980..807d83ab756e 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -913,15 +913,6 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
kiocb_start_write(kiocb);
kiocb->ki_flags |= IOCB_WRITE;
- /*
- * For non-polled IO, set IOCB_DIO_CALLER_COMP, stating that our handler
- * groks deferring the completion to task context. This isn't
- * necessary and useful for polled IO as that can always complete
- * directly.
- */
- if (!(kiocb->ki_flags & IOCB_HIPRI))
- kiocb->ki_flags |= IOCB_DIO_CALLER_COMP;
-
if (likely(req->file->f_op->write_iter))
ret2 = call_write_iter(req->file, kiocb, &s->iter);
else if (req->file->f_op->write)
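
The removed hunk is the optimization the merge message refers to: with IOCB_DIO_CALLER_COMP the DIO write completion can be deferred to the issuing task, which can deadlock if that task is meanwhile blocked in a sync path waiting for outstanding DIO. A hedged liburing sketch of that workload shape (async O_DIRECT write, then a sync fallocate() on the same file); the file name and sizes are assumptions, not a reproducer from the commit:

/*
 * Sketch of the workload shape described in the merge message: queue
 * an async O_DIRECT write, then call fallocate() on the same file
 * while the write may still be in flight.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;
	fd = open("dio-test.bin", O_RDWR | O_CREAT | O_DIRECT, 0644);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);

	sqe = io_uring_get_sqe(&ring);		/* queue the async DIO write */
	io_uring_prep_write(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	fallocate(fd, 0, 0, 1 << 20);		/* sync call on the same file */

	io_uring_wait_cqe(&ring, &cqe);		/* reap the write completion */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
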