path: root/drivers/block/ublk_drv.c
author	Ming Lei <ming.lei@redhat.com>	2023-10-09 17:33:17 +0800
committer	Jens Axboe <axboe@kernel.dk>	2023-10-17 08:27:55 -0600
commit	3421c7f68bba52281bbb38bc76c18dc03cb689e4 (patch)
tree	797f00777984d521a287b39858ba91d53c9b59ac /drivers/block/ublk_drv.c
parent	8ed90e370f9b5442a2766b357b92e36bbf6ba82d (diff)
ublk: make sure io cmd handled in submitter task context
In a well-implemented ublk server, a ublk io command is never linked into any chain, and it is always handled in no-wait style, so the io cmd is normally handled in the submitter task context.

However, the server may set IOSQE_ASYNC, or the io command may be linked into a chain by mistake, and then we can still run in io-wq context, where ctx->uring_lock isn't held.

So in case of IO_URING_F_UNLOCKED, schedule the command via io_uring_cmd_complete_in_task() to force running it in the submitter task. Then ublk_ch_uring_cmd_local() is guaranteed to run with the context uring_lock held, and we no longer need to worry about synchronization along the submission code path.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20231009093324.957829-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
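For context, a minimal userspace sketch of the submission side this commit assumes (assumptions: liburing and <linux/ublk_cmd.h> are available; queue_fetch_req() is a hypothetical helper, not part of this patch or of any ublk server library). The point is that sqe->flags stays 0, i.e. no IOSQE_ASYNC and no IOSQE_IO_LINK, so the uring_cmd is handled inline in the submitter task with ctx->uring_lock held, which is what the new WARN_ON_ONCE() below asserts:

#include <string.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

/*
 * Hypothetical helper (illustration only): queue one UBLK_U_IO_FETCH_REQ
 * uring_cmd on the ublk char device @cdev_fd.  sqe->flags is left at 0,
 * so neither IOSQE_ASYNC nor IOSQE_IO_LINK is set and the kernel handles
 * the command directly in the submitter task context.
 */
static int queue_fetch_req(struct io_uring *ring, int cdev_fd,
			   __u16 q_id, __u16 tag, __u64 io_buf_addr)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct ublksrv_io_cmd *cmd;

	if (!sqe)
		return -EAGAIN;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = cdev_fd;
	sqe->cmd_op = UBLK_U_IO_FETCH_REQ;
	sqe->user_data = tag;	/* match the completion back to the io slot */

	/* the ublk io command payload lives in the SQE's cmd area */
	cmd = (struct ublksrv_io_cmd *)sqe->cmd;
	cmd->q_id = q_id;
	cmd->tag = tag;
	cmd->addr = io_buf_addr;

	return 0;	/* caller submits the ring with io_uring_submit() */
}

If a server does set IOSQE_ASYNC or links the cmd, the patch below falls back to io_uring_cmd_complete_in_task() so the handler still runs in the submitter task.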
Diffstat (limited to 'drivers/block/ublk_drv.c')
-rw-r--r--	drivers/block/ublk_drv.c	22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 59d3652699f5..2e2efae62c77 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1811,7 +1811,8 @@ fail_put:
 	return NULL;
 }
 
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+		unsigned int issue_flags)
 {
 	/*
 	 * Not necessary for async retry, but let's keep it simple and always
@@ -1825,9 +1826,28 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 		.addr = READ_ONCE(ub_src->addr)
 	};
 
+	WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
+
 	return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
 }
 
+static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
+		unsigned int issue_flags)
+{
+	ublk_ch_uring_cmd_local(cmd, issue_flags);
+}
+
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+	/* well-implemented server won't run into unlocked */
+	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
+		io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb);
+		return -EIOCBQUEUED;
+	}
+
+	return ublk_ch_uring_cmd_local(cmd, issue_flags);
+}
+
 static inline bool ublk_check_ubuf_dir(const struct request *req,
 		int ubuf_dir)
 {