author	Ming Lei <ming.lei@redhat.com>	2025-03-25 21:51:52 +0800
committer	Jens Axboe <axboe@kernel.dk>	2025-04-02 07:06:59 -0600
commit	1045afae4b8892dab99d320b17bff3b9c1f407d8 (patch)
tree	0d3b155f172c9435bc00727afa89b6eb939b9041 /io_uring
parent	149974fdb8e186a1c72b87cee806373624a8a375 (diff)
io_uring: support vectored kernel fixed buffer
io_uring already supports fixed kernel buffers, registered and unregistered via io_buffer_register_bvec() and io_buffer_unregister_bvec(). The vectored fixed-buffer infrastructure is now in place, so it is natural to support vectored I/O over fixed kernel buffers as well; one use case is ublk.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250325135155.935398-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
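As a rough illustration of what this enables (not part of the patch): once a driver such as ublk has registered a kernel buffer with io_buffer_register_bvec(), userspace can issue a vectored request against that registered buffer, with each iov_base interpreted as a byte offset into the kernel buffer rather than a user address. The sketch below fills an SQE by hand; the IORING_OP_READV_FIXED opcode (from the companion vectored registered-buffer work) and the buffer index 0 are assumptions for the example.

/*
 * Illustrative only: vectored read from a kernel buffer assumed to be
 * registered at index 0 by the driver via io_buffer_register_bvec().
 */
#include <string.h>
#include <sys/uio.h>
#include <linux/io_uring.h>

static void prep_vectored_kbuf_read(struct io_uring_sqe *sqe, int fd,
				    const struct iovec *vecs, unsigned nr)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode    = IORING_OP_READV_FIXED;   /* assumed opcode name */
	sqe->fd        = fd;
	sqe->addr      = (unsigned long)vecs;     /* iovec array */
	sqe->len       = nr;                      /* number of iovecs */
	sqe->buf_index = 0;                       /* registered kernel buffer */
	/*
	 * For a kernel fixed buffer, each vecs[i].iov_base carries the byte
	 * offset into the registered buffer, not a user pointer.
	 */
}

The kernel-side counterpart of that offset convention is visible in the diff below: io_vec_fill_kern_bvec() reads iov_base as the starting offset when it walks the registered bvec table.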
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/rsrc.c | 91
1 file changed, 88 insertions(+), 3 deletions(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 1e56cd3f55d1..5e64a8bb30a4 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1362,6 +1362,82 @@ static int io_estimate_bvec_size(struct iovec *iov, unsigned nr_iovs,
 	return max_segs;
 }
 
+static int io_vec_fill_kern_bvec(int ddir, struct iov_iter *iter,
+				 struct io_mapped_ubuf *imu,
+				 struct iovec *iovec, unsigned nr_iovs,
+				 struct iou_vec *vec)
+{
+	const struct bio_vec *src_bvec = imu->bvec;
+	struct bio_vec *res_bvec = vec->bvec;
+	unsigned res_idx = 0;
+	size_t total_len = 0;
+	unsigned iov_idx;
+
+	for (iov_idx = 0; iov_idx < nr_iovs; iov_idx++) {
+		size_t offset = (size_t)(uintptr_t)iovec[iov_idx].iov_base;
+		size_t iov_len = iovec[iov_idx].iov_len;
+		struct bvec_iter bi = {
+			.bi_size = offset + iov_len,
+		};
+		struct bio_vec bv;
+
+		bvec_iter_advance(src_bvec, &bi, offset);
+		for_each_mp_bvec(bv, src_bvec, bi, bi)
+			res_bvec[res_idx++] = bv;
+		total_len += iov_len;
+	}
+	iov_iter_bvec(iter, ddir, res_bvec, res_idx, total_len);
+	return 0;
+}
+
+static int iov_kern_bvec_size(const struct iovec *iov,
+			      const struct io_mapped_ubuf *imu,
+			      unsigned int *nr_seg)
+{
+	size_t offset = (size_t)(uintptr_t)iov->iov_base;
+	const struct bio_vec *bvec = imu->bvec;
+	int start = 0, i = 0;
+	size_t off = 0;
+	int ret;
+
+	ret = validate_fixed_range(offset, iov->iov_len, imu);
+	if (unlikely(ret))
+		return ret;
+
+	for (i = 0; off < offset + iov->iov_len && i < imu->nr_bvecs;
+			off += bvec[i].bv_len, i++) {
+		if (offset >= off && offset < off + bvec[i].bv_len)
+			start = i;
+	}
+	*nr_seg = i - start;
+	return 0;
+}
+
+static int io_kern_bvec_size(struct iovec *iov, unsigned nr_iovs,
+			     struct io_mapped_ubuf *imu, unsigned *nr_segs)
+{
+	unsigned max_segs = 0;
+	size_t total_len = 0;
+	unsigned i;
+	int ret;
+
+	*nr_segs = 0;
+	for (i = 0; i < nr_iovs; i++) {
+		if (unlikely(!iov[i].iov_len))
+			return -EFAULT;
+		if (unlikely(check_add_overflow(total_len, iov[i].iov_len,
+						&total_len)))
+			return -EOVERFLOW;
+		ret = iov_kern_bvec_size(&iov[i], imu, &max_segs);
+		if (unlikely(ret))
+			return ret;
+		*nr_segs += max_segs;
+	}
+	if (total_len > MAX_RW_COUNT)
+		return -EINVAL;
+	return 0;
+}
+
 int io_import_reg_vec(int ddir, struct iov_iter *iter,
 		      struct io_kiocb *req, struct iou_vec *vec,
 		      unsigned nr_iovs, unsigned issue_flags)
@@ -1376,14 +1452,20 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 	if (!node)
 		return -EFAULT;
 	imu = node->buf;
-	if (imu->is_kbuf)
-		return -EOPNOTSUPP;
 	if (!(imu->dir & (1 << ddir)))
 		return -EFAULT;
 
 	iovec_off = vec->nr - nr_iovs;
 	iov = vec->iovec + iovec_off;
-	nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+
+	if (imu->is_kbuf) {
+		int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
+
+		if (unlikely(ret))
+			return ret;
+	} else {
+		nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+	}
 
 	if (sizeof(struct bio_vec) > sizeof(struct iovec)) {
 		size_t bvec_bytes;
@@ -1410,6 +1492,9 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
 
+	if (imu->is_kbuf)
+		return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
+
 	return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
 }