author    Sagi Grimberg <sagi@grimberg.me>    2018-09-28 15:40:43 -0700
committer Christoph Hellwig <hch@lst.de>     2018-10-01 14:16:13 -0700
commit    73383adfad245bb84e6d6ef7830f01048fcfc217 (patch)
tree      bd1c2975f84686cc1f5cbbddceddd080fcfdf4e9 /drivers/nvme/target/io-cmd-bdev.c
parent    783f4a4408e1251d17f333ad56abac24dde988b9 (diff)
nvmet: don't split large I/Os unconditionally
If we know that the I/O size exceeds our inline bio vec, there is no point in using it only to split the rest afterwards; allocate a bio of the right size to begin with. We could in theory reuse the inline bio and only allocate the bio_vec, but it's really not worth optimizing for.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
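The change is an instance of the inline (small-buffer) pattern: keep a fixed-size buffer embedded in the request for the common small case, and allocate storage sized for the whole I/O up front when it will not fit, rather than starting with the small buffer and splitting later. A minimal user-space C sketch of the same pattern follows; the struct, the INLINE_LEN threshold, and the helper names are all illustrative, not taken from the kernel:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative user-space analogue of the inline-vs-allocated
 * decision made by the patch below; names and sizes are hypothetical. */
#define INLINE_LEN 64

struct io_req {
	char inline_buf[INLINE_LEN];   /* pre-sized inline storage */
	char *buf;                     /* points at inline_buf or heap */
	size_t len;
};

static int io_req_init(struct io_req *req, size_t len)
{
	req->len = len;
	if (len <= INLINE_LEN) {
		/* Small I/O: reuse the embedded buffer, no allocation. */
		req->buf = req->inline_buf;
	} else {
		/* Large I/O: allocate once at the right size instead of
		 * starting small and splitting the remainder later. */
		req->buf = malloc(len);
		if (!req->buf)
			return -1;
	}
	return 0;
}

static void io_req_free(struct io_req *req)
{
	if (req->buf != req->inline_buf)
		free(req->buf);
}

int main(void)
{
	struct io_req small, large;

	if (io_req_init(&small, 32) || io_req_init(&large, 4096))
		return 1;
	printf("small uses inline buffer: %d\n", small.buf == small.inline_buf);
	printf("large uses inline buffer: %d\n", large.buf == large.inline_buf);
	io_req_free(&small);
	io_req_free(&large);
	return 0;
}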
Diffstat (limited to 'drivers/nvme/target/io-cmd-bdev.c')
-rw-r--r--    drivers/nvme/target/io-cmd-bdev.c    9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 7bc9f6240432..f93fb5711142 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -58,7 +58,7 @@ static void nvmet_bio_done(struct bio *bio)
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
 	int sg_cnt = req->sg_cnt;
-	struct bio *bio = &req->b.inline_bio;
+	struct bio *bio;
 	struct scatterlist *sg;
 	sector_t sector;
 	blk_qc_t cookie;
@@ -81,7 +81,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	sector = le64_to_cpu(req->cmd->rw.slba);
 	sector <<= (req->ns->blksize_shift - 9);
 
-	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+		bio = &req->b.inline_bio;
+		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	} else {
+		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+	}
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_private = req;
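Note that the bio allocated here is only sized for min(sg_cnt, BIO_MAX_PAGES) vectors. When an I/O spans more segments than a single bio can hold, the remainder of nvmet_bdev_execute_rw (outside this hunk) allocates further bios and links them with bio_chain() before submission, so oversized I/Os are still handled; the patch simply stops routing large I/Os through the undersized inline bvec first.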