Diffstat (limited to 'drivers/nvme/target/io-cmd-bdev.c')
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 187
1 file changed, 102 insertions(+), 85 deletions(-)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 3dd6f566a240..8d246b8ca604 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -5,16 +5,16 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
+#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
- const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
- /* Number of logical blocks per physical block. */
- const u32 lpp = ql->physical_block_size / ql->logical_block_size;
/* Logical blocks per physical block, 0's based. */
- const __le16 lpp0b = to0based(lpp);
+ const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
+ bdev_logical_block_size(bdev));
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -36,30 +36,46 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
- id->npwg = lpp0b;
+ id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
- id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
+ id->npdg = to0based(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
/* NPDA = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
- id->nows = to0based(ql->io_opt / ql->logical_block_size);
+ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+ /* Set WZDS and DRB if device supports unmapped write zeroes */
+ if (bdev_write_zeroes_unmap_sectors(bdev))
+ id->dlfeat = (1 << 3) | 0x1;
+}
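For reference, every 0's based field above goes through the to0based() helper from nvmet.h; assuming that helper is unchanged by this patch, it clamps to the 16-bit range before subtracting one, roughly:

	/* Sketch of the nvmet.h helper: 32-bit count -> 16-bit 0's based value */
	static inline __le16 to0based(u32 a)
	{
		return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
	}

So a device with 4 KiB physical and 512 byte logical blocks reports lpp0b = 7, while an unreported io_opt of 0 is clamped and ends up as a 0's based NOWS of 0.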
+
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->bdev_file) {
+ fput(ns->bdev_file);
+ ns->bdev = NULL;
+ ns->bdev_file = NULL;
+ }
}
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
- if (bi) {
- ns->metadata_size = bi->tuple_size;
- if (bi->profile == &t10_pi_type1_crc)
+ if (!bi)
+ return;
+
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
+ ns->metadata_size = bi->metadata_size;
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
ns->pi_type = NVME_NS_DPS_PI_TYPE1;
- else if (bi->profile == &t10_pi_type3_crc)
- ns->pi_type = NVME_NS_DPS_PI_TYPE3;
else
- /* Unsupported metadata type */
- ns->metadata_size = 0;
+ ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+ } else {
+ ns->metadata_size = 0;
}
}
@@ -67,42 +83,51 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
- ns->bdev = blkdev_get_by_path(ns->device_path,
- FMODE_READ | FMODE_WRITE, NULL);
- if (IS_ERR(ns->bdev)) {
- ret = PTR_ERR(ns->bdev);
+ /*
+ * When the buffered_io namespace attribute is enabled, the user wants
+ * this block device to be used as a file, so the block device can take
+ * advantage of the cache.
+ */
+ if (ns->buffered_io)
+ return -ENOTBLK;
+
+ ns->bdev_file = bdev_file_open_by_path(ns->device_path,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
+ if (IS_ERR(ns->bdev_file)) {
+ ret = PTR_ERR(ns->bdev_file);
if (ret != -ENOTBLK) {
- pr_err("failed to open block device %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->bdev));
+ pr_err("failed to open block device %s: (%d)\n",
+ ns->device_path, ret);
}
- ns->bdev = NULL;
+ ns->bdev_file = NULL;
return ret;
}
- ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->bdev = file_bdev(ns->bdev_file);
+ ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
ns->pi_type = 0;
ns->metadata_size = 0;
- if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
nvmet_bdev_ns_enable_integrity(ns);
- return 0;
-}
-
-void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
-{
- if (ns->bdev) {
- blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
- ns->bdev = NULL;
+ if (bdev_is_zoned(ns->bdev)) {
+ if (!nvmet_bdev_zns_enable(ns)) {
+ nvmet_bdev_ns_disable(ns);
+ return -EINVAL;
+ }
+ ns->csi = NVME_CSI_ZNS;
}
+
+ return 0;
}
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
- ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->size = bdev_nr_bytes(ns->bdev);
}
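The enable/disable paths above pair bdev_file_open_by_path() with fput(); a minimal standalone sketch of that file-based open/release pattern (the helper name, path handling, and printing here are illustrative, not from the patch):

	static int example_open_ns_bdev(const char *path)	/* hypothetical helper */
	{
		struct file *bdev_file;
		struct block_device *bdev;

		bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
						   NULL, NULL);	/* no holder, default holder ops */
		if (IS_ERR(bdev_file))
			return PTR_ERR(bdev_file);

		bdev = file_bdev(bdev_file);	/* the block_device behind the struct file */
		pr_info("%s: %lld bytes, %u byte logical blocks\n", path,
			bdev_nr_bytes(bdev), bdev_logical_block_size(bdev));

		fput(bdev_file);		/* release, as nvmet_bdev_ns_disable() now does */
		return 0;
	}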
-static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
u16 status = NVME_SC_SUCCESS;
@@ -112,36 +137,28 @@ static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
* Right now there exists M : 1 mapping between block layer error
* to the NVMe status code (see nvme_error_status()). For consistency,
* when we reverse map we use most appropriate NVMe Status code from
- * the group of the NVMe staus codes used in the nvme_error_status().
+ * the group of the NVMe status codes used in the nvme_error_status().
*/
switch (blk_sts) {
case BLK_STS_NOSPC:
- status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, length);
break;
case BLK_STS_TARGET:
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, slba);
break;
case BLK_STS_NOTSUPP:
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
- switch (req->cmd->common.opcode) {
- case nvme_cmd_dsm:
- case nvme_cmd_write_zeroes:
- status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
- break;
- default:
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
break;
case BLK_STS_MEDIUM:
status = NVME_SC_ACCESS_DENIED;
req->error_loc = offsetof(struct nvme_rw_command, nsid);
break;
case BLK_STS_IOERR:
- /* fallthru */
default:
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
}
@@ -165,8 +182,7 @@ static void nvmet_bio_done(struct bio *bio)
struct nvmet_req *req = bio->bi_private;
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
- if (bio != &req->b.inline_bio)
- bio_put(bio);
+ nvmet_req_bio_put(req, bio);
}
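nvmet_req_bio_put() centralizes the inline-bio check that the removed lines open-coded; a sketch of the equivalent helper, assuming the nvmet.h version mirrors the old logic:

	static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
	{
		/* only dynamically allocated bios are freed; the inline bio lives in the request */
		if (bio != &req->b.inline_bio)
			bio_put(bio);
	}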
#ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -175,29 +191,27 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
{
struct blk_integrity *bi;
struct bio_integrity_payload *bip;
- struct block_device *bdev = req->ns->bdev;
int rc;
size_t resid, len;
- bi = bdev_get_integrity(bdev);
+ bi = bdev_get_integrity(req->ns->bdev);
if (unlikely(!bi)) {
pr_err("Unable to locate bio_integrity\n");
return -ENODEV;
}
bip = bio_integrity_alloc(bio, GFP_NOIO,
- min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+ bio_max_segs(req->metadata_sg_cnt));
if (IS_ERR(bip)) {
pr_err("Unable to allocate bio_integrity_payload\n");
return PTR_ERR(bip);
}
- bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
/* virtual start sector must be in integrity interval units */
bip_set_seed(bip, bio->bi_iter.bi_sector >>
(bi->interval_exp - SECTOR_SHIFT));
- resid = bip->bip_iter.bi_size;
+ resid = bio_integrity_bytes(bi, bio_sectors(bio));
while (resid > 0 && sg_miter_next(miter)) {
len = min_t(size_t, miter->length, resid);
rc = bio_integrity_add_page(bio, miter->page, len,
@@ -226,12 +240,13 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
- int sg_cnt = req->sg_cnt;
+ unsigned int sg_cnt = req->sg_cnt;
struct bio *bio;
struct scatterlist *sg;
struct blk_plug plug;
sector_t sector;
- int op, i, rc;
+ blk_opf_t opf;
+ int i, rc;
struct sg_mapping_iter prot_miter;
unsigned int iter_flags;
unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
@@ -245,32 +260,34 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op |= REQ_FUA;
+ opf |= REQ_FUA;
iter_flags = SG_MITER_TO_SG;
} else {
- op = REQ_OP_READ;
+ opf = REQ_OP_READ;
iter_flags = SG_MITER_FROM_SG;
}
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR))
+ opf |= REQ_FAILFAST_DEV;
+
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op |= REQ_NOMERGE;
+ opf |= REQ_NOMERGE;
- sector = le64_to_cpu(req->cmd->rw.slba);
- sector <<= (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
+ GFP_KERNEL);
}
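Two helpers used above replace open-coded logic: nvmet_lba_to_sect() performs the same shift the removed lines did, and nvmet_use_inline_bvec() extends the old transfer-length test with a segment-count bound. A sketch of both, assuming the nvmet.h definitions follow the removed code:

	static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
	{
		/* namespace LBA -> 512-byte sector, as the removed shift did */
		return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
	}

	static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
	{
		/* small transfers with few segments can use the preallocated inline bvecs */
		return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
		       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
	}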
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = op;
blk_start_plug(&plug);
if (req->metadata_len)
@@ -291,10 +308,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
}
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
- bio_set_dev(bio, req->ns->bdev);
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+ opf, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -320,22 +336,29 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!bdev_write_cache(req->ns->bdev)) {
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
+ return;
+ }
+
if (!nvmet_check_transfer_len(req, 0))
return;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(bio);
}
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
- return NVME_SC_INTERNAL | NVME_SC_DNR;
+ if (!bdev_write_cache(req->ns->bdev))
+ return 0;
+
+ if (blkdev_issue_flush(req->ns->bdev))
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
return 0;
}
@@ -346,9 +369,9 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
int ret;
ret = __blkdev_issue_discard(ns->bdev,
- le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ nvmet_lba_to_sect(ns, range->slba),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
- GFP_KERNEL, 0, bio);
+ GFP_KERNEL, bio);
if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
return errno_to_nvme_status(req, ret);
@@ -415,8 +438,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
- sector = le64_to_cpu(write_zeroes->slba) <<
- (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
(req->ns->blksize_shift - 9));
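As a worked example with illustrative numbers: for a namespace with 4 KiB logical blocks (blksize_shift = 12), slba = 16 and a 0's based length of 7 give sector = 16 << (12 - 9) = 128 and nr_sector = (7 + 1) << 3 = 64, i.e. eight logical blocks starting at LBA 16.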
@@ -433,9 +455,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
- struct nvme_command *cmd = req->cmd;
-
- switch (cmd->common.opcode) {
+ switch (req->cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
@@ -452,9 +472,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
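nvmet_report_invalid_opcode() folds the removed reporting into a shared helper; a sketch of the equivalent logic, with NVME_SC_DNR renamed to NVME_STATUS_DNR as elsewhere in this patch (the exact log level used by the shared helper may differ):

	u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
	{
		pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
			 req->sq->qid);

		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}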