Diffstat (limited to 'drivers/nvme/target/io-cmd-bdev.c')
-rw-r--r--   drivers/nvme/target/io-cmd-bdev.c   338
1 file changed, 251 insertions(+), 87 deletions(-)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index b6d030d3259f..8d246b8ca604 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -1,50 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * NVMe I/O command implementation.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
+#include <linux/memremap.h>
 #include <linux/module.h>
 #include "nvmet.h"
 
+void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
+{
+	/* Logical blocks per physical block, 0's based. */
+	const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
+				      bdev_logical_block_size(bdev));
+
+	/*
+	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
+	 * NAWUPF, and NACWU are defined for this namespace and should be
+	 * used by the host for this namespace instead of the AWUN, AWUPF,
+	 * and ACWU fields in the Identify Controller data structure. If
+	 * any of these fields are zero that means that the corresponding
+	 * field from the identify controller data structure should be used.
+	 */
+	id->nsfeat |= 1 << 1;
+	id->nawun = lpp0b;
+	id->nawupf = lpp0b;
+	id->nacwu = lpp0b;
+
+	/*
+	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
+	 * NOWS are defined for this namespace and should be used by
+	 * the host for I/O optimization.
+	 */
+	id->nsfeat |= 1 << 4;
+	/* NPWG = Namespace Preferred Write Granularity. 0's based */
+	id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
+	/* NPWA = Namespace Preferred Write Alignment. 0's based */
+	id->npwa = id->npwg;
+	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
+	id->npdg = to0based(bdev_discard_granularity(bdev) /
+			    bdev_logical_block_size(bdev));
+	/* NPDA = Namespace Preferred Deallocate Alignment */
+	id->npda = id->npdg;
+	/* NOWS = Namespace Optimal Write Size */
+	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+	/* Set WZDS and DRB if device supports unmapped write zeroes */
+	if (bdev_write_zeroes_unmap_sectors(bdev))
+		id->dlfeat = (1 << 3) | 0x1;
+}
+
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+	if (ns->bdev_file) {
+		fput(ns->bdev_file);
+		ns->bdev = NULL;
+		ns->bdev_file = NULL;
+	}
+}
+
+static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
+{
+	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
+
+	if (!bi)
+		return;
+
+	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
+		ns->metadata_size = bi->metadata_size;
+		if (bi->flags & BLK_INTEGRITY_REF_TAG)
+			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
+		else
+			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+	} else {
+		ns->metadata_size = 0;
+	}
+}
+
 int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
 {
 	int ret;
 
-	ns->bdev = blkdev_get_by_path(ns->device_path,
-			FMODE_READ | FMODE_WRITE, NULL);
-	if (IS_ERR(ns->bdev)) {
-		ret = PTR_ERR(ns->bdev);
+	/*
+	 * When the buffered_io namespace attribute is enabled, the user wants
+	 * this block device to be used as a file, so the block device can
+	 * take advantage of the cache.
+	 */
+	if (ns->buffered_io)
+		return -ENOTBLK;
+
+	ns->bdev_file = bdev_file_open_by_path(ns->device_path,
+				BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
+	if (IS_ERR(ns->bdev_file)) {
+		ret = PTR_ERR(ns->bdev_file);
 		if (ret != -ENOTBLK) {
-			pr_err("failed to open block device %s: (%ld)\n",
-					ns->device_path, PTR_ERR(ns->bdev));
+			pr_err("failed to open block device %s: (%d)\n",
+					ns->device_path, ret);
 		}
-		ns->bdev = NULL;
+		ns->bdev_file = NULL;
 		return ret;
 	}
-	ns->size = i_size_read(ns->bdev->bd_inode);
+	ns->bdev = file_bdev(ns->bdev_file);
+	ns->size = bdev_nr_bytes(ns->bdev);
 	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+	ns->pi_type = 0;
+	ns->metadata_size = 0;
+	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+		nvmet_bdev_ns_enable_integrity(ns);
+
+	if (bdev_is_zoned(ns->bdev)) {
+		if (!nvmet_bdev_zns_enable(ns)) {
+			nvmet_bdev_ns_disable(ns);
+			return -EINVAL;
+		}
+		ns->csi = NVME_CSI_ZNS;
+	}
+
 	return 0;
 }
 
-void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
 {
-	if (ns->bdev) {
-		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
-		ns->bdev = NULL;
-	}
+	ns->size = bdev_nr_bytes(ns->bdev);
 }
 
-static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 {
 	u16 status = NVME_SC_SUCCESS;
 
@@ -54,36 +137,28 @@ static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 	 * Right now there exists M : 1 mapping between block layer error
 	 * to the NVMe status code (see nvme_error_status()). For consistency,
 	 * when we reverse map we use most appropriate NVMe Status code from
-	 * the group of the NVMe staus codes used in the nvme_error_status().
+	 * the group of the NVMe status codes used in the nvme_error_status().
 	 */
 	switch (blk_sts) {
 	case BLK_STS_NOSPC:
-		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+		status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_rw_command, length);
 		break;
 	case BLK_STS_TARGET:
-		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
 		break;
 	case BLK_STS_NOTSUPP:
+		status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		switch (req->cmd->common.opcode) {
-		case nvme_cmd_dsm:
-		case nvme_cmd_write_zeroes:
-			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
-			break;
-		default:
-			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
-		}
 		break;
 	case BLK_STS_MEDIUM:
 		status = NVME_SC_ACCESS_DENIED;
 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
 		break;
 	case BLK_STS_IOERR:
-		/* fallthru */
 	default:
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
 	}
 
@@ -107,17 +182,77 @@ static void nvmet_bio_done(struct bio *bio)
 	struct nvmet_req *req = bio->bi_private;
 
 	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
-	if (bio != &req->b.inline_bio)
-		bio_put(bio);
+	nvmet_req_bio_put(req, bio);
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+				struct sg_mapping_iter *miter)
+{
+	struct blk_integrity *bi;
+	struct bio_integrity_payload *bip;
+	int rc;
+	size_t resid, len;
+
+	bi = bdev_get_integrity(req->ns->bdev);
+	if (unlikely(!bi)) {
+		pr_err("Unable to locate bio_integrity\n");
+		return -ENODEV;
+	}
+
+	bip = bio_integrity_alloc(bio, GFP_NOIO,
+					bio_max_segs(req->metadata_sg_cnt));
+	if (IS_ERR(bip)) {
+		pr_err("Unable to allocate bio_integrity_payload\n");
+		return PTR_ERR(bip);
+	}
+
+	/* virtual start sector must be in integrity interval units */
+	bip_set_seed(bip, bio->bi_iter.bi_sector >>
+		     (bi->interval_exp - SECTOR_SHIFT));
+
+	resid = bio_integrity_bytes(bi, bio_sectors(bio));
+	while (resid > 0 && sg_miter_next(miter)) {
+		len = min_t(size_t, miter->length, resid);
+		rc = bio_integrity_add_page(bio, miter->page, len,
+					    offset_in_page(miter->addr));
+		if (unlikely(rc != len)) {
+			pr_err("bio_integrity_add_page() failed; %d\n", rc);
+			sg_miter_stop(miter);
+			return -ENOMEM;
+		}
+
+		resid -= len;
+		if (len < miter->length)
+			miter->consumed -= miter->length - len;
+	}
+	sg_miter_stop(miter);
+
+	return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+				struct sg_mapping_iter *miter)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
-	int sg_cnt = req->sg_cnt;
+	unsigned int sg_cnt = req->sg_cnt;
 	struct bio *bio;
 	struct scatterlist *sg;
+	struct blk_plug plug;
 	sector_t sector;
-	int op, op_flags = 0, i;
+	blk_opf_t opf;
+	int i, rc;
+	struct sg_mapping_iter prot_miter;
+	unsigned int iter_flags;
+	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
+
+	if (!nvmet_check_transfer_len(req, total_len))
+		return;
 
 	if (!req->sg_cnt) {
 		nvmet_req_complete(req, 0);
@@ -125,41 +260,57 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	}
 
 	if (req->cmd->rw.opcode == nvme_cmd_write) {
-		op = REQ_OP_WRITE;
-		op_flags = REQ_SYNC | REQ_IDLE;
+		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
-			op_flags |= REQ_FUA;
+			opf |= REQ_FUA;
+		iter_flags = SG_MITER_TO_SG;
 	} else {
-		op = REQ_OP_READ;
+		opf = REQ_OP_READ;
+		iter_flags = SG_MITER_FROM_SG;
 	}
 
+	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR))
+		opf |= REQ_FAILFAST_DEV;
+
 	if (is_pci_p2pdma_page(sg_page(req->sg)))
-		op_flags |= REQ_NOMERGE;
+		opf |= REQ_NOMERGE;
 
-	sector = le64_to_cpu(req->cmd->rw.slba);
-	sector <<= (req->ns->blksize_shift - 9);
+	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
 
-	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+	if (nvmet_use_inline_bvec(req)) {
 		bio = &req->b.inline_bio;
-		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+		bio_init(bio, req->ns->bdev, req->inline_bvec,
+			 ARRAY_SIZE(req->inline_bvec), opf);
 	} else {
-		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+		bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
+				GFP_KERNEL);
 	}
-	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_private = req;
 	bio->bi_end_io = nvmet_bio_done;
-	bio_set_op_attrs(bio, op, op_flags);
+
+	blk_start_plug(&plug);
+	if (req->metadata_len)
+		sg_miter_start(&prot_miter, req->metadata_sg,
+			       req->metadata_sg_cnt, iter_flags);
 
 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 				!= sg->length) {
 			struct bio *prev = bio;
 
-			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
-			bio_set_dev(bio, req->ns->bdev);
+			if (req->metadata_len) {
+				rc = nvmet_bdev_alloc_bip(req, bio,
+							  &prot_miter);
+				if (unlikely(rc)) {
+					bio_io_error(bio);
+					return;
+				}
+			}
+
+			bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+					opf, GFP_KERNEL);
 			bio->bi_iter.bi_sector = sector;
-			bio_set_op_attrs(bio, op, op_flags);
 
 			bio_chain(bio, prev);
 			submit_bio(prev);
@@ -169,26 +320,45 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 		sg_cnt--;
 	}
 
+	if (req->metadata_len) {
+		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+		if (unlikely(rc)) {
+			bio_io_error(bio);
+			return;
+		}
+	}
+
 	submit_bio(bio);
+	blk_finish_plug(&plug);
 }
 
 static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 {
 	struct bio *bio = &req->b.inline_bio;
 
-	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-	bio_set_dev(bio, req->ns->bdev);
+	if (!bdev_write_cache(req->ns->bdev)) {
+		nvmet_req_complete(req, NVME_SC_SUCCESS);
+		return;
+	}
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
+
+	bio_init(bio, req->ns->bdev, req->inline_bvec,
+		 ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
 	bio->bi_private = req;
 	bio->bi_end_io = nvmet_bio_done;
-	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	submit_bio(bio);
 }
 
 u16 nvmet_bdev_flush(struct nvmet_req *req)
 {
-	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
-		return NVME_SC_INTERNAL | NVME_SC_DNR;
+	if (!bdev_write_cache(req->ns->bdev))
+		return 0;
+
+	if (blkdev_issue_flush(req->ns->bdev))
+		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
 	return 0;
 }
 
@@ -199,14 +369,14 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
 	int ret;
 
 	ret = __blkdev_issue_discard(ns->bdev,
-			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+			nvmet_lba_to_sect(ns, range->slba),
 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-			GFP_KERNEL, 0, bio);
-
-	if (ret)
+			GFP_KERNEL, bio);
+	if (ret && ret != -EOPNOTSUPP) {
 		req->error_slba = le64_to_cpu(range->slba);
-
-	return blk_to_nvme_status(req, errno_to_blk_status(ret));
+		return errno_to_nvme_status(req, ret);
+	}
+	return NVME_SC_SUCCESS;
 }
 
 static void nvmet_bdev_execute_discard(struct nvmet_req *req)
@@ -230,12 +400,10 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
 	if (bio) {
 		bio->bi_private = req;
 		bio->bi_end_io = nvmet_bio_done;
-		if (status) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
-		} else {
+		if (status)
+			bio_io_error(bio);
+		else
 			submit_bio(bio);
-		}
 	} else {
 		nvmet_req_complete(req, status);
 	}
@@ -243,6 +411,9 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
 
 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+		return;
+
 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
 	case NVME_DSMGMT_AD:
 		nvmet_bdev_execute_discard(req);
@@ -260,54 +431,47 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 {
 	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
 	struct bio *bio = NULL;
-	u16 status = NVME_SC_SUCCESS;
 	sector_t sector;
 	sector_t nr_sector;
 	int ret;
 
-	sector = le64_to_cpu(write_zeroes->slba) <<
-		(req->ns->blksize_shift - 9);
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
+
+	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
 	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
 		(req->ns->blksize_shift - 9));
 
 	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
 			GFP_KERNEL, &bio, 0);
-	status = blk_to_nvme_status(req, errno_to_blk_status(ret));
 	if (bio) {
 		bio->bi_private = req;
 		bio->bi_end_io = nvmet_bio_done;
 		submit_bio(bio);
 	} else {
-		nvmet_req_complete(req, status);
+		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
 	}
 }
 
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 {
-	struct nvme_command *cmd = req->cmd;
-
-	switch (cmd->common.opcode) {
+	switch (req->cmd->common.opcode) {
 	case nvme_cmd_read:
 	case nvme_cmd_write:
 		req->execute = nvmet_bdev_execute_rw;
-		req->data_len = nvmet_rw_len(req);
+		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+			req->metadata_len = nvmet_rw_metadata_len(req);
 		return 0;
 	case nvme_cmd_flush:
 		req->execute = nvmet_bdev_execute_flush;
-		req->data_len = 0;
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_bdev_execute_dsm;
-		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-			sizeof(struct nvme_dsm_range);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_bdev_execute_write_zeroes;
 		return 0;
 	default:
-		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
-		       req->sq->qid);
-		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return nvmet_report_invalid_opcode(req);
	}
 }
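
A note for readers tracing the arithmetic in nvmet_bdev_set_limits() above: every Identify Namespace field it reports (NAWUN, NAWUPF, NACWU, NPWG, NPDG, NOWS) is 0's based, i.e. the stored value is the count of logical blocks minus one. Below is a minimal standalone sketch of that conversion. The helper name to0based_sketch is illustrative, the [1, 65536] clamp is assumed from the kernel's to0based() helper in drivers/nvme/target/nvmet.h, and the __le16 endian conversion is omitted.

#include <stdint.h>
#include <stdio.h>

/*
 * 0's-based conversion sketch: clamp a 1's-based count of logical
 * blocks to [1, 65536], then subtract one, so a stored value of 0
 * encodes "1 logical block". The in-kernel helper additionally
 * returns the result as a little-endian __le16.
 */
static uint16_t to0based_sketch(uint32_t blocks)
{
	if (blocks < 1)
		blocks = 1;
	if (blocks > 65536)
		blocks = 65536;
	return (uint16_t)(blocks - 1);
}

int main(void)
{
	/* e.g. 4096-byte physical blocks on a 512-byte logical-block device */
	uint32_t lpp = 4096 / 512; /* 8 logical blocks per physical block */
	printf("lpp0b = %u\n", (unsigned)to0based_sketch(lpp)); /* prints 7 */
	return 0;
}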

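Similarly, the open-coded sector = le64_to_cpu(slba) << (blksize_shift - 9) computations that this diff replaces with nvmet_lba_to_sect() convert a namespace LBA into 512-byte block-layer sectors. A standalone sketch of that arithmetic, again with the endian conversion omitted and the function name chosen for illustration:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9 /* block-layer sectors are always 512 bytes */

/*
 * LBA-to-sector sketch: a namespace whose logical blocks are
 * (1 << blksize_shift) bytes holds (1 << (blksize_shift - 9))
 * 512-byte sectors per block, so shifting the LBA by the shift
 * difference yields the starting block-layer sector.
 */
static uint64_t lba_to_sect_sketch(uint64_t lba, unsigned int blksize_shift)
{
	return lba << (blksize_shift - SECTOR_SHIFT);
}

int main(void)
{
	/* LBA 16 on a 4096-byte-block namespace (blksize_shift = 12) */
	printf("%llu\n",
	       (unsigned long long)lba_to_sect_sketch(16, 12)); /* 128 */
	return 0;
}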