Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/core.c        | 16 +++++++---------
-rw-r--r--  drivers/nvme/target/fc.c          |  6 +++---
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c |  6 +++++-
-rw-r--r--  drivers/nvme/target/passthru.c    |  6 ++++--
-rw-r--r--  drivers/nvme/target/pci-epf.c     | 25 +++++++++++++++++--------
-rw-r--r--  drivers/nvme/target/rdma.c        |  6 +++---
-rw-r--r--  drivers/nvme/target/zns.c         |  2 +-
7 files changed, 40 insertions(+), 27 deletions(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 175c5b6d4dd5..0dd7bd99afa3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -581,8 +581,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	if (ns->enabled)
 		goto out_unlock;
 
-	ret = -EMFILE;
-
 	ret = nvmet_bdev_ns_enable(ns);
 	if (ret == -ENOTBLK)
 		ret = nvmet_file_ns_enable(ns);
@@ -1962,24 +1960,24 @@ static int __init nvmet_init(void)
 	if (!nvmet_wq)
 		goto out_free_buffered_work_queue;
 
-	error = nvmet_init_discovery();
+	error = nvmet_init_debugfs();
 	if (error)
 		goto out_free_nvmet_work_queue;
 
-	error = nvmet_init_debugfs();
+	error = nvmet_init_discovery();
 	if (error)
-		goto out_exit_discovery;
+		goto out_exit_debugfs;
 
 	error = nvmet_init_configfs();
 	if (error)
-		goto out_exit_debugfs;
+		goto out_exit_discovery;
 
 	return 0;
 
-out_exit_debugfs:
-	nvmet_exit_debugfs();
 out_exit_discovery:
 	nvmet_exit_discovery();
+out_exit_debugfs:
+	nvmet_exit_debugfs();
 out_free_nvmet_work_queue:
 	destroy_workqueue(nvmet_wq);
 out_free_buffered_work_queue:
@@ -1994,8 +1992,8 @@ out_destroy_bvec_cache:
 static void __exit nvmet_exit(void)
 {
 	nvmet_exit_configfs();
-	nvmet_exit_debugfs();
 	nvmet_exit_discovery();
+	nvmet_exit_debugfs();
 	ida_destroy(&cntlid_ida);
 	destroy_workqueue(nvmet_wq);
 	destroy_workqueue(buffered_io_wq);
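
Note: the reordering above makes teardown the mirror image of setup. nvmet_init()
now brings up debugfs before discovery, the error labels unwind in exact reverse
order of acquisition, and nvmet_exit() releases resources in the opposite order
of nvmet_init(). A minimal sketch of this goto-unwind idiom, with illustrative
names (a_init() and friends are not the actual nvmet symbols):

static int __init example_init(void)
{
	int error;

	error = a_init();		/* acquired first ... */
	if (error)
		return error;

	error = b_init();
	if (error)
		goto out_exit_a;

	error = c_init();
	if (error)
		goto out_exit_b;

	return 0;

out_exit_b:				/* ... released in reverse order */
	b_exit();
out_exit_a:
	a_exit();
	return error;
}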
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 25598a46bf0d..a9b18c051f5b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -459,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
  * down, and the related FC-NVME Association ID and Connection IDs
  * become invalid.
  *
- * The behavior of the fc-nvme target is such that it's
+ * The behavior of the fc-nvme target is such that its
  * understanding of the association and connections will implicitly
  * be torn down. The action is implicit as it may be due to a loss of
  * connectivity with the fc-nvme host, so the target may never get a
@@ -2313,7 +2313,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
 	if (ret) {
 		/*
-		 * should be ok to set w/o lock as its in the thread of
+		 * should be ok to set w/o lock as it's in the thread of
 		 * execution (not an async timer routine) and doesn't
 		 * contend with any clearing action
 		 */
@@ -2629,7 +2629,7 @@ transport_error:
  * and the api of the FC LLDD which may issue a hw command to send the
  * response, but the LLDD may not get the hw completion for that command
  * and upcall the nvmet_fc layer before a new command may be
- * asynchronously received - its possible for a command to be received
+ * asynchronously received - it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. It gives
 * the appearance of more commands received than fits in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index eba42df2f821..8d246b8ca604 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -46,6 +46,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
 	id->npda = id->npdg;
 	/* NOWS = Namespace Optimal Write Size */
 	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+	/* Set WZDS and DRB if device supports unmapped write zeroes */
+	if (bdev_write_zeroes_unmap_sectors(bdev))
+		id->dlfeat = (1 << 3) | 0x1;
 }
 
 void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -65,7 +69,7 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
 		return;
 
 	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
-		ns->metadata_size = bi->tuple_size;
+		ns->metadata_size = bi->metadata_size;
 		if (bi->flags & BLK_INTEGRITY_REF_TAG)
 			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
 		else
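
Note: the new dlfeat assignment packs two Identify Namespace fields: bit 3
(WZDS) advertises that the Deallocate bit of the Write Zeroes command is
supported, and bits 2:0 set to 001b (DRB) advertise that deallocated blocks
read back as zeroes. A commented sketch of the same logic (the helper and
its layout are illustrative; only bdev_write_zeroes_unmap_sectors() and the
dlfeat field come from the patch):

static void example_set_dlfeat(struct block_device *bdev, struct nvme_id_ns *id)
{
	/* Bits 2:0 = 001b (DRB): deallocated blocks read back as all zeroes. */
	u8 dlfeat = 0x1;

	/* Bit 3 (WZDS): Deallocate bit supported in the Write Zeroes command. */
	dlfeat |= 1 << 3;

	if (bdev_write_zeroes_unmap_sectors(bdev))
		id->dlfeat = dlfeat;
}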
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b7515c53829b..0c361b1e3566 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -106,7 +106,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 				      pctrl->max_hw_sectors);
 
 	/*
-	 * nvmet_passthru_map_sg is limitted to using a single bio so limit
+	 * nvmet_passthru_map_sg is limited to using a single bio so limit
 	 * the mdts based on BIO_MAX_VECS as well
 	 */
 	max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
@@ -147,7 +147,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 	 * When passthru controller is setup using nvme-loop transport it will
 	 * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
 	 * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
-	 * code path with duplicate ctr subsynqn. In order to prevent that we
+	 * code path with duplicate ctrl subsysnqn. In order to prevent that we
 	 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
 	 */
 	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
@@ -533,6 +533,8 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 		case NVME_FEAT_HOST_ID:
 			req->execute = nvmet_execute_get_features;
 			return NVME_SC_SUCCESS;
+		case NVME_FEAT_FDP:
+			return nvmet_setup_passthru_command(req);
 		default:
 			return nvmet_passthru_get_set_features(req);
 		}
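
Note: the added NVME_FEAT_FDP case forwards Get Features for Flexible Data
Placement to the underlying controller via nvmet_setup_passthru_command()
rather than the generic get/set handler, so the host sees the backing
device's own FDP configuration. A condensed sketch of the dispatch shape
(simplified from the hunk above, not the full switch):

switch (le32_to_cpu(req->cmd->features.fid)) {
case NVME_FEAT_HOST_ID:
	/* Emulated locally by the target. */
	req->execute = nvmet_execute_get_features;
	return NVME_SC_SUCCESS;
case NVME_FEAT_FDP:
	/* Device-specific: forward to the backing controller. */
	return nvmet_setup_passthru_command(req);
default:
	return nvmet_passthru_get_set_features(req);
}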
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index a4295a5b8d28..2e78397a7373 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1242,8 +1242,11 @@ static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
 
 	iod->status = le16_to_cpu(req->cqe->status) >> 1;
 
-	/* If we have no data to transfer, directly complete the command. */
-	if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
+	/*
+	 * If the command failed or we have no data to transfer, complete the
+	 * command immediately.
+	 */
+	if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
 		nvmet_pci_epf_complete_iod(iod);
 		return;
 	}
@@ -1604,8 +1607,13 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
 		goto complete;
 	}
 
+	/*
+	 * If nvmet_req_init() fails (e.g., unsupported opcode) it will call
+	 * __nvmet_req_complete() internally which will call
+	 * nvmet_pci_epf_queue_response() and will complete the command directly.
+	 */
 	if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
-		goto complete;
+		return;
 
 	iod->data_len = nvmet_req_transfer_len(req);
 	if (iod->data_len) {
@@ -1643,10 +1651,11 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
 	wait_for_completion(&iod->done);
 
-	if (iod->status == NVME_SC_SUCCESS) {
-		WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
-		nvmet_pci_epf_transfer_iod_data(iod);
-	}
+	if (iod->status != NVME_SC_SUCCESS)
+		return;
+
+	WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
+	nvmet_pci_epf_transfer_iod_data(iod);
 
 complete:
 	nvmet_pci_epf_complete_iod(iod);
 
@@ -1860,7 +1869,7 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
 	ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
 	if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
 		dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
-			ctrl->io_sqes, sizeof(struct nvme_completion));
+			ctrl->io_cqes, sizeof(struct nvme_completion));
 		goto err;
 	}
 
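
Note: taken together, the exec-path changes ensure each iod is completed
exactly once: both a failed nvmet_req_init() and a failed command are
completed through nvmet_pci_epf_queue_response(), so the worker must return
instead of falling through to the complete label. A simplified sketch of the
resulting data-transfer path (illustrative shape, not the literal driver code):

static void example_exec_iod(struct nvmet_pci_epf_iod *iod, struct nvmet_req *req)
{
	if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
		return;		/* nvmet core already completed the command */

	req->execute(req);
	wait_for_completion(&iod->done);

	if (iod->status != NVME_SC_SUCCESS)
		return;		/* queue_response() completed the failed command */

	/* Success with data for the host: transfer it, then complete. */
	WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
	nvmet_pci_epf_transfer_iod_data(iod);
	nvmet_pci_epf_complete_iod(iod);
}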
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 67f61c67c167..0485e25ab797 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1731,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
  * We registered an ib_client to handle device removal for queues,
  * so we only need to handle the listening port cm_ids. In this case
  * we nullify the priv to prevent double cm_id destruction and destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * the cm_id implicitly by returning a non-zero rc to the callout.
  */
 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
 		struct nvmet_rdma_queue *queue)
@@ -1742,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
 		/*
 		 * This is a queue cm_id. we have registered
 		 * an ib_client to handle queues removal
-		 * so don't interfear and just return.
+		 * so don't interfere and just return.
 		 */
 		return 0;
 	}
@@ -1760,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
 
 	/*
 	 * We need to return 1 so that the core will destroy
-	 * it's own ID. What a great API design..
+	 * its own ID. What a great API design..
 	 */
 	return 1;
 }
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 29a60fabfcc8..15a579cf528c 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -541,7 +541,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 	struct bio *bio;
 	int sg_cnt;
 
-	/* Request is completed on len mismatch in nvmet_check_transter_len() */
+	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
 	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
 		return;