Diffstat (limited to 'drivers/nvme')
 drivers/nvme/host/auth.c       |  4
 drivers/nvme/host/core.c       | 16
 drivers/nvme/host/fc.c         |  4
 drivers/nvme/host/pci.c        |  2
 drivers/nvme/host/tcp.c        |  2
 drivers/nvme/target/core.c     | 14
 drivers/nvme/target/fc.c       |  6
 drivers/nvme/target/passthru.c |  2
 drivers/nvme/target/rdma.c     |  6
 9 files changed, 37 insertions(+), 19 deletions(-)
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index f6ddbe553289..201fc8809a62 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -742,7 +742,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
"%s: qid %d failed to generate digest, error %d\n",
__func__, chap->qid, ret);
goto out_free_psk;
- };
+ }
dev_dbg(ctrl->device, "%s: generated digest %s\n",
__func__, digest);
ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
@@ -752,7 +752,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
"%s: qid %d failed to derive TLS psk, error %d\n",
__func__, chap->qid, ret);
goto out_free_digest;
- };
+ }
tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
ctrl->opts->host->nqn,
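Both hunks above drop a stray semicolon after the closing brace of an if body. The `};` is legal C (the extra semicolon is an empty statement, which is why this compiled), but checkpatch flags it and it silently detaches any `else` that might follow. A minimal standalone illustration, with toy values rather than the driver's code:

#include <stdio.h>

int main(void)
{
	int ret = -5;

	if (ret) {
		printf("error %d\n", ret);
	};	/* stray but legal: the ';' is an empty statement */

	/*
	 * Had an `else` followed the `};`, it would no longer bind to
	 * the `if` above and the file would fail to compile.
	 */
	return 0;
}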
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9d988f4cb87a..812c1565114f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3158,6 +3158,11 @@ static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
return ctrl->opts && ctrl->opts->discovery_nqn;
}
+static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->cntrltype == NVME_CTRL_ADMIN;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -3670,6 +3675,17 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
if (ret)
return ret;
+ if (nvme_admin_ctrl(ctrl)) {
+ /*
+ * An admin controller has one admin queue, but no I/O queues.
+ * Override queue_count so it only creates an admin queue.
+ */
+ dev_dbg(ctrl->device,
+ "Subsystem %s is an administrative controller",
+ ctrl->subsys->subnqn);
+ ctrl->queue_count = 1;
+ }
+
ret = nvme_configure_apst(ctrl);
if (ret < 0)
return ret;
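The new nvme_admin_ctrl() helper keys off the CNTRLTYPE value reported in the Identify Controller data; per the NVMe base specification an administrative controller exposes no I/O queues, so queue_count is clamped to the admin queue alone before any I/O queue allocation happens. A rough standalone model of that clamp, with simplified stand-in types rather than the driver's structures:

#include <stdio.h>

/*
 * Minimal stand-in for the driver's controller state; the value 3
 * follows the NVMe spec's CNTRLTYPE encoding for an administrative
 * controller.
 */
struct toy_ctrl {
	int cntrltype;
	unsigned int queue_count;	/* admin queue + I/O queues */
};

static int is_admin_ctrl(const struct toy_ctrl *c)
{
	return c->cntrltype == 3;
}

int main(void)
{
	struct toy_ctrl c = { .cntrltype = 3, .queue_count = 9 };

	/* Same idea as the hunk above: only the admin queue survives. */
	if (is_admin_ctrl(&c))
		c.queue_count = 1;

	printf("queue_count = %u\n", c.queue_count);	/* prints 1 */
	return 0;
}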
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 08a5ea3e9383..3e12d4683ac7 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1363,7 +1363,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
* down, and the related FC-NVME Association ID and Connection IDs
* become invalid.
*
- * The behavior of the fc-nvme initiator is such that it's
+ * The behavior of the fc-nvme initiator is such that its
* understanding of the association and connections will implicitly
* be torn down. The action is implicit as it may be due to a loss of
* connectivity with the fc-nvme target, so you may never get a
@@ -2777,7 +2777,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
* as WRITE ZEROES will return a non-zero rq payload_bytes yet
* there is no actual payload to be transferred.
* To get it right, key data transmission on there being 1 or
- * more physical segments in the sg list. If there is no
+ * more physical segments in the sg list. If there are no
* physical segments, there is no payload.
*/
if (blk_rq_nr_phys_segments(rq)) {
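The comment above captures a subtle block-layer detail: a request's payload byte count can be non-zero for commands like WRITE ZEROES even though no data moves on the wire, so the physical segment count is the trustworthy signal. A toy model of that distinction, using a hypothetical struct rather than the block layer's struct request:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical request, not the block layer's struct request. */
struct toy_rq {
	unsigned int payload_bytes;
	unsigned short nr_phys_segments;
};

static bool rq_moves_data(const struct toy_rq *rq)
{
	/* Key on segments, not payload_bytes, per the comment above. */
	return rq->nr_phys_segments != 0;
}

int main(void)
{
	/* WRITE ZEROES style request: bytes reported, nothing carried. */
	struct toy_rq rq = { .payload_bytes = 4096, .nr_phys_segments = 0 };

	printf("transfer data? %s\n", rq_moves_data(&rq) ? "yes" : "no");
	return 0;
}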
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 071efec25346..2c6d9506b172 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -935,7 +935,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
if (unlikely(iter->status))
- nvme_free_sgls(req);
+ nvme_unmap_data(req);
return iter->status;
}
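The one-line pci.c fix swaps a partial cleanup for the full one: when SGL setup fails partway, everything the mapping path has done so far must be unwound, not just the SGL allocation. The general shape of that pattern, with hypothetical names throughout:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical two-layer mapping state, not the driver's iod. */
struct toy_req {
	bool dma_mapped;
	bool sgl_built;
};

static void unmap_data(struct toy_req *r)
{
	/* Tear down every layer the setup may have completed. */
	if (r->sgl_built)  { r->sgl_built = false;  puts("free SGL"); }
	if (r->dma_mapped) { r->dma_mapped = false; puts("unmap DMA"); }
}

int main(void)
{
	struct toy_req r = { .dma_mapped = true, .sgl_built = true };

	/* Pretend SGL segment setup just reported an error ... */
	unmap_data(&r);		/* ... so unwind both layers, not one */
	return 0;
}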
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 9233f088fac8..c0fe8cfb7229 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2179,7 +2179,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
/*
* Only start IO queues for which we have allocated the tagset
- * and limitted it to the available queues. On reconnects, the
+ * and limited it to the available queues. On reconnects, the
* queue number might have changed.
*/
nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
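The min() on that last line is what the comment is defending: on a reconnect the controller may grant fewer I/O queues than the tagset was originally sized for, so only the smaller count may be started. A toy illustration of the clamp, with made-up numbers:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/*
	 * Hypothetical reconnect: the tagset was sized for 4 I/O queues,
	 * but the controller now grants only 2 (queue_count counts the
	 * admin queue too, hence the +1 on the tagset side).
	 */
	unsigned int tagset_hw_queues = 4, queue_count = 3;
	unsigned int nr_queues = min_u(tagset_hw_queues + 1, queue_count);

	printf("start admin + %u I/O queues\n", nr_queues - 1);
	return 0;
}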
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 884286f90688..0dd7bd99afa3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1960,24 +1960,24 @@ static int __init nvmet_init(void)
if (!nvmet_wq)
goto out_free_buffered_work_queue;
- error = nvmet_init_discovery();
+ error = nvmet_init_debugfs();
if (error)
goto out_free_nvmet_work_queue;
- error = nvmet_init_debugfs();
+ error = nvmet_init_discovery();
if (error)
- goto out_exit_discovery;
+ goto out_exit_debugfs;
error = nvmet_init_configfs();
if (error)
- goto out_exit_debugfs;
+ goto out_exit_discovery;
return 0;
-out_exit_debugfs:
- nvmet_exit_debugfs();
out_exit_discovery:
nvmet_exit_discovery();
+out_exit_debugfs:
+ nvmet_exit_debugfs();
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
@@ -1992,8 +1992,8 @@ out_destroy_bvec_cache:
static void __exit nvmet_exit(void)
{
nvmet_exit_configfs();
- nvmet_exit_debugfs();
nvmet_exit_discovery();
+ nvmet_exit_debugfs();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
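The reorder above makes teardown a strict mirror of bring-up: debugfs now initializes before discovery, so both the error labels in nvmet_init() and the call sequence in nvmet_exit() must release in exactly the reverse order. A standalone sketch of the goto-unwind idiom the hunk restores, with stubs in place of the nvmet_* calls:

#include <stdio.h>

/* Stubs standing in for the nvmet_init_*()/nvmet_exit_*() pairs. */
static int  init_debugfs(void)   { puts("init debugfs");   return 0; }
static void exit_debugfs(void)   { puts("exit debugfs"); }
static int  init_discovery(void) { puts("init discovery"); return 0; }
static void exit_discovery(void) { puts("exit discovery"); }
static int  init_configfs(void)  { puts("init configfs");  return -1; }

static int module_init(void)
{
	int error;

	error = init_debugfs();
	if (error)
		goto out;
	error = init_discovery();
	if (error)
		goto out_exit_debugfs;
	error = init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

	/* Unwind in exactly the reverse order of initialization. */
out_exit_discovery:
	exit_discovery();
out_exit_debugfs:
	exit_debugfs();
out:
	return error;
}

int main(void)
{
	return module_init() ? 1 : 0;
}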
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 25598a46bf0d..a9b18c051f5b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -459,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
* down, and the related FC-NVME Association ID and Connection IDs
* become invalid.
*
- * The behavior of the fc-nvme target is such that it's
+ * The behavior of the fc-nvme target is such that its
* understanding of the association and connections will implicitly
* be torn down. The action is implicit as it may be due to a loss of
* connectivity with the fc-nvme host, so the target may never get a
@@ -2313,7 +2313,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
if (ret) {
/*
- * should be ok to set w/o lock as its in the thread of
+ * should be ok to set w/o lock as it's in the thread of
* execution (not an async timer routine) and doesn't
* contend with any clearing action
*/
@@ -2629,7 +2629,7 @@ transport_error:
* and the api of the FC LLDD which may issue a hw command to send the
* response, but the LLDD may not get the hw completion for that command
* and upcall the nvmet_fc layer before a new command may be
- * asynchronously received - its possible for a command to be received
+ * asynchronously received - it's possible for a command to be received
* before the LLDD and nvmet_fc have recycled the job structure. It gives
* the appearance of more commands received than fits in the sq.
* To alleviate this scenario, a temporary queue is maintained in the
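The scenario this comment walks through, commands arriving before finished job structures have been recycled, is the classic bounded-pool-plus-overflow pattern. A toy model of the temporary queue idea, using hypothetical data structures rather than the nvmet-fc implementation:

#include <stdio.h>

#define SQ_SLOTS 2

struct cmd { int id; struct cmd *next; };

static int busy_slots;
static struct cmd *deferred_head, **deferred_tail = &deferred_head;

static void cmd_received(struct cmd *c)
{
	if (busy_slots < SQ_SLOTS) {
		busy_slots++;
		printf("cmd %d: handled immediately\n", c->id);
	} else {
		/* sq appears full: park on the temporary queue. */
		c->next = NULL;
		*deferred_tail = c;
		deferred_tail = &c->next;
		printf("cmd %d: deferred\n", c->id);
	}
}

static void slot_recycled(void)
{
	struct cmd *c = deferred_head;

	if (c) {
		/* The freed slot is consumed by a deferred command. */
		deferred_head = c->next;
		if (!deferred_head)
			deferred_tail = &deferred_head;
		printf("cmd %d: picked up from deferred queue\n", c->id);
	} else {
		busy_slots--;
	}
}

int main(void)
{
	struct cmd a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	cmd_received(&a);
	cmd_received(&b);
	cmd_received(&c);	/* pool exhausted: cmd 3 is deferred */
	slot_recycled();	/* a job finishes: cmd 3 runs */
	return 0;
}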
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 3b4b0df8f879..0c361b1e3566 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -533,6 +533,8 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
case NVME_FEAT_HOST_ID:
req->execute = nvmet_execute_get_features;
return NVME_SC_SUCCESS;
+ case NVME_FEAT_FDP:
+ return nvmet_setup_passthru_command(req);
default:
return nvmet_passthru_get_set_features(req);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 67f61c67c167..0485e25ab797 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1731,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
* We registered an ib_client to handle device removal for queues,
* so we only need to handle the listening port cm_ids. In this case
* we nullify the priv to prevent double cm_id destruction and destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * the cm_id implicitly by returning a non-zero rc to the callout.
*/
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
@@ -1742,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* This is a queue cm_id. we have registered
* an ib_client to handle queues removal
- * so don't interfear and just return.
+ * so don't interfere and just return.
*/
return 0;
}
@@ -1760,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* We need to return 1 so that the core will destroy
- * it's own ID. What a great API design..
+ * its own ID. What a great API design..
*/
return 1;
}