author    Linus Torvalds <torvalds@linux-foundation.org>    2021-11-01 09:27:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-11-01 09:27:38 -0700
commit    643a7234e0960cf63f1a51a15cfc969fafcbabad (patch)
tree      e6522bf2e6a74148952af11a03606b152ee1e251 /drivers/nvme
parent    33c8846c814c1c27c6e33af005042d15061f948b (diff)
parent    15dfc662ef31a20b59097d59b0792b06770255fa (diff)
Merge tag 'for-5.16/drivers-2021-10-29' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:

 - paride driver cleanups (Christoph)

 - Remove cryptoloop support (Christoph)

 - null_blk poll support (me)

 - Now that add_disk() supports proper error handling, add it to various
   drivers (Luis)

 - Make ataflop actually work again (Michael)

 - s390 dasd fixes (Stefan, Heiko)

 - nbd fixes (Yu, Ye)

 - Remove redundant wq flush in mtip32xx (Christophe)

 - NVMe updates
      - fix a multipath partition scanning deadlock (Hannes Reinecke)
      - generate uevent once a multipath namespace is operational again
        (Hannes Reinecke)
      - support unique discovery controller NQNs (Hannes Reinecke)
      - fix use-after-free when a port is removed (Israel Rukshin)
      - clear shadow doorbell memory on resets (Keith Busch)
      - use struct_size (Len Baker)
      - add error handling support for add_disk (Luis Chamberlain)
      - limit the maximal queue size for RDMA controllers (Max Gurtovoy)
      - use a few more symbolic names (Max Gurtovoy)
      - fix error code in nvme_rdma_setup_ctrl (Max Gurtovoy)
      - add support for ->map_queues on FC (Saurav Kashyap)
      - support the current discovery subsystem entry (Hannes Reinecke)
      - use flex_array_size and struct_size (Len Baker)

 - bcache fixes (Christoph, Coly, Chao, Lin, Qing)

 - MD updates (Christoph, Guoqing, Xiao)

 - Misc fixes (Dan, Ding, Jiapeng, Shin'ichiro, Ye)

* tag 'for-5.16/drivers-2021-10-29' of git://git.kernel.dk/linux-block: (117 commits)
  null_blk: Fix handling of submit_queues and poll_queues attributes
  block: ataflop: Fix warning comparing pointer to 0
  bcache: replace snprintf in show functions with sysfs_emit
  bcache: move uapi header bcache.h to bcache code directory
  nvmet: use flex_array_size and struct_size
  nvmet: register discovery subsystem as 'current'
  nvmet: switch check for subsystem type
  nvme: add new discovery log page entry definitions
  block: ataflop: more blk-mq refactoring fixes
  block: remove support for cryptoloop and the xor transfer
  mtd: add add_disk() error handling
  rnbd: add error handling support for add_disk()
  um/drivers/ubd_kern: add error handling support for add_disk()
  m68k/emu/nfblock: add error handling support for add_disk()
  xen-blkfront: add error handling support for add_disk()
  bcache: add error handling support for add_disk()
  dm: add add_disk() error handling
  block: aoe: fixup coccinelle warnings
  nvmet: use struct_size over open coded arithmetic
  nvme: drop scan_lock and always kick requeue list when removing namespaces
  ...
Diffstat (limited to 'drivers/nvme')
-rw-r--r-- drivers/nvme/host/core.c | 50
-rw-r--r-- drivers/nvme/host/fabrics.c | 6
-rw-r--r-- drivers/nvme/host/fabrics.h | 8
-rw-r--r-- drivers/nvme/host/fc.c | 26
-rw-r--r-- drivers/nvme/host/multipath.c | 32
-rw-r--r-- drivers/nvme/host/nvme.h | 1
-rw-r--r-- drivers/nvme/host/pci.c | 9
-rw-r--r-- drivers/nvme/host/rdma.c | 11
-rw-r--r-- drivers/nvme/host/tcp.c | 2
-rw-r--r-- drivers/nvme/host/zns.c | 2
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 18
-rw-r--r-- drivers/nvme/target/configfs.c | 41
-rw-r--r-- drivers/nvme/target/core.c | 18
-rw-r--r-- drivers/nvme/target/discovery.c | 19
-rw-r--r-- drivers/nvme/target/fabrics-cmd.c | 3
-rw-r--r-- drivers/nvme/target/nvmet.h | 6
-rw-r--r-- drivers/nvme/target/rdma.c | 30
-rw-r--r-- drivers/nvme/target/tcp.c | 16
18 files changed, 259 insertions, 39 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index eb284f45fc44..838b5e2058be 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -203,7 +203,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
dev_info(ctrl->device,
- "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
+ "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
flush_work(&ctrl->reset_work);
nvme_stop_ctrl(ctrl);
@@ -815,6 +815,7 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
@@ -866,6 +867,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_IOERR;
}
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
@@ -882,6 +884,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
+
if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
return nvme_setup_discard(ns, req, cmnd);
@@ -915,9 +919,15 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd->rw.opcode = op;
+ cmnd->rw.flags = 0;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->rw.rsvd2 = 0;
+ cmnd->rw.metadata = 0;
cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ cmnd->rw.reftag = 0;
+ cmnd->rw.apptag = 0;
+ cmnd->rw.appmask = 0;
if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
@@ -974,10 +984,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
- if (!(req->rq_flags & RQF_DONTPREP)) {
+ if (!(req->rq_flags & RQF_DONTPREP))
nvme_clear_nvme_request(req);
- memset(cmd, 0, sizeof(*cmd));
- }
switch (req_op(req)) {
case REQ_OP_DRV_IN:
@@ -2593,6 +2601,24 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+static ssize_t nvme_subsys_show_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ switch (subsys->subtype) {
+ case NVME_NQN_DISC:
+ return sysfs_emit(buf, "discovery\n");
+ case NVME_NQN_NVME:
+ return sysfs_emit(buf, "nvm\n");
+ default:
+ return sysfs_emit(buf, "reserved\n");
+ }
+}
+static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
+
#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2613,6 +2639,7 @@ static struct attribute *nvme_subsys_attrs[] = {
&subsys_attr_serial.attr,
&subsys_attr_firmware_rev.attr,
&subsys_attr_subsysnqn.attr,
+ &subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
&subsys_attr_iopolicy.attr,
#endif
@@ -2683,6 +2710,21 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
+
+ /* Versions prior to 1.4 don't necessarily report a valid type */
+ if (id->cntrltype == NVME_CTRL_DISC ||
+ !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
+ subsys->subtype = NVME_NQN_DISC;
+ else
+ subsys->subtype = NVME_NQN_NVME;
+
+ if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
+ dev_err(ctrl->device,
+ "Subsystem %s is not a discovery controller",
+ subsys->subnqn);
+ kfree(subsys);
+ return -EINVAL;
+ }
subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
subsys->iopolicy = NVME_IOPOLICY_NUMA;
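The core.c hunks above move command zeroing out of nvme_setup_cmd() and into each setup helper: short commands get a full memset(), while nvme_setup_rw() avoids one on the I/O hot path by assigning every field and zeroing the unused ones explicitly. A minimal userspace sketch of the two styles, using a hypothetical stand-in layout (not the real struct nvme_command):

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the command layout, illustration only. */
struct rw_cmd {
	uint8_t  opcode;
	uint8_t  flags;
	uint32_t nsid;
	uint64_t rsvd2;
	uint64_t metadata;
	uint64_t slba;
	uint16_t length;	/* zero's based: 7 means 8 LBAs */
	uint32_t reftag;
	uint16_t apptag;
	uint16_t appmask;
};

/* Cold path: a full clear is simplest and cheap enough. */
static void setup_flush(struct rw_cmd *c, uint32_t nsid)
{
	memset(c, 0, sizeof(*c));
	c->opcode = 0x00;	/* placeholder flush opcode */
	c->nsid = nsid;
}

/* Hot path: assign every field so no memset is needed. */
static void setup_rw(struct rw_cmd *c, uint8_t op, uint32_t nsid,
		     uint64_t slba, uint16_t length)
{
	c->opcode = op;
	c->flags = 0;
	c->nsid = nsid;
	c->rsvd2 = 0;
	c->metadata = 0;
	c->slba = slba;
	c->length = length;
	c->reftag = 0;
	c->apptag = 0;
	c->appmask = 0;
}

int main(void)
{
	struct rw_cmd c;

	setup_flush(&c, 1);
	setup_rw(&c, 0x02, 1, 2048, 7);	/* read 8 LBAs at LBA 2048 */
	return 0;
}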
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 668c6bb7a567..c5a2b71c5268 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
+ { NVMF_OPT_DISCOVERY, "discovery" },
{ NVMF_OPT_ERR, NULL }
};
@@ -823,6 +824,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tos = token;
break;
+ case NVMF_OPT_DISCOVERY:
+ opts->discovery_nqn = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -949,7 +953,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
- NVMF_OPT_DISABLE_SQFLOW |\
+ NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
NVMF_OPT_FAIL_FAST_TMO)
static struct nvme_ctrl *
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a146cb903869..c3203ff1c654 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -67,6 +67,7 @@ enum {
NVMF_OPT_TOS = 1 << 19,
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
+ NVMF_OPT_DISCOVERY = 1 << 22,
};
/**
@@ -178,6 +179,13 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
return true;
}
+static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->subsys)
+ return ctrl->opts->subsysnqn;
+ return ctrl->subsys->subnqn;
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 580a216da619..71b3108c22f0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,6 +16,7 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
+#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@@ -2841,6 +2842,28 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
+static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_fc_ctrl *ctrl = set->driver_data;
+ int i;
+
+ for (i = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ if (!map->nr_queues) {
+ WARN_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /* Call LLDD map queue functionality if defined */
+ if (ctrl->lport->ops->map_queues)
+ ctrl->lport->ops->map_queues(&ctrl->lport->localport,
+ map);
+ else
+ blk_mq_map_queues(map);
+ }
+ return 0;
+}
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2849,6 +2872,7 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.exit_request = nvme_fc_exit_request,
.init_hctx = nvme_fc_init_hctx,
.timeout = nvme_fc_timeout,
+ .map_queues = nvme_fc_map_queues,
};
static int
@@ -3572,7 +3596,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
return &ctrl->ctrl;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 11440c86881e..7f2071f2460c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -105,8 +105,11 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->head->disk)
- kblockd_schedule_work(&ns->head->requeue_work);
+ if (!ns->head->disk)
+ continue;
+ kblockd_schedule_work(&ns->head->requeue_work);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
}
@@ -143,13 +146,12 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->scan_lock);
down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (nvme_mpath_clear_current_path(ns))
- kblockd_schedule_work(&ns->head->requeue_work);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ nvme_mpath_clear_current_path(ns);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
up_read(&ctrl->namespaces_rwsem);
- mutex_unlock(&ctrl->scan_lock);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -506,13 +508,23 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
+ int rc;
if (!head->disk)
return;
+ /*
+ * test_and_set_bit() is used because it protects against two nvme
+ * paths simultaneously calling device_add_disk() on the same
+ * namespace head.
+ */
if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
- device_add_disk(&head->subsys->dev, head->disk,
- nvme_ns_id_attr_groups);
+ rc = device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+ if (rc) {
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
+ return;
+ }
nvme_add_ns_head_cdev(head);
}
@@ -550,7 +562,7 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return -EINVAL;
nr_nsids = le32_to_cpu(desc->nnsids);
- nsid_buf_size = nr_nsids * sizeof(__le32);
+ nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
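flex_array_size() above replaces the open-coded nr_nsids * sizeof(__le32); in the kernel (<linux/overflow.h>) it also saturates on multiplication overflow instead of wrapping. A simplified userspace approximation of its semantics, assuming a cut-down ANA descriptor:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ana_group_desc {
	uint32_t grpid;
	uint32_t nnsids;
	uint32_t nsids[];	/* flexible array member */
};

/* Simplified stand-in: size of the trailing array only. The kernel
 * macro additionally saturates to SIZE_MAX on overflow. */
#define flex_array_size(p, member, count) \
	((size_t)(count) * sizeof(*(p)->member))

int main(void)
{
	uint32_t nr_nsids = 8;
	struct ana_group_desc *d =
		malloc(sizeof(*d) + flex_array_size(d, nsids, nr_nsids));

	if (!d)
		return 1;
	printf("trailing nsid bytes: %zu\n",
	       flex_array_size(d, nsids, nr_nsids));
	free(d);
	return 0;
}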
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3652439a7458..b334af8aa264 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -373,6 +373,7 @@ struct nvme_subsystem {
char model[40];
char firmware_rev[8];
u8 cmic;
+ enum nvme_subsys_type subtype;
u16 vendor_id;
u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fde36c137900..ca2ee806d74b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -245,8 +245,15 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
unsigned int mem_size = nvme_dbbuf_size(dev);
- if (dev->dbbuf_dbs)
+ if (dev->dbbuf_dbs) {
+ /*
+ * Clear the dbbuf memory so the driver doesn't observe stale
+ * values from the previous instantiation.
+ */
+ memset(dev->dbbuf_dbs, 0, mem_size);
+ memset(dev->dbbuf_eis, 0, mem_size);
return 0;
+ }
dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
&dev->dbbuf_dbs_dma_addr,
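The pci.c change keeps the shadow doorbell (dbbuf) buffers across controller resets and clears them on reuse rather than reallocating, so the driver never observes stale doorbell values from the previous instantiation. A generic sketch of that allocate-once, clear-on-reuse pattern, using plain heap memory in place of the DMA-coherent kernel allocations:

#include <stdlib.h>
#include <string.h>

struct dbbuf {
	char *dbs;	/* shadow doorbells */
	char *eis;	/* event indexes */
	size_t size;
};

/* Returns 0 on success. On reuse, wipe stale values instead of
 * freeing and reallocating the buffers. */
static int dbbuf_alloc(struct dbbuf *d, size_t size)
{
	if (d->dbs) {
		memset(d->dbs, 0, d->size);
		memset(d->eis, 0, d->size);
		return 0;
	}
	d->dbs = calloc(1, size);
	d->eis = calloc(1, size);
	if (!d->dbs || !d->eis) {
		free(d->dbs);
		free(d->eis);
		d->dbs = d->eis = NULL;
		return -1;
	}
	d->size = size;
	return 0;
}

int main(void)
{
	struct dbbuf d = { 0 };

	return dbbuf_alloc(&d, 4096) || dbbuf_alloc(&d, 4096);
}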
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7d9ac544db37..850f84d204d0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1096,11 +1096,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (ctrl->ctrl.icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
goto destroy_admin;
}
if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device,
"Mandatory keyed sgls are not supported!\n");
goto destroy_admin;
@@ -1112,6 +1114,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
+ if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ dev_warn(ctrl->ctrl.device,
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
+ ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ }
+
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
dev_warn(ctrl->ctrl.device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
@@ -2386,7 +2395,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
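In the rdma.c hunk, sqsize is a zero's-based value, so the usable queue depth is sqsize + 1; the new check clamps it to the transport maximum (NVME_RDMA_MAX_QUEUE_SIZE, assumed to be 128 here) before the existing maxcmd clamp. A small sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUE_SIZE 128	/* stand-in for NVME_RDMA_MAX_QUEUE_SIZE */

/* sqsize is zero's based: depth = sqsize + 1. */
static uint32_t clamp_sqsize(uint32_t sqsize)
{
	if (sqsize + 1 > MAX_QUEUE_SIZE)
		sqsize = MAX_QUEUE_SIZE - 1;
	return sqsize;
}

int main(void)
{
	/* A controller asking for depth 256 is clamped to 128. */
	printf("depth %u -> %u\n", 255u + 1, clamp_sqsize(255) + 1);
	return 0;
}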
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 19dcb9279a24..33bc83d8d992 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2585,7 +2585,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index d95010481fce..bfc259e0d7b8 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -233,6 +233,8 @@ out_free:
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
+ memset(c, 0, sizeof(*c));
+
c->zms.opcode = nvme_cmd_zone_mgmt_send;
c->zms.nsid = cpu_to_le32(ns->head->ns_id);
c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index aa6d84d8848e..6fb24746de06 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -264,7 +264,7 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
desc->state = req->port->ana_state[grpid];
memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
- return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+ return struct_size(desc, nsids, count);
}
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
@@ -278,8 +278,8 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
u16 status;
status = NVME_SC_INTERNAL;
- desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
- NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+ desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
+ GFP_KERNEL);
if (!desc)
goto out;
@@ -374,13 +374,19 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->rab = 6;
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ id->cntrltype = NVME_CTRL_DISC;
+ else
+ id->cntrltype = NVME_CTRL_IO;
+
/*
* XXX: figure out how we can assign a IEEE OUI, but until then
* the safest is to leave it as zeroes.
*/
/* we support multiple ports, multiple hosts and ANA: */
- id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
+ id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
+ NVME_CTRL_CMIC_ANA;
/* Limit MDTS according to transport capability */
if (ctrl->ops->get_mdts)
@@ -536,7 +542,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* Our namespace might always be shared. Not just with other
* controllers, but also with any other user of the block device.
*/
- id->nmic = (1 << 0);
+ id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
@@ -1008,7 +1014,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
- if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req);
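struct_size(desc, nsids, count) used above computes sizeof(*desc) plus count trailing nsids elements; unlike flex_array_size() shown earlier it includes the fixed header, and the kernel version saturates on overflow instead of wrapping. A simplified stand-in, assuming the 32-byte ANA group descriptor header and an NVMET_MAX_NAMESPACES of 1024:

#include <stdint.h>
#include <stdio.h>

struct ana_group_desc {
	uint32_t grpid;
	uint32_t nnsids;
	uint64_t chgcnt;
	uint8_t  state;
	uint8_t  rsvd17[15];
	uint32_t nsids[];	/* flexible array member */
};

/* Simplified: header plus trailing array; the kernel macro also
 * guards against multiplication/addition overflow. */
#define struct_size(p, member, count) \
	(sizeof(*(p)) + (size_t)(count) * sizeof(*(p)->member))

int main(void)
{
	struct ana_group_desc *desc = NULL;

	/* 32-byte header + 1024 * 4-byte nsids = 4128 bytes. */
	printf("%zu\n", struct_size(desc, nsids, 1024));
	return 0;
}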
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index be5d82421e3a..091a0ca16361 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1233,6 +1233,44 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ char *subsysnqn;
+ int len;
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!subsysnqn)
+ return -ENOMEM;
+
+ /*
+ * The discovery NQN must be different from the subsystem NQN.
+ */
+ if (!strcmp(subsysnqn, subsys->subsysnqn)) {
+ kfree(subsysnqn);
+ return -EBUSY;
+ }
+ down_write(&nvmet_config_sem);
+ kfree(nvmet_disc_subsys->subsysnqn);
+ nvmet_disc_subsys->subsysnqn = subsysnqn;
+ up_write(&nvmet_config_sem);
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@@ -1262,6 +1300,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_discovery_nqn,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
@@ -1553,6 +1592,8 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
+ /* Let inflight controller teardown complete */
+ flush_scheduled_work();
list_del(&port->global_entry);
kfree(port->ana_state);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b8425fa34300..5119c687de68 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1140,7 +1140,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* should verify iosqes,iocqes are zeroed, however that
* would break backwards compatibility, so don't enforce it.
*/
- if (ctrl->subsys->type != NVME_NQN_DISC &&
+ if (!nvmet_is_disc_subsys(ctrl->subsys) &&
(nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
ctrl->csts = NVME_CSTS_CFS;
@@ -1205,7 +1205,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+ if (ctrl->ops->get_max_queue_size)
+ ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ else
+ ctrl->cap |= NVMET_QUEUE_SIZE - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
@@ -1278,7 +1281,7 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
if (subsys->allow_any_host)
return true;
- if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
+ if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
return true;
list_for_each_entry(p, &subsys->hosts, entry) {
@@ -1367,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_init(&ctrl->lock);
ctrl->port = req->port;
+ ctrl->ops = req->ops;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
@@ -1405,13 +1409,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
- ctrl->ops = req->ops;
-
/*
* Discovery controllers may use some arbitrary high value
* in order to clean up stale discovery sessions
*/
- if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+ if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
kato = NVMET_DISC_KATO_MS;
/* keep-alive timeout in seconds */
@@ -1491,7 +1493,8 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
- if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
+ !strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
@@ -1538,6 +1541,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys->max_qid = NVMET_NR_QUEUES;
break;
case NVME_NQN_DISC:
+ case NVME_NQN_CURR:
subsys->max_qid = 0;
break;
default:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 7aa62bc6ae84..c2162eef8ce1 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -146,7 +146,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
- size_t entries = 0;
+ size_t entries = 1;
list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
@@ -171,6 +171,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
u32 numrec = 0;
u16 status = 0;
void *buffer;
+ char traddr[NVMF_TRADDR_SIZE];
if (!nvmet_check_transfer_len(req, data_len))
return;
@@ -203,15 +204,19 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
status = NVME_SC_INTERNAL;
goto out;
}
-
hdr = buffer;
- list_for_each_entry(p, &req->port->subsystems, entry) {
- char traddr[NVMF_TRADDR_SIZE];
+ nvmet_set_disc_traddr(req, req->port, traddr);
+
+ nvmet_format_discovery_entry(hdr, req->port,
+ nvmet_disc_subsys->subsysnqn,
+ traddr, NVME_NQN_CURR, numrec);
+ numrec++;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
- nvmet_set_disc_traddr(req, req->port, traddr);
nvmet_format_discovery_entry(hdr, req->port,
p->subsys->subsysnqn, traddr,
NVME_NQN_NVME, numrec);
@@ -268,6 +273,8 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
+ id->cntrltype = NVME_CTRL_DISC;
+
/* no limit on data transfer sizes for now */
id->mdts = 0;
id->cntlid = cpu_to_le16(ctrl->cntlid);
@@ -387,7 +394,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
- nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
+ nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}
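Because the discovery subsystem now reports itself, entry counting in discovery_log_entries() starts at 1 and the self entry is emitted first with type NVME_NQN_CURR. Assuming the spec layout of a 1024-byte log page header followed by 1024-byte entries, the returned log length works out as in this sketch:

#include <stddef.h>
#include <stdio.h>

#define DISC_HDR_LEN	1024	/* discovery log page header */
#define DISC_ENTRY_LEN	1024	/* one discovery log entry */

/* numrec = the self ('current') entry plus one entry per subsystem
 * the host is allowed to see. */
static size_t disc_log_len(size_t allowed_subsystems)
{
	size_t numrec = 1 + allowed_subsystems;

	return DISC_HDR_LEN + numrec * DISC_ENTRY_LEN;
}

int main(void)
{
	/* Two exported subsystems: header + 3 entries = 4096 bytes. */
	printf("%zu\n", disc_log_len(2));
	return 0;
}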
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 7d0454cee920..70fb587e9413 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -221,7 +221,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
- pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7143c7fa7464..af193423c10b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -309,6 +309,7 @@ struct nvmet_fabrics_ops {
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
+ u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -576,6 +577,11 @@ static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
return req->sq->ctrl->subsys;
}
+static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
+{
+ return subsys->type != NVME_NQN_NVME;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 38d1f292ecc2..1deb4043e242 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1819,12 +1819,36 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
+static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
+{
+ struct nvmet_rdma_queue *queue, *tmp;
+ struct nvmet_port *nport = port->nport;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
+ if (queue->port != nport)
+ continue;
+
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
if (cm_id)
rdma_destroy_id(cm_id);
+
+ /*
+ * Destroy the remaining queues, which are not belong to any
+ * Destroy the remaining queues, which do not belong to any
+ * controller yet. Doing this after the RDMA-CM ID is destroyed
+ * guarantees that no new queue will be created.
+ nvmet_rdma_destroy_port_queues(port);
}
static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
@@ -1976,6 +2000,11 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
return NVMET_RDMA_MAX_MDTS;
}
+static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
+{
+ return NVME_RDMA_MAX_QUEUE_SIZE;
+}
+
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
@@ -1987,6 +2016,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
+ .get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
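The new get_max_queue_size op feeds nvmet_init_cap() in target/core.c above, where CAP.MQES is a zero's-based field: the controller advertises the maximum queue size minus one. A small sketch of that encoding:

#include <stdint.h>
#include <stdio.h>

/* CAP.MQES (bits 15:0) is zero's based: encode max entries - 1. */
static uint64_t cap_with_mqes(uint64_t cap, uint16_t max_qsize)
{
	return cap | (uint64_t)(max_qsize - 1);
}

int main(void)
{
	/* An RDMA target capped at 128 entries advertises MQES = 127. */
	printf("0x%llx\n", (unsigned long long)cap_with_mqes(0, 128));
	return 0;
}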
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 46c3b3be7e03..84c387e4bf43 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1740,6 +1740,17 @@ err_port:
return ret;
}
+static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->port == port)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port = nport->priv;
@@ -1749,6 +1760,11 @@ static void nvmet_tcp_remove_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = NULL;
write_unlock_bh(&port->sock->sk->sk_callback_lock);
cancel_work_sync(&port->accept_work);
+ /*
+ * Destroy the remaining queues, which do not belong to any
+ * controller yet.
+ */
+ nvmet_tcp_destroy_port_queues(port);
sock_release(port->sock);
kfree(port);