author     Linus Torvalds <torvalds@linux-foundation.org>   2022-10-07 09:19:14 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-10-07 09:19:14 -0700
commit     513389809e138ae903b6ef43c1d5d2ffaf4dca17 (patch)
tree       c71e478fab1568da4706868b14eb67a75c148a8b /drivers/nvme/target
parent     0a78a376ef3c2f3d397df48909f00cd75f92137a (diff)
parent     30514bd2dd4e86a3ecfd6a93a3eadf7b9ea164a0 (diff)
Merge tag 'for-6.1/block-2022-10-03' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:

 - NVMe pull requests via Christoph:
      - handle number of queue changes in the TCP and RDMA drivers (Daniel Wagner)
      - allow changing the number of queues in nvmet (Daniel Wagner)
      - also consider host_iface when checking ip options (Daniel Wagner)
      - don't map pages which can't come from HIGHMEM (Fabio M. De Francesco)
      - avoid unnecessary flush bios in nvmet (Guixin Liu)
      - shrink and better pack the nvme_iod structure (Keith Busch)
      - add comment for unaligned "fake" nqn (Linjun Bao)
      - print actual source IP address through sysfs "address" attr (Martin Belanger)
      - various cleanups (Jackie Liu, Wolfram Sang, Genjian Zhang)
      - handle effects after freeing the request (Keith Busch)
      - copy firmware_rev on each init (Keith Busch)
      - restrict management ioctls to admin (Keith Busch)
      - ensure subsystem reset is single threaded (Keith Busch)
      - report the actual number of tagset maps in nvme-pci (Keith Busch)
      - small fabrics authentication fixups (Christoph Hellwig)
      - add common code for tagset allocation and freeing (Christoph Hellwig)
      - stop using the request_queue in nvmet (Christoph Hellwig)
      - set min_align_mask before calculating max_hw_sectors (Rishabh Bhatnagar)
      - send a rediscover uevent when a persistent discovery controller reconnects (Sagi Grimberg)
      - misc nvmet-tcp fixes (Varun Prakash, zhenwei pi)

 - MD pull request via Song:
      - Various raid5 fix and clean up, by Logan Gunthorpe and David Sloan.
      - Raid10 performance optimization, by Yu Kuai.

 - sbitmap wakeup hang fixes (Hugh, Keith, Jan, Yu)

 - IO scheduler switching quiesce fix (Keith)

 - s390/dasd block driver updates (Stefan)

 - support for recovery for the ublk driver (ZiyangZhang)

 - rnbd drivers fixes and updates (Guoqing, Santosh, ye, Christoph)

 - blk-mq and null_blk map fixes (Bart)

 - various bcache fixes (Coly, Jilin, Jules)

 - nbd signal hang fix (Shigeru)

 - block writeback throttling fix (Yu)

 - optimize the passthrough mapping handling (me)

 - prepare block cgroups to being gendisk based (Christoph)

 - get rid of an old PSI hack in the block layer, moving it to the callers instead where it belongs (Christoph)

 - blk-throttle fixes and cleanups (Yu)

 - misc fixes and cleanups (Liu Shixin, Liu Song, Miaohe, Pankaj, Ping-Xiang, Wolfram, Saurabh, Li Jinlin, Li Lei, Lin, Li zeming, Miaohe, Bart, Coly, Gaosheng

* tag 'for-6.1/block-2022-10-03' of git://git.kernel.dk/linux: (162 commits)
  sbitmap: fix lockup while swapping
  block: add rationale for not using blk_mq_plug() when applicable
  block: adapt blk_mq_plug() to not plug for writes that require a zone lock
  s390/dasd: use blk_mq_alloc_disk
  blk-cgroup: don't update the blkg lookup hint in blkg_conf_prep
  nvmet: don't look at the request_queue in nvmet_bdev_set_limits
  nvmet: don't look at the request_queue in nvmet_bdev_zone_mgmt_emulate_all
  blk-mq: use quiesced elevator switch when reinitializing queues
  block: replace blk_queue_nowait with bdev_nowait
  nvme: remove nvme_ctrl_init_connect_q
  nvme-loop: use the tagset alloc/free helpers
  nvme-loop: store the generic nvme_ctrl in set->driver_data
  nvme-loop: initialize sqsize later
  nvme-fc: use the tagset alloc/free helpers
  nvme-fc: store the generic nvme_ctrl in set->driver_data
  nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  nvme-rdma: use the tagset alloc/free helpers
  nvme-rdma: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: use the tagset alloc/free helpers
  nvme-tcp: store the generic nvme_ctrl in set->driver_data
  ...
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/admin-cmd.c         |  2
-rw-r--r--  drivers/nvme/target/configfs.c          | 29
-rw-r--r--  drivers/nvme/target/core.c              |  1
-rw-r--r--  drivers/nvme/target/discovery.c         |  2
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c  | 23
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c       | 19
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c       | 19
-rw-r--r--  drivers/nvme/target/loop.c              | 91
-rw-r--r--  drivers/nvme/target/nvmet.h             |  7
-rw-r--r--  drivers/nvme/target/passthru.c          |  7
-rw-r--r--  drivers/nvme/target/tcp.c               | 91
-rw-r--r--  drivers/nvme/target/zns.c               |  3
12 files changed, 142 insertions, 152 deletions
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index fc8a957fad0a..c8a061ce3ee5 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -449,7 +449,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/*
* Max command capsule size is sqe + in-capsule data size.
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 2bcd60758919..e34a2896fedb 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1281,6 +1281,34 @@ static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
+static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
+}
+
+static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ u16 qid_max;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ if (sscanf(page, "%hu\n", &qid_max) != 1)
+ return -EINVAL;
+
+ if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->max_qid = qid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
+
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
@@ -1288,6 +1316,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_qid_max,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
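
The new attr_qid_max file behaves like any other nvmet configfs attribute, so the queue limit can be changed from userspace with a plain write. A minimal sketch, not part of this diff, assuming configfs is mounted at /sys/kernel/config and a subsystem named "testnqn" already exists:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path and subsystem name are assumptions for illustration only. */
	const char *path =
		"/sys/kernel/config/nvmet/subsystems/testnqn/attr_qid_max";
	const char *val = "8\n";	/* accepted range is 1..NVMET_NR_QUEUES */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open attr_qid_max");
		return 1;
	}
	/* The store handler above rejects the write (-EACCES) while the port is enabled. */
	if (write(fd, val, strlen(val)) < 0)
		perror("write attr_qid_max");
	close(fd);
	return 0;
}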
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 7f4083cf953a..14677145bbba 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -832,6 +832,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
}
init_completion(&sq->free_done);
init_completion(&sq->confirm_done);
+ nvmet_auth_sq_init(sq);
return 0;
}
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c2162eef8ce1..668d257fa986 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -292,7 +292,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index ebdf9aa81041..7970a7640e58 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -23,17 +23,12 @@ static void nvmet_auth_expired_work(struct work_struct *work)
sq->dhchap_tid = -1;
}
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
- u32 result = le32_to_cpu(req->cqe->result.u32);
-
/* Initialize in-band authentication */
- INIT_DELAYED_WORK(&req->sq->auth_expired_work,
- nvmet_auth_expired_work);
- req->sq->authenticated = false;
- req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
- result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
- req->cqe->result.u32 = cpu_to_le32(result);
+ INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
+ sq->authenticated = false;
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}
static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
@@ -177,7 +172,7 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
return 0;
}
-static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+static u16 nvmet_auth_failure2(void *d)
{
struct nvmf_auth_dhchap_failure_data *data = d;
@@ -229,10 +224,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
}
status = nvmet_copy_from_sgl(req, 0, d, tl);
- if (status) {
- kfree(d);
- goto done;
- }
+ if (status)
+ goto done_kfree;
data = d;
pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
@@ -310,7 +303,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
goto done_kfree;
break;
case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
- status = nvmet_auth_failure2(req, d);
+ status = nvmet_auth_failure2(d);
if (status) {
pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
ctrl->cntlid, req->sq->qid, status);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f91a56180d3d..43b5bd8bb6a5 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -198,6 +198,12 @@ err:
return ret;
}
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+{
+ return (u32)ctrl->cntlid |
+ (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+}
+
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
struct nvmf_connect_command *c = &req->cmd->connect;
@@ -269,10 +275,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "",
nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
- if (nvmet_has_auth(ctrl))
- nvmet_init_auth(ctrl, req);
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
@@ -328,14 +331,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out_ctrl_put;
- /* pass back cntlid for successful completion */
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
- if (nvmet_has_auth(ctrl))
- nvmet_init_auth(ctrl, req);
-
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
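
With the cntlid and the authentication-required flag now folded into a single 32-bit value by nvmet_connect_result(), the host sees both in the connect response result field. An illustrative sketch, not part of this diff, of how that value might be unpacked, assuming a kernel context where NVME_CONNECT_AUTHREQ_ATR is available from <linux/nvme.h> (the helper name is made up):

static void example_parse_connect_result(u32 result)
{
	u16 cntlid = result & 0xffff;	/* controller ID in the low 16 bits */
	bool needs_auth = result & NVME_CONNECT_AUTHREQ_ATR;

	pr_debug("connect: cntlid %u%s\n", cntlid,
		 needs_auth ? ", in-band authentication required" : "");
}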
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 2dc1c1035626..c2d6cea0236b 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -12,11 +12,9 @@
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
- const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
- /* Number of logical blocks per physical block. */
- const u32 lpp = ql->physical_block_size / ql->logical_block_size;
/* Logical blocks per physical block, 0's based. */
- const __le16 lpp0b = to0based(lpp);
+ const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
+ bdev_logical_block_size(bdev));
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -42,11 +40,12 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
- id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
+ id->npdg = to0based(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
/* NPDG = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
- id->nows = to0based(ql->io_opt / ql->logical_block_size);
+ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
}
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -334,6 +333,11 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!bdev_write_cache(req->ns->bdev)) {
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
+ return;
+ }
+
if (!nvmet_check_transfer_len(req, 0))
return;
@@ -347,6 +351,9 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
+ if (!bdev_write_cache(req->ns->bdev))
+ return 0;
+
if (blkdev_issue_flush(req->ns->bdev))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
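
The flush path now short-circuits when the backing device reports no volatile write cache, which is the "avoid unnecessary flush bios in nvmet" item from the changelog above. The same pattern in isolation, as a sketch rather than the in-tree code (the function name is made up):

static int example_flush_if_needed(struct block_device *bdev)
{
	/* Nothing to flush on a write-through device; report success. */
	if (!bdev_write_cache(bdev))
		return 0;
	return blkdev_issue_flush(bdev);
}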
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9750a7fca268..b45fe3adf015 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -204,7 +204,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_loop_ctrl *ctrl = set->driver_data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
nvme_req(req)->ctrl = &ctrl->ctrl;
@@ -218,7 +218,7 @@ static struct lock_class_key loop_hctx_fq_lock_key;
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -238,7 +238,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[0];
BUG_ON(hctx_idx != 0);
@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
list_del(&ctrl->list);
mutex_unlock(&nvme_loop_ctrl_mutex);
- if (nctrl->tagset) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (nctrl->tagset)
+ nvme_remove_io_tag_set(nctrl);
kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
int error;
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
if (error)
return error;
ctrl->ctrl.queue_count = 1;
- error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (error)
goto out_free_sq;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
/* reset stopped state for the fresh admin queue */
clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_queue:
+out_cleanup_tagset:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
return error;
@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
return ret;
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_loop_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (ret)
goto out_destroy_queues;
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tagset;
-
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_connect_q:
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
nvme_loop_destroy_io_queues(ctrl);
return ret;
@@ -601,7 +556,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = -ENOMEM;
- ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
@@ -621,6 +575,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
if (opts->nr_io_queues) {
ret = nvme_loop_create_io_queues(ctrl);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ffeeb0a1c49..dfe3894205aa 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -704,7 +704,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
@@ -726,8 +726,9 @@ static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
return 0;
}
-static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
- struct nvmet_req *req) {};
+static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 6f39a29828b1..94d3153bae54 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -215,9 +215,11 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
struct request *rq = req->p.rq;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ u32 effects;
int status;
- status = nvme_execute_passthru_rq(rq);
+ status = nvme_execute_passthru_rq(rq, &effects);
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
@@ -238,6 +240,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
blk_mq_free_request(rq);
+
+ if (effects)
+ nvme_passthru_end(ctrl, effects, req->cmd, status);
}
static void nvmet_passthru_req_done(struct request *rq,
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index a3694a32f6d5..6c1476e086ef 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -77,9 +77,8 @@ struct nvmet_tcp_cmd {
u32 pdu_len;
u32 pdu_recv;
int sg_idx;
- int nr_mapped;
struct msghdr recv_msg;
- struct kvec *iov;
+ struct bio_vec *iov;
u32 flags;
struct list_head entry;
@@ -165,9 +164,7 @@ static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
-static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
@@ -301,35 +298,21 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
- WARN_ON(unlikely(cmd->nr_mapped > 0));
-
kfree(cmd->iov);
sgl_free(cmd->req.sg);
cmd->iov = NULL;
cmd->req.sg = NULL;
}
-static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
-{
- struct scatterlist *sg;
- int i;
-
- sg = &cmd->req.sg[cmd->sg_idx];
-
- for (i = 0; i < cmd->nr_mapped; i++)
- kunmap(sg_page(&sg[i]));
-
- cmd->nr_mapped = 0;
-}
-
-static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
- struct kvec *iov = cmd->iov;
+ struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
u32 length, offset, sg_offset;
+ int nr_pages;
length = cmd->pdu_len;
- cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
+ nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
@@ -338,8 +321,9 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
- iov->iov_len = iov_len;
+ iov->bv_page = sg_page(sg);
+ iov->bv_len = sg->length;
+ iov->bv_offset = sg->offset + sg_offset;
length -= iov_len;
sg = sg_next(sg);
@@ -347,8 +331,8 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
sg_offset = 0;
}
- iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
- cmd->nr_mapped, cmd->pdu_len);
+ iov_iter_bvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
+ nr_pages, cmd->pdu_len);
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
@@ -926,7 +910,7 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
}
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}
@@ -935,10 +919,17 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
- if (likely(queue->nr_cmds))
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+ pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+ queue->idx, data->ttag, queue->nr_cmds);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
cmd = &queue->cmds[data->ttag];
- else
+ } else {
cmd = &queue->connect;
+ }
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
@@ -952,7 +943,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
cmd->pdu_len = le32_to_cpu(data->data_length);
cmd->pdu_recv = 0;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
@@ -976,6 +967,13 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
return nvmet_tcp_handle_icreq(queue);
}
+ if (unlikely(hdr->type == nvme_tcp_icreq)) {
+ pr_err("queue %d: received icreq pdu in state %d\n",
+ queue->idx, queue->state);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
if (hdr->type == nvme_tcp_h2c_data) {
ret = nvmet_tcp_handle_h2c_data_pdu(queue);
if (unlikely(ret))
@@ -1021,7 +1019,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (nvmet_tcp_need_data_in(queue->cmd)) {
if (nvmet_tcp_has_inline_data(queue->cmd)) {
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(queue->cmd);
+ nvmet_tcp_build_pdu_iovec(queue->cmd);
return 0;
}
/* send back R2T */
@@ -1141,7 +1139,6 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
- nvmet_tcp_unmap_pdu_iovec(cmd);
if (queue->data_digest) {
nvmet_tcp_prep_recv_ddgst(cmd);
return 0;
@@ -1179,7 +1176,8 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
queue->idx, cmd->req.cmd->common.command_id,
queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
le32_to_cpu(cmd->exp_ddgst));
- nvmet_tcp_finish_cmd(cmd);
+ nvmet_req_uninit(&cmd->req);
+ nvmet_tcp_free_cmd_buffers(cmd);
nvmet_tcp_fatal_error(queue);
ret = -EPROTO;
goto out;
@@ -1408,13 +1406,6 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
write_unlock_bh(&sock->sk->sk_callback_lock);
}
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
-{
- nvmet_req_uninit(&cmd->req);
- nvmet_tcp_unmap_pdu_iovec(cmd);
- nvmet_tcp_free_cmd_buffers(cmd);
-}
-
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
struct nvmet_tcp_cmd *cmd = queue->cmds;
@@ -1423,15 +1414,26 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
for (i = 0; i < queue->nr_cmds; i++, cmd++) {
if (nvmet_tcp_need_data_in(cmd))
nvmet_req_uninit(&cmd->req);
-
- nvmet_tcp_unmap_pdu_iovec(cmd);
- nvmet_tcp_free_cmd_buffers(cmd);
}
if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
/* failed in connect */
- nvmet_tcp_finish_cmd(&queue->connect);
+ nvmet_req_uninit(&queue->connect.req);
+ }
+}
+
+static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+ if (nvmet_tcp_need_data_in(cmd))
+ nvmet_tcp_free_cmd_buffers(cmd);
}
+
+ if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
}
static void nvmet_tcp_release_queue_work(struct work_struct *w)
@@ -1452,6 +1454,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work);
+ nvmet_tcp_free_cmd_data_in_buffers(queue);
sock_release(queue->sock);
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
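
The tcp.c conversion above drops the kmap()-based kvec path in favour of describing receive buffers as bio_vecs, so pages that may live in HIGHMEM are handed to the socket layer without being mapped. A stripped-down sketch of that pattern, not part of this diff (the function is made up; the iov_iter_bvec() call matches the one used above):

static void example_recv_into_pages(struct msghdr *msg, struct bio_vec *bv,
				    struct page **pages, int nr_pages,
				    size_t total_len)
{
	size_t left = total_len;
	int i;

	for (i = 0; i < nr_pages; i++) {
		bv[i].bv_page = pages[i];	/* no kmap() of the page */
		bv[i].bv_offset = 0;
		bv[i].bv_len = min_t(size_t, left, PAGE_SIZE);
		left -= bv[i].bv_len;
	}
	iov_iter_bvec(&msg->msg_iter, READ, bv, nr_pages, total_len);
}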
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 835bfda86fcf..1254cf57e008 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -400,7 +400,6 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
struct block_device *bdev = req->ns->bdev;
unsigned int nr_zones = bdev_nr_zones(bdev);
- struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = NULL;
sector_t sector = 0;
int ret;
@@ -409,7 +408,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
};
d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
- GFP_NOIO, q->node);
+ GFP_NOIO, bdev->bd_disk->node_id);
if (!d.zbitmap) {
ret = -ENOMEM;
goto out;