Diffstat (limited to 'drivers/nvme')
 drivers/nvme/common/auth.c   |  86
 drivers/nvme/host/apple.c    | 197
 drivers/nvme/host/auth.c     |   5
 drivers/nvme/host/core.c     |  23
 drivers/nvme/host/fc.c       |  10
 drivers/nvme/host/ioctl.c    |   7
 drivers/nvme/host/nvme.h     |   2
 drivers/nvme/host/pci.c      | 184
 drivers/nvme/host/tcp.c      |   3
 drivers/nvme/target/core.c   |  15
 drivers/nvme/target/fc.c     |  35
 drivers/nvme/target/fcloop.c |   8
 12 files changed, 360 insertions(+), 215 deletions(-)
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 91e273b89fea..1f51fbebd9fa 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -684,6 +684,59 @@ out_free_enc:
EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
/**
+ * hkdf_expand_label - HKDF-Expand-Label (RFC 8446 section 7.1)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @label: ASCII label without "tls13 " prefix
+ * @labellen: length of @label
+ * @context: context bytes
+ * @contextlen: length of @context
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * Build the TLS 1.3 HkdfLabel structure and invoke hkdf_expand().
+ *
+ * Returns 0 on success with output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+static int hkdf_expand_label(struct crypto_shash *hmac_tfm,
+ const u8 *label, unsigned int labellen,
+ const u8 *context, unsigned int contextlen,
+ u8 *okm, unsigned int okmlen)
+{
+ int err;
+ u8 *info;
+ unsigned int infolen;
+ const char *tls13_prefix = "tls13 ";
+ unsigned int prefixlen = strlen(tls13_prefix);
+
+ if (WARN_ON(labellen > (255 - prefixlen)))
+ return -EINVAL;
+ if (WARN_ON(contextlen > 255))
+ return -EINVAL;
+
+ infolen = 2 + (1 + prefixlen + labellen) + (1 + contextlen);
+ info = kzalloc(infolen, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* HkdfLabel.Length */
+ put_unaligned_be16(okmlen, info);
+
+ /* HkdfLabel.Label */
+ info[2] = prefixlen + labellen;
+ memcpy(info + 3, tls13_prefix, prefixlen);
+ memcpy(info + 3 + prefixlen, label, labellen);
+
+ /* HkdfLabel.Context */
+ info[3 + prefixlen + labellen] = contextlen;
+ memcpy(info + 4 + prefixlen + labellen, context, contextlen);
+
+ err = hkdf_expand(hmac_tfm, info, infolen, okm, okmlen);
+ kfree_sensitive(info);
+ return err;
+}
+
+/**
* nvme_auth_derive_tls_psk - Derive TLS PSK
* @hmac_id: Hash function identifier
* @psk: generated input PSK
@@ -715,10 +768,10 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
{
struct crypto_shash *hmac_tfm;
const char *hmac_name;
- const char *psk_prefix = "tls13 nvme-tls-psk";
+ const char *label = "nvme-tls-psk";
static const char default_salt[HKDF_MAX_HASHLEN];
- size_t info_len, prk_len;
- char *info;
+ size_t prk_len;
+ const char *ctx;
unsigned char *prk, *tls_key;
int ret;
@@ -758,36 +811,29 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
if (ret)
goto out_free_prk;
- /*
- * 2 additional bytes for the length field from HDKF-Expand-Label,
- * 2 additional bytes for the HMAC ID, and one byte for the space
- * separator.
- */
- info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
- info = kzalloc(info_len + 1, GFP_KERNEL);
- if (!info) {
+ ctx = kasprintf(GFP_KERNEL, "%02d %s", hmac_id, psk_digest);
+ if (!ctx) {
ret = -ENOMEM;
goto out_free_prk;
}
- put_unaligned_be16(psk_len, info);
- memcpy(info + 2, psk_prefix, strlen(psk_prefix));
- sprintf(info + 2 + strlen(psk_prefix), "%02d %s", hmac_id, psk_digest);
-
tls_key = kzalloc(psk_len, GFP_KERNEL);
if (!tls_key) {
ret = -ENOMEM;
- goto out_free_info;
+ goto out_free_ctx;
}
- ret = hkdf_expand(hmac_tfm, info, info_len, tls_key, psk_len);
+ ret = hkdf_expand_label(hmac_tfm,
+ label, strlen(label),
+ ctx, strlen(ctx),
+ tls_key, psk_len);
if (ret) {
kfree(tls_key);
- goto out_free_info;
+ goto out_free_ctx;
}
*ret_psk = tls_key;
-out_free_info:
- kfree(info);
+out_free_ctx:
+ kfree(ctx);
out_free_prk:
kfree(prk);
out_free_shash:
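
A userspace sketch (not part of the patch) of the RFC 8446 section 7.1
HkdfLabel byte layout that hkdf_expand_label() above constructs. The
context string mirrors the kasprintf("%02d %s", hmac_id, psk_digest)
format used by nvme_auth_derive_tls_psk(); the digest value here is a
made-up placeholder.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Assemble the HKDF-Expand-Label info buffer:
 *   uint16 Length;
 *   opaque Label<7..255>   = "tls13 " + label;
 *   opaque Context<0..255> = context;
 */
static size_t build_hkdf_label(uint8_t *info, uint16_t okmlen,
			       const char *label, const char *context)
{
	const char *prefix = "tls13 ";
	size_t prefixlen = strlen(prefix);
	size_t labellen = strlen(label);
	size_t contextlen = strlen(context);
	size_t off = 0;

	/* HkdfLabel.Length: output length, big-endian */
	info[off++] = okmlen >> 8;
	info[off++] = okmlen & 0xff;

	/* HkdfLabel.Label: one length byte, then "tls13 " + label */
	info[off++] = (uint8_t)(prefixlen + labellen);
	memcpy(info + off, prefix, prefixlen);
	off += prefixlen;
	memcpy(info + off, label, labellen);
	off += labellen;

	/* HkdfLabel.Context: one length byte, then the context bytes */
	info[off++] = (uint8_t)contextlen;
	memcpy(info + off, context, contextlen);
	off += contextlen;

	return off;
}

int main(void)
{
	const char *ctx = "01 placeholder-psk-digest";	/* "%02d %s" */
	uint8_t info[2 + 1 + 255 + 1 + 255];
	size_t i, n = build_hkdf_label(info, 32, "nvme-tls-psk", ctx);

	for (i = 0; i < n; i++)
		printf("%02x%c", info[i], i % 16 == 15 ? '\n' : ' ');
	printf("\n");
	return 0;
}
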
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 1286c31320e6..f35d3f71d14f 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -35,7 +35,6 @@
#include "nvme.h"
#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
-#define APPLE_ANS_MAX_QUEUE_DEPTH 64
#define APPLE_ANS_COPROC_CPU_CONTROL 0x44
#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
@@ -75,6 +74,8 @@
#define APPLE_NVME_AQ_DEPTH 2
#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+#define APPLE_NVME_IOSQES 7
+
/*
* These can be higher, but we need to ensure that any command doesn't
* require an sg allocation that needs more than a page of data.
@@ -142,6 +143,7 @@ struct apple_nvme_queue {
u32 __iomem *sq_db;
u32 __iomem *cq_db;
+ u16 sq_tail;
u16 cq_head;
u8 cq_phase;
@@ -166,11 +168,17 @@ struct apple_nvme_iod {
struct scatterlist *sg;
};
+struct apple_nvme_hw {
+ bool has_lsq_nvmmu;
+ u32 max_queue_depth;
+};
+
struct apple_nvme {
struct device *dev;
void __iomem *mmio_coproc;
void __iomem *mmio_nvme;
+ const struct apple_nvme_hw *hw;
struct device **pd_dev;
struct device_link **pd_link;
@@ -215,10 +223,12 @@ static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{
- if (q->is_adminq)
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ if (q->is_adminq && anv->hw->has_lsq_nvmmu)
return APPLE_NVME_AQ_DEPTH;
- return APPLE_ANS_MAX_QUEUE_DEPTH;
+ return anv->hw->max_queue_depth;
}
static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
@@ -280,7 +290,28 @@ static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
"NVMMU TCB invalidation failed\n");
}
-static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q,
+ struct nvme_command *cmd)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ spin_lock_irq(&anv->lock);
+
+ if (q->is_adminq)
+ memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd));
+ else
+ memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES),
+ cmd, sizeof(*cmd));
+
+ if (++q->sq_tail == anv->hw->max_queue_depth)
+ q->sq_tail = 0;
+
+ writel(q->sq_tail, q->sq_db);
+ spin_unlock_irq(&anv->lock);
+}
+
+static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q,
struct nvme_command *cmd)
{
struct apple_nvme *anv = queue_to_apple_nvme(q);
@@ -590,7 +621,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
__u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
- apple_nvmmu_inval(q, command_id);
+ if (anv->hw->has_lsq_nvmmu)
+ apple_nvmmu_inval(q, command_id);
req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
if (unlikely(!req)) {
@@ -685,7 +717,7 @@ static int apple_nvme_create_cq(struct apple_nvme *anv)
c.create_cq.opcode = nvme_admin_create_cq;
c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
c.create_cq.cqid = cpu_to_le16(1);
- c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_cq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
c.create_cq.irq_vector = cpu_to_le16(0);
@@ -713,7 +745,7 @@ static int apple_nvme_create_sq(struct apple_nvme *anv)
c.create_sq.opcode = nvme_admin_create_sq;
c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
c.create_sq.sqid = cpu_to_le16(1);
- c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_sq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
c.create_sq.cqid = cpu_to_le16(1);
@@ -765,7 +797,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
nvme_start_request(req);
- apple_nvme_submit_cmd(q, cmnd);
+
+ if (anv->hw->has_lsq_nvmmu)
+ apple_nvme_submit_cmd_t8103(q, cmnd);
+ else
+ apple_nvme_submit_cmd_t8015(q, cmnd);
+
return BLK_STS_OK;
out_free_cmd:
@@ -970,11 +1007,13 @@ static const struct blk_mq_ops apple_nvme_mq_ops = {
static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
unsigned int depth = apple_nvme_queue_depth(q);
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
q->cq_head = 0;
q->cq_phase = 1;
- memset(q->tcbs, 0,
- APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
+ if (anv->hw->has_lsq_nvmmu)
+ memset(q->tcbs, 0, anv->hw->max_queue_depth
+ * sizeof(struct apple_nvmmu_tcb));
memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
WRITE_ONCE(q->enabled, true);
wmb(); /* ensure the first interrupt sees the initialization */
@@ -1069,49 +1108,55 @@ static void apple_nvme_reset_work(struct work_struct *work)
dma_set_max_seg_size(anv->dev, 0xffffffff);
- /*
- * Enable NVMMU and linear submission queues.
- * While we could keep those disabled and pretend this is slightly
- * more common NVMe controller we'd still need some quirks (e.g.
- * sq entries will be 128 bytes) and Apple might drop support for
- * that mode in the future.
- */
- writel(APPLE_ANS_LINEAR_SQ_EN,
- anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+ if (anv->hw->has_lsq_nvmmu) {
+ /*
+ * Enable NVMMU and linear submission queues, which are
+ * required since T6000.
+ */
+ writel(APPLE_ANS_LINEAR_SQ_EN,
+ anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
- /* Allow as many pending command as possible for both queues */
- writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
- anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+ /* Allow as many pending commands as possible for both queues */
+ writel(anv->hw->max_queue_depth
+ | (anv->hw->max_queue_depth << 16), anv->mmio_nvme
+ + APPLE_ANS_MAX_PEND_CMDS_CTRL);
- /* Setup the NVMMU for the maximum admin and IO queue depth */
- writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
- anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+ /* Setup the NVMMU for the maximum admin and IO queue depth */
+ writel(anv->hw->max_queue_depth - 1,
+ anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
- /*
- * This is probably a chicken bit: without it all commands where any PRP
- * is set to zero (including those that don't use that field) fail and
- * the co-processor complains about "completed with err BAD_CMD-" or
- * a "NULL_PRP_PTR_ERR" in the syslog
- */
- writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
- ~APPLE_ANS_PRP_NULL_CHECK,
- anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+ /*
+ * This is probably a chicken bit: without it all commands
+ * where any PRP is set to zero (including those that don't use
+ * that field) fail and the co-processor complains about
+ * "completed with err BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the
+ * syslog
+ */
+ writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+ ~APPLE_ANS_PRP_NULL_CHECK,
+ anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+ }
/* Setup the admin queue */
- aqa = APPLE_NVME_AQ_DEPTH - 1;
+ if (anv->hw->has_lsq_nvmmu)
+ aqa = APPLE_NVME_AQ_DEPTH - 1;
+ else
+ aqa = anv->hw->max_queue_depth - 1;
aqa |= aqa << 16;
writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
- /* Setup NVMMU for both queues */
- writeq(anv->adminq.tcb_dma_addr,
- anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
- writeq(anv->ioq.tcb_dma_addr,
- anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+ if (anv->hw->has_lsq_nvmmu) {
+ /* Setup NVMMU for both queues */
+ writeq(anv->adminq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+ writeq(anv->ioq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+ }
anv->ctrl.sqsize =
- APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+ anv->hw->max_queue_depth - 1; /* 0's based queue depth */
anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
dev_dbg(anv->dev, "Enabling controller now");
@@ -1282,8 +1327,9 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
* both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
* must be marked as reserved in the IO queue.
*/
- anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
- anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
+ if (anv->hw->has_lsq_nvmmu)
+ anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
+ anv->tagset.queue_depth = anv->hw->max_queue_depth - 1;
anv->tagset.timeout = NVME_IO_TIMEOUT;
anv->tagset.numa_node = NUMA_NO_NODE;
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
@@ -1307,6 +1353,7 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv,
struct apple_nvme_queue *q)
{
unsigned int depth = apple_nvme_queue_depth(q);
+ size_t iosq_size;
q->cqes = dmam_alloc_coherent(anv->dev,
depth * sizeof(struct nvme_completion),
@@ -1314,22 +1361,28 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv,
if (!q->cqes)
return -ENOMEM;
- q->sqes = dmam_alloc_coherent(anv->dev,
- depth * sizeof(struct nvme_command),
+ if (anv->hw->has_lsq_nvmmu)
+ iosq_size = depth * sizeof(struct nvme_command);
+ else
+ iosq_size = depth << APPLE_NVME_IOSQES;
+
+ q->sqes = dmam_alloc_coherent(anv->dev, iosq_size,
&q->sq_dma_addr, GFP_KERNEL);
if (!q->sqes)
return -ENOMEM;
- /*
- * We need the maximum queue depth here because the NVMMU only has a
- * single depth configuration shared between both queues.
- */
- q->tcbs = dmam_alloc_coherent(anv->dev,
- APPLE_ANS_MAX_QUEUE_DEPTH *
- sizeof(struct apple_nvmmu_tcb),
- &q->tcb_dma_addr, GFP_KERNEL);
- if (!q->tcbs)
- return -ENOMEM;
+ if (anv->hw->has_lsq_nvmmu) {
+ /*
+ * We need the maximum queue depth here because the NVMMU only
+ * has a single depth configuration shared between both queues.
+ */
+ q->tcbs = dmam_alloc_coherent(anv->dev,
+ anv->hw->max_queue_depth *
+ sizeof(struct apple_nvmmu_tcb),
+ &q->tcb_dma_addr, GFP_KERNEL);
+ if (!q->tcbs)
+ return -ENOMEM;
+ }
/*
* initialize phase to make sure the allocated and empty memory
@@ -1413,6 +1466,12 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
anv->adminq.is_adminq = true;
platform_set_drvdata(pdev, anv);
+ anv->hw = of_device_get_match_data(&pdev->dev);
+ if (!anv->hw) {
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
ret = apple_nvme_attach_genpd(anv);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to attach power domains");
@@ -1444,10 +1503,17 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
goto put_dev;
}
- anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
- anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
- anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
- anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ if (anv->hw->has_lsq_nvmmu) {
+ anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ } else {
+ anv->adminq.sq_db = anv->mmio_nvme + NVME_REG_DBS;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + NVME_REG_DBS + 8;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ }
anv->sart = devm_apple_sart_get(dev);
if (IS_ERR(anv->sart)) {
@@ -1625,8 +1691,19 @@ static int apple_nvme_suspend(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
apple_nvme_resume);
+static const struct apple_nvme_hw apple_nvme_t8015_hw = {
+ .has_lsq_nvmmu = false,
+ .max_queue_depth = 16,
+};
+
+static const struct apple_nvme_hw apple_nvme_t8103_hw = {
+ .has_lsq_nvmmu = true,
+ .max_queue_depth = 64,
+};
+
static const struct of_device_id apple_nvme_of_match[] = {
- { .compatible = "apple,nvme-ans2" },
+ { .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw },
+ { .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw },
{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
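
The t8015 path above submits through a conventional ring: commands are
copied into the I/O queue at a 2^APPLE_NVME_IOSQES = 128-byte stride,
the tail wraps at max_queue_depth, and the new tail is written to the
doorbell. A standalone userspace sketch of that indexing (assumed
names, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IOSQES		7	/* log2(128): I/O SQ entry stride */
#define QUEUE_DEPTH	16	/* t8015 max_queue_depth in the diff */

struct sq {
	uint8_t buf[QUEUE_DEPTH << IOSQES];	/* 16 slots * 128 bytes */
	uint16_t tail;
	volatile uint32_t doorbell;		/* stands in for sq_db MMIO */
};

static void submit(struct sq *q, const void *cmd, size_t cmdlen)
{
	/* Copy the 64-byte command into a 128-byte slot... */
	memcpy(q->buf + ((size_t)q->tail << IOSQES), cmd, cmdlen);

	/* ...advance the tail with wraparound... */
	if (++q->tail == QUEUE_DEPTH)
		q->tail = 0;

	/* ...and tell the controller about the new tail. */
	q->doorbell = q->tail;
}

int main(void)
{
	struct sq q = { .tail = 0 };
	uint8_t cmd[64] = { 0x02 /* opcode */ };
	int i;

	for (i = 0; i < 20; i++)
		submit(&q, cmd, sizeof(cmd));
	printf("tail after 20 submissions: %u\n", q.doorbell);	/* 4 */
	return 0;
}
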
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 201fc8809a62..012fcfc79a73 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -331,9 +331,10 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
} else {
memset(chap->c2, 0, chap->hash_len);
}
- if (ctrl->opts->concat)
+ if (ctrl->opts->concat) {
chap->s2 = 0;
- else
+ chap->bi_directional = false;
+ } else
chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6b7493934535..fa4181d7de73 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1807,12 +1807,12 @@ static void nvme_release(struct gendisk *disk)
nvme_ns_release(disk->private_data);
}
-int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
return 0;
}
@@ -3167,6 +3167,11 @@ static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
return ctrl->cntrltype == NVME_CTRL_ADMIN;
}
+static inline bool nvme_is_io_ctrl(struct nvme_ctrl *ctrl)
+{
+ return !nvme_discovery_ctrl(ctrl) && !nvme_admin_ctrl(ctrl);
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -3369,7 +3374,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
else
ctrl->max_zeroes_sectors = 0;
- if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+ if (!nvme_is_io_ctrl(ctrl) ||
!nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) ||
test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
return 0;
@@ -3491,14 +3496,14 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
return -EINVAL;
}
- if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
+ if (nvme_is_io_ctrl(ctrl) && ctrl->ioccsz < 4) {
dev_err(ctrl->device,
"I/O queue command capsule supported size %d < 4\n",
ctrl->ioccsz);
return -EINVAL;
}
- if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
+ if (nvme_is_io_ctrl(ctrl) && ctrl->iorcsz < 1) {
dev_err(ctrl->device,
"I/O queue response capsule supported size %d < 1\n",
ctrl->iorcsz);
@@ -4990,8 +4995,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
* checking that they started once before, hence are reconnecting back.
*/
if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
- nvme_discovery_ctrl(ctrl))
+ nvme_discovery_ctrl(ctrl)) {
+ if (!ctrl->kato) {
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = NVME_DEFAULT_KATO;
+ nvme_start_keep_alive(ctrl);
+ }
nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+ }
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 3e12d4683ac7..03987f497a5b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3032,11 +3032,17 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
++ctrl->ctrl.nr_reconnects;
- if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ spin_lock_irqsave(&ctrl->rport->lock, flags);
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
return -ENODEV;
+ }
- if (nvme_fc_ctlr_active_on_rport(ctrl))
+ if (nvme_fc_ctlr_active_on_rport(ctrl)) {
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
return -ENOTUNIQ;
+ }
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: create association : host wwpn 0x%016llx "
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 6b3ac8ae3f34..c212fa952c0f 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -142,14 +142,9 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
-
if (ret)
return ret;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
-
if (has_metadata) {
ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
if (ret)
@@ -410,7 +405,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
+ io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cfd2b5b90b91..102fae6a231c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -936,7 +936,7 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_id_ns **id);
-int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_attr_groups[];
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2c6d9506b172..c916176bd9f0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -172,9 +172,7 @@ struct nvme_dev {
u32 last_ps;
bool hmb;
struct sg_table *hmb_sgt;
-
mempool_t *dmavec_mempool;
- mempool_t *iod_meta_mempool;
/* shadow doorbell buffer support: */
__le32 *dbbuf_dbs;
@@ -261,6 +259,9 @@ enum nvme_iod_flags {
/* single segment dma mapping */
IOD_SINGLE_SEGMENT = 1U << 2,
+
+ /* Metadata using non-coalesced MPTR */
+ IOD_SINGLE_META_SEGMENT = 1U << 5,
};
struct nvme_dma_vec {
@@ -284,7 +285,8 @@ struct nvme_iod {
unsigned int nr_dma_vecs;
dma_addr_t meta_dma;
- struct sg_table meta_sgt;
+ unsigned int meta_total_len;
+ struct dma_iova_state meta_dma_state;
struct nvme_sgl_desc *meta_descriptor;
};
@@ -641,6 +643,11 @@ static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
return nvmeq->descriptor_pools.large;
}
+static inline bool nvme_pci_cmd_use_meta_sgl(struct nvme_command *cmd)
+{
+ return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG;
+}
+
static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd)
{
return cmd->common.flags &
@@ -690,25 +697,52 @@ static void nvme_free_prps(struct request *req)
mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
}
-static void nvme_free_sgls(struct request *req)
+static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
+ struct nvme_sgl_desc *sg_list)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ enum dma_data_direction dir = rq_dma_dir(req);
+ unsigned int len = le32_to_cpu(sge->length);
struct device *dma_dev = nvmeq->dev->dev;
- dma_addr_t sqe_dma_addr = le64_to_cpu(iod->cmd.common.dptr.sgl.addr);
- unsigned int sqe_dma_len = le32_to_cpu(iod->cmd.common.dptr.sgl.length);
- struct nvme_sgl_desc *sg_list = iod->descriptors[0];
+ unsigned int i;
+
+ if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
+ dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+ return;
+ }
+
+ for (i = 0; i < len / sizeof(*sg_list); i++)
+ dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
+ le32_to_cpu(sg_list[i].length), dir);
+}
+
+static void nvme_unmap_metadata(struct request *req)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
enum dma_data_direction dir = rq_dma_dir(req);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct device *dma_dev = nvmeq->dev->dev;
+ struct nvme_sgl_desc *sge = iod->meta_descriptor;
- if (iod->nr_descriptors) {
- unsigned int nr_entries = sqe_dma_len / sizeof(*sg_list), i;
+ if (iod->flags & IOD_SINGLE_META_SEGMENT) {
+ dma_unmap_page(dma_dev, iod->meta_dma,
+ rq_integrity_vec(req).bv_len,
+ rq_dma_dir(req));
+ return;
+ }
- for (i = 0; i < nr_entries; i++)
- dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
- le32_to_cpu(sg_list[i].length), dir);
- } else {
- dma_unmap_page(dma_dev, sqe_dma_addr, sqe_dma_len, dir);
+ if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
+ iod->meta_total_len)) {
+ if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
+ nvme_free_sgls(req, sge, &sge[1]);
+ else
+ dma_unmap_page(dma_dev, iod->meta_dma,
+ iod->meta_total_len, dir);
}
+
+ if (iod->meta_descriptor)
+ dma_pool_free(nvmeq->descriptor_pools.small,
+ iod->meta_descriptor, iod->meta_dma);
}
static void nvme_unmap_data(struct request *req)
@@ -727,7 +761,8 @@ static void nvme_unmap_data(struct request *req)
if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
- nvme_free_sgls(req);
+ nvme_free_sgls(req, iod->descriptors[0],
+ &iod->cmd.common.dptr.sgl);
else
nvme_free_prps(req);
}
@@ -1007,70 +1042,70 @@ static blk_status_t nvme_map_data(struct request *req)
return nvme_pci_setup_data_prp(req, &iter);
}
-static void nvme_pci_sgl_set_data_sg(struct nvme_sgl_desc *sge,
- struct scatterlist *sg)
-{
- sge->addr = cpu_to_le64(sg_dma_address(sg));
- sge->length = cpu_to_le32(sg_dma_len(sg));
- sge->type = NVME_SGL_FMT_DATA_DESC << 4;
-}
-
static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
+ unsigned int entries = req->nr_integrity_segments;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_dev *dev = nvmeq->dev;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sgl, *sg;
- unsigned int entries;
+ struct blk_dma_iter iter;
dma_addr_t sgl_dma;
- int rc, i;
+ int i = 0;
- iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC);
- if (!iod->meta_sgt.sgl)
- return BLK_STS_RESOURCE;
+ if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev,
+ &iod->meta_dma_state, &iter))
+ return iter.status;
- sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments);
- iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req,
- iod->meta_sgt.sgl);
- if (!iod->meta_sgt.orig_nents)
- goto out_free_sg;
+ if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
+ entries = 1;
- rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req),
- DMA_ATTR_NO_WARN);
- if (rc)
- goto out_free_sg;
+ /*
+ * The NVMe MPTR descriptor has an implicit length that the host and
+ * device must agree on to avoid data/memory corruption. We trust the
+ * kernel allocated correctly based on the format's parameters, so use
+ * the more efficient MPTR to avoid extra dma pool allocations for the
+ * SGL indirection.
+ *
+ * But for user commands, we don't necessarily know what they do, so
+ * the driver can't validate the metadata buffer size. The SGL
+ * descriptor provides an explicit length, so we're relying on that
+ * mechanism to catch any misunderstandings between the application and
+ * device.
+ */
+ if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
+ iod->cmd.common.metadata = cpu_to_le64(iter.addr);
+ iod->meta_total_len = iter.len;
+ iod->meta_dma = iter.addr;
+ iod->meta_descriptor = NULL;
+ return BLK_STS_OK;
+ }
sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC,
&sgl_dma);
if (!sg_list)
- goto out_unmap_sg;
+ return BLK_STS_RESOURCE;
- entries = iod->meta_sgt.nents;
iod->meta_descriptor = sg_list;
iod->meta_dma = sgl_dma;
-
iod->cmd.common.flags = NVME_CMD_SGL_METASEG;
iod->cmd.common.metadata = cpu_to_le64(sgl_dma);
-
- sgl = iod->meta_sgt.sgl;
if (entries == 1) {
- nvme_pci_sgl_set_data_sg(sg_list, sgl);
+ iod->meta_total_len = iter.len;
+ nvme_pci_sgl_set_data(sg_list, &iter);
return BLK_STS_OK;
}
sgl_dma += sizeof(*sg_list);
- nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries);
- for_each_sg(sgl, sg, entries, i)
- nvme_pci_sgl_set_data_sg(&sg_list[i + 1], sg);
-
- return BLK_STS_OK;
+ do {
+ nvme_pci_sgl_set_data(&sg_list[++i], &iter);
+ iod->meta_total_len += iter.len;
+ } while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter));
-out_unmap_sg:
- dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
-out_free_sg:
- mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
- return BLK_STS_RESOURCE;
+ nvme_pci_sgl_set_seg(sg_list, sgl_dma, i);
+ if (unlikely(iter.status))
+ nvme_unmap_metadata(req);
+ return iter.status;
}
static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
@@ -1083,6 +1118,7 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma);
+ iod->flags |= IOD_SINGLE_META_SEGMENT;
return BLK_STS_OK;
}
@@ -1104,7 +1140,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
iod->flags = 0;
iod->nr_descriptors = 0;
iod->total_len = 0;
- iod->meta_sgt.nents = 0;
+ iod->meta_total_len = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
@@ -1215,25 +1251,6 @@ static void nvme_queue_rqs(struct rq_list *rqlist)
*rqlist = requeue_list;
}
-static __always_inline void nvme_unmap_metadata(struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
-
- if (!iod->meta_sgt.nents) {
- dma_unmap_page(dev->dev, iod->meta_dma,
- rq_integrity_vec(req).bv_len,
- rq_dma_dir(req));
- return;
- }
-
- dma_pool_free(nvmeq->descriptor_pools.small, iod->meta_descriptor,
- iod->meta_dma);
- dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
- mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
-}
-
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
if (blk_integrity_rq(req))
@@ -3039,7 +3056,6 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
- size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);
size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS;
dev->dmavec_mempool = mempool_create_node(1,
@@ -3048,17 +3064,7 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
dev_to_node(dev->dev));
if (!dev->dmavec_mempool)
return -ENOMEM;
-
- dev->iod_meta_mempool = mempool_create_node(1,
- mempool_kmalloc, mempool_kfree,
- (void *)meta_size, GFP_KERNEL,
- dev_to_node(dev->dev));
- if (!dev->iod_meta_mempool)
- goto free;
return 0;
-free:
- mempool_destroy(dev->dmavec_mempool);
- return -ENOMEM;
}
static void nvme_free_tagset(struct nvme_dev *dev)
@@ -3324,10 +3330,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
+ * (Note for testing: the Samsung 990 Evo Plus has the same PCI ID.)
*/
if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
+ dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
@@ -3508,7 +3516,6 @@ out_disable:
nvme_free_queues(dev, 0);
out_release_iod_mempool:
mempool_destroy(dev->dmavec_mempool);
- mempool_destroy(dev->iod_meta_mempool);
out_dev_unmap:
nvme_dev_unmap(dev);
out_uninit_ctrl:
@@ -3572,7 +3579,6 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dbbuf_dma_free(dev);
nvme_free_queues(dev, 0);
mempool_destroy(dev->dmavec_mempool);
- mempool_destroy(dev->iod_meta_mempool);
nvme_release_descriptor_pools(dev);
nvme_dev_unmap(dev);
nvme_uninit_ctrl(&dev->ctrl);
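
The reworked metadata path builds an NVMe SGL: one (last-)segment
descriptor whose explicit length covers an array of data descriptors,
which is the bound nvme_free_sgls() above walks on unmap. A userspace
sketch of that encoding, following the descriptor layout and format
values in include/linux/nvme.h (addresses are placeholders):

#include <stdint.h>
#include <stdio.h>

struct nvme_sgl_desc {
	uint64_t addr;		/* little-endian in a real SQE */
	uint32_t length;
	uint8_t  rsvd[3];
	uint8_t  type;		/* descriptor format in the high nibble */
};

#define NVME_SGL_FMT_DATA_DESC		0x00
#define NVME_SGL_FMT_LAST_SEG_DESC	0x03

int main(void)
{
	struct nvme_sgl_desc list[3];

	/* Two data descriptors for two DMA-mapped metadata ranges. */
	list[1] = (struct nvme_sgl_desc){
		.addr = 0x80001000, .length = 4096,
		.type = NVME_SGL_FMT_DATA_DESC << 4,
	};
	list[2] = (struct nvme_sgl_desc){
		.addr = 0x80010000, .length = 512,
		.type = NVME_SGL_FMT_DATA_DESC << 4,
	};

	/*
	 * The first entry is the segment descriptor: its length is the
	 * byte size of the descriptor array that follows, giving the
	 * device (and the unmap path) an explicit bound.
	 */
	list[0] = (struct nvme_sgl_desc){
		.addr = 0x80000100,	/* DMA address of &list[1] */
		.length = 2 * sizeof(struct nvme_sgl_desc),
		.type = NVME_SGL_FMT_LAST_SEG_DESC << 4,
	};

	printf("segment covers %u descriptor bytes\n", list[0].length);
	return 0;
}
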
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c0fe8cfb7229..1413788ca7d5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2250,6 +2250,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_cleanup_tagset;
+ if (ctrl->opts->concat && !ctrl->tls_pskid)
+ return 0;
+
error = nvme_enable_ctrl(ctrl);
if (error)
goto out_stop_queue;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 0dd7bd99afa3..5d7d483bfbe3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -513,9 +513,6 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
return 0;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct nvmet_ns *ns)
{
@@ -523,6 +520,8 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
@@ -1539,15 +1538,14 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
return false;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
struct device *p2p_client)
{
struct nvmet_ns *ns;
unsigned long idx;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!p2p_client)
return;
@@ -1557,14 +1555,13 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
pci_dev_put(radix_tree_deref_slot(slot));
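
The target/core.c hunks replace "caller must hold the lock" comments
with lockdep_assert_held(), turning the documented precondition into a
check that fires on CONFIG_PROVE_LOCKING builds. A minimal kernel-style
sketch of the pattern (hypothetical names):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_count;

/* Caller must hold example_lock. */
static void example_update_locked(int delta)
{
	/* Compiles to nothing unless lockdep is enabled; otherwise
	 * splats if a caller forgot to take the lock. */
	lockdep_assert_held(&example_lock);
	example_count += delta;
}

static void example_update(int delta)
{
	mutex_lock(&example_lock);
	example_update_locked(delta);
	mutex_unlock(&example_lock);
}
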
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a9b18c051f5b..7d84527d5a43 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
int ls_error;
struct list_head lsreq_list; /* tgtport->ls_req_list */
bool req_queued;
+
+ struct work_struct put_work;
};
@@ -111,8 +113,6 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
-
- struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
-static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+static void nvmet_fc_put_lsop_work(struct work_struct *work)
{
- struct nvmet_fc_tgtport *tgtport =
- container_of(work, struct nvmet_fc_tgtport, put_work);
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(work, struct nvmet_fc_ls_req_op, put_work);
- nvmet_fc_tgtport_put(tgtport);
+ nvmet_fc_tgtport_put(lsop->tgtport);
+ kfree(lsop);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
DMA_BIDIRECTIONAL);
out_putwork:
- queue_work(nvmet_wq, &tgtport->put_work);
+ queue_work(nvmet_wq, &lsop->put_work);
}
static int
@@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
lsreq->done = done;
lsop->req_queued = false;
INIT_LIST_HEAD(&lsop->lsreq_list);
+ INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);
lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
lsreq->rqstlen + lsreq->rsplen,
@@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
__nvmet_fc_finish_ls_req(lsop);
/* fc-nvme target doesn't care about success or failure of cmd */
-
- kfree(lsop);
}
/*
@@ -1075,6 +1075,14 @@ nvmet_fc_delete_assoc_work(struct work_struct *work)
static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
+ int terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
+
nvmet_fc_tgtport_get(assoc->tgtport);
if (!queue_work(nvmet_wq, &assoc->del_work))
nvmet_fc_tgtport_put(assoc->tgtport);
@@ -1202,13 +1210,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
unsigned long flags;
- int i, terminating;
-
- terminating = atomic_xchg(&assoc->terminating, 1);
-
- /* if already terminating, do nothing */
- if (terminating)
- return;
+ int i;
spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
@@ -1410,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
- INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
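
Moving the atomic_xchg() of assoc->terminating into
nvmet_fc_schedule_delete_assoc() above makes the flag a claim-once
gate: only the first caller sees the old value 0 and queues the delete
work. A userspace C11 sketch of the idiom:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int terminating;

static void schedule_delete(void)
{
	/* Set the flag and fetch the old value in one atomic step. */
	if (atomic_exchange(&terminating, 1))
		return;	/* someone already scheduled the teardown */

	printf("tearing down association\n");	/* runs exactly once */
}

int main(void)
{
	schedule_delete();
	schedule_delete();	/* second call is a no-op */
	return 0;
}
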
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 257b497d515a..5dffcc5becae 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -496,13 +496,15 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
if (!targetport) {
/*
* The target port is gone. The target doesn't expect any
- * response anymore and the ->done call is not valid
- * because the resources have been freed by
- * nvmet_fc_free_pending_reqs.
+ * response anymore and thus the lsreq can no longer be accessed.
*
* We end up here from delete association exchange:
* nvmet_fc_xmt_disconnect_assoc sends an async request.
+ *
+ * Return success because this is what LLDDs do; silently
+ * drop the response.
*/
+ lsrsp->done(lsrsp);
kmem_cache_free(lsreq_cache, tls_req);
return 0;
}