Diffstat (limited to 'drivers/nvme/target')
 drivers/nvme/target/core.c        |  2 --
 drivers/nvme/target/io-cmd-bdev.c |  6 +++++-
 drivers/nvme/target/nvmet.h       |  2 ++
 drivers/nvme/target/passthru.c    |  4 ++--
 drivers/nvme/target/pci-epf.c     | 25 +++++++++++++++++--------
 drivers/nvme/target/tcp.c         |  4 ++--
 drivers/nvme/target/zns.c         |  2 +-
 7 files changed, 29 insertions(+), 16 deletions(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 175c5b6d4dd5..884286f90688 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -581,8 +581,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ns->enabled)
goto out_unlock;
- ret = -EMFILE;
-
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
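Note: the dropped ret = -EMFILE; was a dead store: ret is overwritten by the nvmet_bdev_ns_enable() call on the next line, so that value could never reach a caller. The backend fallback itself is unchanged: the bdev helper returns -ENOTBLK when the configured path is not a block device, and the file backend is tried instead.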
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index eba42df2f821..8d246b8ca604 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -46,6 +46,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+ /* Set WZDS and DRB if device supports unmapped write zeroes */
+ if (bdev_write_zeroes_unmap_sectors(bdev))
+ id->dlfeat = (1 << 3) | 0x1;
}
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -65,7 +69,7 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
return;
if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
- ns->metadata_size = bi->tuple_size;
+ ns->metadata_size = bi->metadata_size;
if (bi->flags & BLK_INTEGRITY_REF_TAG)
ns->pi_type = NVME_NS_DPS_PI_TYPE1;
else
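Note: the id->dlfeat assignment packs two Identify Namespace DLFEAT fields: bits 2:0 (deallocate read behavior, where 001b means deallocated blocks read back as all zeroes) and bit 3 (the Write Zeroes command may deallocate). A sketch with symbolic names; the macro names below are ours, not the kernel's:

	/* Hypothetical names for the DLFEAT bits set as raw values above. */
	#define DLFEAT_READ_ZEROES      0x1       /* bits 2:0 = 001b: deallocated LBAs read as zeroes */
	#define DLFEAT_WRZ_DEALLOCATES  (1 << 3)  /* bit 3: Write Zeroes may deallocate */

	if (bdev_write_zeroes_unmap_sectors(bdev))
		id->dlfeat = DLFEAT_WRZ_DEALLOCATES | DLFEAT_READ_ZEROES;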
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index df69a9dee71c..51df72f5e89b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -867,6 +867,8 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
if (bio != &req->b.inline_bio)
bio_put(bio);
+ else
+ bio_uninit(bio);
}
#ifdef CONFIG_NVME_TARGET_TCP_TLS
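Note: a bio embedded in the request (&req->b.inline_bio) is never freed through bio_put(), so resources attached to it (for example an integrity payload) would leak without an explicit bio_uninit(). The shape of the pattern, with a hypothetical container standing in for nvmet_req:

	struct my_req {
		struct bio inline_bio;          /* embedded: not refcount-freed */
	};

	static void my_req_bio_put(struct my_req *req, struct bio *bio)
	{
		if (bio != &req->inline_bio)
			bio_put(bio);           /* heap bio: drop the reference */
		else
			bio_uninit(bio);        /* embedded bio: release attachments only */
	}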
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b7515c53829b..3b4b0df8f879 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -106,7 +106,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
pctrl->max_hw_sectors);
/*
- * nvmet_passthru_map_sg is limitted to using a single bio so limit
+ * nvmet_passthru_map_sg is limited to using a single bio so limit
* the mdts based on BIO_MAX_VECS as well
*/
max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
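Note: as a sanity check of this clamp, on a 4 KiB-page kernel where BIO_MAX_VECS is 256 and PAGE_SECTORS_SHIFT is PAGE_SHIFT - 9 = 3, BIO_MAX_VECS << PAGE_SECTORS_SHIFT = 256 << 3 = 2048 sectors of 512 bytes, i.e. 1 MiB, so a single-bio passthru command is capped at 1 MiB regardless of the MDTS the underlying controller advertises.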
@@ -147,7 +147,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
* When passthru controller is setup using nvme-loop transport it will
* export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
* the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
- * code path with duplicate ctr subsynqn. In order to prevent that we
+ * code path with duplicate ctrl subsysnqn. In order to prevent that we
* mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
*/
memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index a4295a5b8d28..2e78397a7373 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1242,8 +1242,11 @@ static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
iod->status = le16_to_cpu(req->cqe->status) >> 1;
- /* If we have no data to transfer, directly complete the command. */
- if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
+ /*
+ * If the command failed or we have no data to transfer, complete the
+ * command immediately.
+ */
+ if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
nvmet_pci_epf_complete_iod(iod);
return;
}
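Note: DMA is now only started for a command that succeeded and has completion data flowing toward the host; everything else completes on the spot. As a sketch, with a hypothetical helper mirroring the condition:

	static bool iod_needs_data_transfer(struct nvmet_pci_epf_iod *iod)
	{
		return iod->status == 0 &&              /* command succeeded */
		       iod->data_len &&                 /* there is data to move */
		       iod->dma_dir == DMA_TO_DEVICE;   /* host-bound (read-style) data */
	}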
@@ -1604,8 +1607,13 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
goto complete;
}
+ /*
+ * If nvmet_req_init() fails (e.g., unsupported opcode) it will call
+ * __nvmet_req_complete() internally which will call
+ * nvmet_pci_epf_queue_response() and will complete the command directly.
+ */
if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
- goto complete;
+ return;
iod->data_len = nvmet_req_transfer_len(req);
if (iod->data_len) {
@@ -1643,10 +1651,11 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
wait_for_completion(&iod->done);
- if (iod->status == NVME_SC_SUCCESS) {
- WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
- nvmet_pci_epf_transfer_iod_data(iod);
- }
+ if (iod->status != NVME_SC_SUCCESS)
+ return;
+
+ WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
+ nvmet_pci_epf_transfer_iod_data(iod);
complete:
nvmet_pci_epf_complete_iod(iod);
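Note: this return-on-error pairs with the nvmet_pci_epf_queue_response() hunk above. A failed command is now completed directly from the response path, so the worker must not fall through to nvmet_pci_epf_complete_iod() a second time; only a successful command with host-bound data still takes the transfer-then-complete path.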
@@ -1860,7 +1869,7 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
- ctrl->io_sqes, sizeof(struct nvme_completion));
+ ctrl->io_cqes, sizeof(struct nvme_completion));
goto err;
}
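Note: the message now prints the value that was actually checked. CC.IOCQES encodes the I/O completion queue entry size as a power of two, so ctrl->io_cqes = 1 << IOCQES; a struct nvme_completion is 16 bytes, hence any IOCQES below 4 (1 << 4 = 16) is rejected here.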
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 688033b88d38..470bf37e5a63 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1928,10 +1928,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
struct sock *sk = queue->sock->sk;
/* Restore the default callbacks before starting upcall */
- read_lock_bh(&sk->sk_callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = NULL;
sk->sk_data_ready = port->data_ready;
- read_unlock_bh(&sk->sk_callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
if (!nvmet_tcp_try_peek_pdu(queue)) {
if (!nvmet_tcp_tls_handshake(queue))
return;
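Note: sk_callback_lock is an rwlock, and the socket's data-ready path runs under the read side in softirq context, so a writer swapping sk_user_data and sk_data_ready must take the write side rather than join the readers. The pairing, as an illustrative sketch (not the driver source):

	/* reader: e.g. a ->sk_data_ready invocation dereferencing sk_user_data */
	read_lock_bh(&sk->sk_callback_lock);
	/* ... use sk->sk_user_data ... */
	read_unlock_bh(&sk->sk_callback_lock);

	/* writer: restoring the original callbacks, as in the hunk above */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = port->data_ready;
	write_unlock_bh(&sk->sk_callback_lock);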
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 29a60fabfcc8..15a579cf528c 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -541,7 +541,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
struct bio *bio;
int sg_cnt;
- /* Request is completed on len mismatch in nvmet_check_transter_len() */
+ /* Request is completed on len mismatch in nvmet_check_transfer_len() */
if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
return;