author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-15 21:20:52 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-15 21:20:52 -0700
commit    9637d517347e80ee2fe1c5d8ce45ba1b88d8b5cd (patch)
tree      3cee2a1d8b3c6ea466924517307a1f98ada1e92f /drivers/nvme/target
parent    273cbf61c3ddee9574ef1f4959b9bc6db5b24271 (diff)
parent    787c79d6393fc028887cc1b6066915f0b094e92f (diff)
Merge tag 'for-linus-20190715' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
 "A later pull request with some followup items. I had some vacation
  coming up to the merge window, so certain items were delayed a bit.
  This pull request also contains fixes that came in within the last
  few days of the merge window, which I didn't want to push right
  before sending you a pull request.

  This contains:

   - NVMe pull request, mostly fixes, but also a few minor items on
     the feature side that were timing constrained (Christoph et al)

   - Report zones fixes (Damien)

   - Removal of dead code (Damien)

   - Turn on cgroup psi memstall (Josef)

   - block cgroup MAINTAINERS entry (Konstantin)

   - Flush init fix (Josef)

   - blk-throttle low iops timing fix (Konstantin)

   - nbd resize fixes (Mike)

   - nbd 0 blocksize crash fix (Xiubo)

   - block integrity error leak fix (Wenwen)

   - blk-cgroup writeback and priority inheritance fixes (Tejun)"

* tag 'for-linus-20190715' of git://git.kernel.dk/linux-block: (42 commits)
  MAINTAINERS: add entry for block io cgroup
  null_blk: fixup ->report_zones() for !CONFIG_BLK_DEV_ZONED
  block: Limit zone array allocation size
  sd_zbc: Fix report zones buffer allocation
  block: Kill gfp_t argument of blkdev_report_zones()
  block: Allow mapping of vmalloc-ed buffers
  block/bio-integrity: fix a memory leak bug
  nvme: fix NULL deref for fabrics options
  nbd: add netlink reconfigure resize support
  nbd: fix crash when the blksize is zero
  block: Disable write plugging for zoned block devices
  block: Fix elevator name declaration
  block: Remove unused definitions
  nvme: fix regression upon hot device removal and insertion
  blk-throttle: fix zero wait time for iops throttled group
  block: Fix potential overflow in blk_report_zones()
  blkcg: implement REQ_CGROUP_PUNT
  blkcg, writeback: Implement wbc_blkcg_css()
  blkcg, writeback: Add wbc->no_cgroup_owner
  blkcg, writeback: Rename wbc_account_io() to wbc_account_cgroup_owner()
  ...
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/admin-cmd.c   |  3
-rw-r--r--  drivers/nvme/target/configfs.c    |  4
-rw-r--r--  drivers/nvme/target/fcloop.c      | 44
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 39
-rw-r--r--  drivers/nvme/target/nvmet.h       |  8
-rw-r--r--  drivers/nvme/target/trace.c       |  2
6 files changed, 76 insertions, 24 deletions
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 9f72d515fc4b..4dc12ea52f23 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -442,6 +442,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
break;
}
+ if (ns->bdev)
+ nvmet_bdev_set_limits(ns->bdev, id);
+
/*
* We just provide a single LBA format that matches what the
* underlying device reports.
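A note on the hunk above: nvmet_bdev_set_limits() is only called for block-device-backed namespaces (ns->bdev is NULL for file-backed ones), and it runs before the LBA format fields are filled in. As a minimal sketch of what the "single LBA format" comment refers to (field names are from struct nvme_id_ns; using ns->blksize_shift as the source is an assumption here):

	/*
	 * Advertise exactly one LBA format whose data size (a power of
	 * two, expressed as a shift) mirrors the backing device's block
	 * size. NLBAF is 0's based, so 0 means "one format".
	 */
	id->nlbaf = 0;                      /* one supported LBA format */
	id->flbas = 0;                      /* format 0 is in use */
	id->lbaf[0].ds = ns->blksize_shift; /* e.g. 9 for 512-byte blocks */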
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 08dd5af357f7..cd52b9f15376 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -588,8 +588,10 @@ static struct config_group *nvmet_ns_make(struct config_group *group,
goto out;
ret = -EINVAL;
- if (nsid == 0 || nsid == NVME_NSID_ALL)
+ if (nsid == 0 || nsid == NVME_NSID_ALL) {
+ pr_err("invalid nsid %#x", nsid);
goto out;
+ }
ret = -ENOMEM;
ns = nvmet_ns_alloc(subsys, nsid);
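The rejected values are the two NSIDs the NVMe specification reserves: 0 (invalid) and NVME_NSID_ALL (0xffffffff, the broadcast namespace ID). The same rule as a standalone predicate, with the helper name invented purely for illustration:

	/* Hypothetical helper: a usable NSID is neither zero nor broadcast. */
	static bool nvmet_nsid_is_usable(u32 nsid)
	{
		return nsid != 0 && nsid != NVME_NSID_ALL;
	}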
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index b8c1cc54a0db..b50b53db3746 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -434,7 +434,7 @@ fcloop_fcp_recv_work(struct work_struct *work)
int ret = 0;
bool aborted = false;
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
switch (tfcp_req->inistate) {
case INI_IO_START:
tfcp_req->inistate = INI_IO_ACTIVE;
@@ -443,11 +443,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
aborted = true;
break;
default:
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
WARN_ON(1);
return;
}
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
if (unlikely(aborted))
ret = -ECANCELED;
@@ -469,7 +469,7 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
struct nvmefc_fcp_req *fcpreq;
bool completed = false;
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
switch (tfcp_req->inistate) {
case INI_IO_ABORTED:
@@ -478,11 +478,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
completed = true;
break;
default:
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
WARN_ON(1);
return;
}
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
if (unlikely(completed)) {
/* remove reference taken in original abort downcall */
@@ -494,9 +494,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req);
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->fcpreq = NULL;
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
/* call_host_done releases reference for abort downcall */
@@ -513,10 +513,10 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
tfcp_req->inistate = INI_IO_COMPLETED;
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -535,7 +535,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
if (!rport->targetport)
return -ECONNREFUSED;
- tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
+ tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
if (!tfcp_req)
return -ENOMEM;
@@ -621,12 +621,12 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
int fcp_err = 0, active, aborted;
u8 op = tgt_fcpreq->op;
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
active = tfcp_req->active;
aborted = tfcp_req->aborted;
tfcp_req->active = true;
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
if (unlikely(active))
/* illegal - call while i/o active */
@@ -634,9 +634,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
if (unlikely(aborted)) {
/* target transport has aborted i/o prior */
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->active = false;
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
tgt_fcpreq->transferred_length = 0;
tgt_fcpreq->fcp_error = -ECANCELED;
tgt_fcpreq->done(tgt_fcpreq);
@@ -693,9 +693,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
}
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->active = false;
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
tgt_fcpreq->transferred_length = xfrlen;
tgt_fcpreq->fcp_error = fcp_err;
@@ -715,9 +715,9 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
* (one doing io, other doing abort) and only kills ops posted
* after the abort request
*/
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->aborted = true;
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
tfcp_req->status = NVME_SC_INTERNAL;
@@ -765,7 +765,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
return;
/* break initiator/target relationship for io */
- spin_lock(&tfcp_req->reqlock);
+ spin_lock_irq(&tfcp_req->reqlock);
switch (tfcp_req->inistate) {
case INI_IO_START:
case INI_IO_ACTIVE:
@@ -775,11 +775,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
abortio = false;
break;
default:
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
WARN_ON(1);
return;
}
- spin_unlock(&tfcp_req->reqlock);
+ spin_unlock_irq(&tfcp_req->reqlock);
if (abortio)
/* leave the reference while the work item is scheduled */
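The pattern across this file: every spin_lock()/spin_unlock() on tfcp_req->reqlock becomes the _irq variant, and the request allocation switches from GFP_KERNEL to GFP_ATOMIC, which together suggest these paths can now run with the lock also taken from contexts where sleeping and interrupt delivery are not allowed. A short sketch of the two irq-aware locking idioms involved (tfcp_req stands in for any structure with such a lock):

	unsigned long flags;

	/* _irq: disables local interrupts; use when irqs are known enabled. */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);   /* unconditionally re-enables irqs */

	/* _irqsave: preserves the prior irq state; safe from any context. */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);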
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 7a1cf6437a6a..de0bff70ebb6 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -8,6 +8,45 @@
#include <linux/module.h>
#include "nvmet.h"
+void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
+{
+ const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
+ /* Number of logical blocks per physical block. */
+ const u32 lpp = ql->physical_block_size / ql->logical_block_size;
+ /* Logical blocks per physical block, 0's based. */
+ const __le16 lpp0b = to0based(lpp);
+
+ /*
+ * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
+ * NAWUPF, and NACWU are defined for this namespace and should be
+ * used by the host for this namespace instead of the AWUN, AWUPF,
+ * and ACWU fields in the Identify Controller data structure. If
+ * any of these fields are zero that means that the corresponding
+ * field from the identify controller data structure should be used.
+ */
+ id->nsfeat |= 1 << 1;
+ id->nawun = lpp0b;
+ id->nawupf = lpp0b;
+ id->nacwu = lpp0b;
+
+ /*
+ * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
+ * NOWS are defined for this namespace and should be used by
+ * the host for I/O optimization.
+ */
+ id->nsfeat |= 1 << 4;
+ /* NPWG = Namespace Preferred Write Granularity. 0's based */
+ id->npwg = lpp0b;
+ /* NPWA = Namespace Preferred Write Alignment. 0's based */
+ id->npwa = id->npwg;
+ /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
+ id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
+ /* NPDA = Namespace Preferred Deallocate Alignment */
+ id->npda = id->npdg;
+ /* NOWS = Namespace Optimal Write Size */
+ id->nows = to0based(ql->io_opt / ql->logical_block_size);
+}
+
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
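To make the arithmetic in nvmet_bdev_set_limits() concrete, here is a worked example for a hypothetical device with 512-byte logical blocks, 4096-byte physical blocks, a 4096-byte discard granularity, and a 128 KiB optimal I/O size (values shown before the cpu_to_le16() encoding):

	/*
	 * logical_block_size  = 512
	 * physical_block_size = 4096  ->  lpp = 4096 / 512 = 8
	 *
	 * to0based(8)            == 7    -> nawun, nawupf, nacwu, npwg, npwa
	 * to0based(4096 / 512)   == 7    -> npdg, npda
	 * to0based(131072 / 512) == 255  -> nows (256 LBAs, 0's based)
	 */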
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index dc270944bb25..6ee66c610739 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -365,6 +365,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
@@ -492,4 +493,11 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
}
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+
+/* Convert a 32-bit number to a 16-bit 0's based number */
+static inline __le16 to0based(u32 a)
+{
+ return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
+}
+
#endif /* _NVMET_H */
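The clamping in to0based() keeps the result within what a 0's based 16-bit field can express: inputs of 0 and 1 both map to 0, and anything at or above 1 << 16 saturates at 65535. A few values computed from the expression above (shown before the little-endian conversion):

	/*
	 * to0based(0)      == 0       (max(1, min(65536, 0)) - 1)
	 * to0based(1)      == 0
	 * to0based(8)      == 7
	 * to0based(65536)  == 65535
	 * to0based(100000) == 65535   (clamped to 1 << 16, then minus 1)
	 */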
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index cdcdd14c6408..6af11d493271 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -146,7 +146,7 @@ static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
- trace_seq_printf(p, "spcecific=%*ph", 24, spc);
+ trace_seq_printf(p, "specific=%*ph", 24, spc);
trace_seq_putc(p, 0);
return ret;
}