Diffstat:
 Documentation/scsi/dc395x.rst | 11
 MAINTAINERS | 13
 block/blk-core.c | 2
 drivers/md/dm.c | 69
 drivers/nvme/host/Makefile | 2
 drivers/nvme/host/core.c | 149
 drivers/nvme/host/nvme.h | 2
 drivers/nvme/host/pr.c | 315
 drivers/s390/block/dasd.c | 7
 drivers/scsi/bfa/bfa_fcbuild.c | 4
 drivers/scsi/bfa/bfa_fcs.c | 4
 drivers/scsi/bfa/bfa_fcs_lport.c | 20
 drivers/scsi/bfa/bfa_ioc.c | 2
 drivers/scsi/bfa/bfa_svc.c | 2
 drivers/scsi/bfa/bfad.c | 10
 drivers/scsi/bfa/bfad_attr.c | 2
 drivers/scsi/bfa/bfad_bsg.c | 4
 drivers/scsi/bfa/bfad_im.c | 2
 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 28
 drivers/scsi/libsas/sas_expander.c | 124
 drivers/scsi/lpfc/lpfc_attr.c | 4
 drivers/scsi/lpfc/lpfc_crtn.h | 4
 drivers/scsi/lpfc/lpfc_els.c | 30
 drivers/scsi/lpfc/lpfc_hbadisc.c | 24
 drivers/scsi/lpfc/lpfc_hw4.h | 11
 drivers/scsi/lpfc/lpfc_init.c | 26
 drivers/scsi/lpfc/lpfc_nvme.c | 44
 drivers/scsi/lpfc/lpfc_scsi.c | 3
 drivers/scsi/lpfc/lpfc_sli.c | 392
 drivers/scsi/lpfc/lpfc_sli4.h | 4
 drivers/scsi/lpfc/lpfc_version.h | 2
 drivers/scsi/megaraid/megaraid_sas.h | 8
 drivers/scsi/mpi3mr/mpi3mr_transport.c | 2
 drivers/scsi/pm8001/pm8001_init.c | 32
 drivers/scsi/pm8001/pm8001_sas.h | 1
 drivers/scsi/pm8001/pm80xx_hwi.c | 126
 drivers/scsi/qedf/qedf_main.c | 3
 drivers/scsi/qla2xxx/qla_attr.c | 13
 drivers/scsi/qla2xxx/qla_def.h | 21
 drivers/scsi/qla2xxx/qla_gbl.h | 2
 drivers/scsi/qla2xxx/qla_init.c | 264
 drivers/scsi/qla2xxx/qla_iocb.c | 33
 drivers/scsi/qla2xxx/qla_isr.c | 64
 drivers/scsi/qla2xxx/qla_mr.c | 20
 drivers/scsi/qla2xxx/qla_os.c | 130
 drivers/scsi/qla2xxx/qla_version.h | 4
 drivers/scsi/qla4xxx/ql4_mbx.c | 8
 drivers/scsi/qla4xxx/ql4_os.c | 14
 drivers/scsi/scsi_common.c | 43
 drivers/scsi/scsi_lib.c | 2
 drivers/scsi/sd.c | 130
 drivers/scsi/smartpqi/Kconfig | 2
 drivers/scsi/smartpqi/smartpqi.h | 6
 drivers/scsi/smartpqi/smartpqi_init.c | 284
 drivers/scsi/smartpqi/smartpqi_sas_transport.c | 34
 drivers/scsi/smartpqi/smartpqi_sis.c | 2
 drivers/scsi/smartpqi/smartpqi_sis.h | 2
 drivers/target/iscsi/iscsi_target_parameters.c | 4
 drivers/target/iscsi/iscsi_target_util.c | 4
 drivers/target/target_core_configfs.c | 10
 drivers/target/target_core_device.c | 6
 drivers/target/target_core_file.c | 4
 drivers/target/target_core_iblock.c | 275
 drivers/target/target_core_pr.c | 79
 drivers/target/target_core_rd.c | 4
 drivers/target/target_core_sbc.c | 13
 drivers/target/target_core_spc.c | 113
 drivers/ufs/core/ufs-hwmon.c | 2
 drivers/ufs/core/ufshcd.c | 20
 drivers/ufs/host/ufs-mediatek.c | 3
 include/linux/blk_types.h | 4
 include/linux/nvme.h | 51
 include/linux/pr.h | 25
 include/scsi/scsi_common.h | 13
 include/scsi/scsi_proto.h | 5
 include/target/target_core_backend.h | 8
 include/target/target_core_base.h | 3
77 files changed, 2260 insertions, 952 deletions
diff --git a/Documentation/scsi/dc395x.rst b/Documentation/scsi/dc395x.rst
index d779e782b1cb..8b06d8fc7a9c 100644
--- a/Documentation/scsi/dc395x.rst
+++ b/Documentation/scsi/dc395x.rst
@@ -11,13 +11,10 @@ be safe to use. Testing with hard disks has not been done to any
great degree and caution should be exercised if you want to attempt
to use this driver with hard disks.
-This is a 2.5 only driver. For a 2.4 driver please see the original
-driver (which this driver started from) at
-http://www.garloff.de/kurt/linux/dc395/
-
-Problems, questions and patches should be submitted to the mailing
-list. Details on the list, including archives, are available at
-http://lists.twibble.org/mailman/listinfo/dc395x/
+This driver is evolved from `the original 2.4 driver
+<https://web.archive.org/web/20140129181343/http://www.garloff.de/kurt/linux/dc395/>`_.
+Problems, questions and patches should be submitted to the `Linux SCSI
+mailing list <linux-scsi@vger.kernel.org>`_.
Parameters
----------
diff --git a/MAINTAINERS b/MAINTAINERS
index 7e0b87d5aa2e..d36b5b3a9f76 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5716,10 +5716,7 @@ DC395x SCSI driver
M: Oliver Neukum <oliver@neukum.org>
M: Ali Akcaagac <aliakc@web.de>
M: Jamie Lenehan <lenehan@twibble.org>
-L: dc395x@twibble.org
S: Maintained
-W: http://twibble.org/dist/dc395x/
-W: http://lists.twibble.org/mailman/listinfo/dc395x/
F: Documentation/scsi/dc395x.rst
F: drivers/scsi/dc395x.*
@@ -18767,6 +18764,16 @@ F: include/linux/wait.h
F: include/uapi/linux/sched.h
F: kernel/sched/
+SCSI LIBSAS SUBSYSTEM
+R: John Garry <john.g.garry@oracle.com>
+R: Jason Yan <yanaijie@huawei.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/libsas/
+F: include/scsi/libsas.h
+F: include/scsi/sas_ata.h
+F: Documentation/scsi/libsas.rst
+
SCSI RDMA PROTOCOL (SRP) INITIATOR
M: Bart Van Assche <bvanassche@acm.org>
L: linux-rdma@vger.kernel.org
diff --git a/block/blk-core.c b/block/blk-core.c
index 04ad13ec6ead..420458c3a3cb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -155,7 +155,7 @@ static const struct {
[BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
[BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
[BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
- [BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
+ [BLK_STS_RESV_CONFLICT] = { -EBADE, "reservation conflict" },
[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3b694ba3a106..74f79c4e45c9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -3132,6 +3132,8 @@ struct dm_pr {
bool fail_early;
int ret;
enum pr_type type;
+ struct pr_keys *read_keys;
+ struct pr_held_reservation *rsv;
};
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
@@ -3364,12 +3366,79 @@ out:
return r;
}
+static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_pr *pr = data;
+ const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+ if (!ops || !ops->pr_read_keys) {
+ pr->ret = -EOPNOTSUPP;
+ return -1;
+ }
+
+ pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
+ if (!pr->ret)
+ return -1;
+
+ return 0;
+}
+
+static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
+{
+ struct dm_pr pr = {
+ .read_keys = keys,
+ };
+ int ret;
+
+ ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
+ if (ret)
+ return ret;
+
+ return pr.ret;
+}
+
+static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_pr *pr = data;
+ const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+ if (!ops || !ops->pr_read_reservation) {
+ pr->ret = -EOPNOTSUPP;
+ return -1;
+ }
+
+ pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
+ if (!pr->ret)
+ return -1;
+
+ return 0;
+}
+
+static int dm_pr_read_reservation(struct block_device *bdev,
+ struct pr_held_reservation *rsv)
+{
+ struct dm_pr pr = {
+ .rsv = rsv,
+ };
+ int ret;
+
+ ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
+ if (ret)
+ return ret;
+
+ return pr.ret;
+}
+
static const struct pr_ops dm_pr_ops = {
.pr_register = dm_pr_register,
.pr_reserve = dm_pr_reserve,
.pr_release = dm_pr_release,
.pr_preempt = dm_pr_preempt,
.pr_clear = dm_pr_clear,
+ .pr_read_keys = dm_pr_read_keys,
+ .pr_read_reservation = dm_pr_read_reservation,
};
static const struct block_device_operations dm_blk_dops = {
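The dm_call_pr() callbacks above follow a common convention: returning -1 stops the device iteration early while pr->ret carries the real status, so -EOPNOTSUPP from a missing pr_ops and 0 from a successful read both short-circuit the walk. A minimal sketch of an in-kernel consumer of the resulting ->pr_read_keys contract, assuming the struct pr_keys layout added to include/linux/pr.h in this series (the caller sets num_keys to the slots it allocated; the op reports the total number of registered keys back in num_keys, which may exceed the slots filled). example_print_keys() is hypothetical:

    static int example_print_keys(struct block_device *bdev)
    {
    	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
    	struct pr_keys *keys;
    	u32 i, n;
    	int ret;

    	if (!ops || !ops->pr_read_keys)
    		return -EOPNOTSUPP;

    	keys = kzalloc(struct_size(keys, keys, 16), GFP_KERNEL);
    	if (!keys)
    		return -ENOMEM;
    	keys->num_keys = 16;	/* slots we allocated */

    	ret = ops->pr_read_keys(bdev, keys);
    	if (!ret) {
    		n = min_t(u32, 16, keys->num_keys);
    		for (i = 0; i < n; i++)
    			pr_info("key[%u] = 0x%llx\n", i, keys->keys[i]);
    	}
    	kfree(keys);
    	return ret;
    }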
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index e27202d22c7d..06c18a65da99 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
-nvme-core-y += core.o ioctl.o
+nvme-core-y += core.o ioctl.o pr.o
nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ccb6eb1282f8..b1249bf38844 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -279,7 +279,7 @@ static blk_status_t nvme_error_status(u16 status)
case NVME_SC_INVALID_PI:
return BLK_STS_PROTECTION;
case NVME_SC_RESERVATION_CONFLICT:
- return BLK_STS_NEXUS;
+ return BLK_STS_RESV_CONFLICT;
case NVME_SC_HOST_PATH_ERROR:
return BLK_STS_TRANSPORT;
case NVME_SC_ZONE_TOO_MANY_ACTIVE:
@@ -2061,153 +2061,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
}
}
-static char nvme_pr_type(enum pr_type type)
-{
- switch (type) {
- case PR_WRITE_EXCLUSIVE:
- return 1;
- case PR_EXCLUSIVE_ACCESS:
- return 2;
- case PR_WRITE_EXCLUSIVE_REG_ONLY:
- return 3;
- case PR_EXCLUSIVE_ACCESS_REG_ONLY:
- return 4;
- case PR_WRITE_EXCLUSIVE_ALL_REGS:
- return 5;
- case PR_EXCLUSIVE_ACCESS_ALL_REGS:
- return 6;
- default:
- return 0;
- }
-}
-
-static int nvme_send_ns_head_pr_command(struct block_device *bdev,
- struct nvme_command *c, u8 data[16])
-{
- struct nvme_ns_head *head = bdev->bd_disk->private_data;
- int srcu_idx = srcu_read_lock(&head->srcu);
- struct nvme_ns *ns = nvme_find_path(head);
- int ret = -EWOULDBLOCK;
-
- if (ns) {
- c->common.nsid = cpu_to_le32(ns->head->ns_id);
- ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
- }
- srcu_read_unlock(&head->srcu, srcu_idx);
- return ret;
-}
-
-static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
- u8 data[16])
-{
- c->common.nsid = cpu_to_le32(ns->head->ns_id);
- return nvme_submit_sync_cmd(ns->queue, c, data, 16);
-}
-
-static int nvme_sc_to_pr_err(int nvme_sc)
-{
- if (nvme_is_path_error(nvme_sc))
- return PR_STS_PATH_FAILED;
-
- switch (nvme_sc) {
- case NVME_SC_SUCCESS:
- return PR_STS_SUCCESS;
- case NVME_SC_RESERVATION_CONFLICT:
- return PR_STS_RESERVATION_CONFLICT;
- case NVME_SC_ONCS_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_INVALID_OPCODE:
- case NVME_SC_INVALID_FIELD:
- case NVME_SC_INVALID_NS:
- return -EINVAL;
- default:
- return PR_STS_IOERR;
- }
-}
-
-static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
- u64 key, u64 sa_key, u8 op)
-{
- struct nvme_command c = { };
- u8 data[16] = { 0, };
- int ret;
-
- put_unaligned_le64(key, &data[0]);
- put_unaligned_le64(sa_key, &data[8]);
-
- c.common.opcode = op;
- c.common.cdw10 = cpu_to_le32(cdw10);
-
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- bdev->bd_disk->fops == &nvme_ns_head_ops)
- ret = nvme_send_ns_head_pr_command(bdev, &c, data);
- else
- ret = nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
- data);
- if (ret < 0)
- return ret;
-
- return nvme_sc_to_pr_err(ret);
-}
-
-static int nvme_pr_register(struct block_device *bdev, u64 old,
- u64 new, unsigned flags)
-{
- u32 cdw10;
-
- if (flags & ~PR_FL_IGNORE_KEY)
- return -EOPNOTSUPP;
-
- cdw10 = old ? 2 : 0;
- cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
- cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
- return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
-}
-
-static int nvme_pr_reserve(struct block_device *bdev, u64 key,
- enum pr_type type, unsigned flags)
-{
- u32 cdw10;
-
- if (flags & ~PR_FL_IGNORE_KEY)
- return -EOPNOTSUPP;
-
- cdw10 = nvme_pr_type(type) << 8;
- cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
- enum pr_type type, bool abort)
-{
- u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
-
- return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_clear(struct block_device *bdev, u64 key)
-{
- u32 cdw10 = 1 | (key ? 0 : 1 << 3);
-
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
-}
-
-static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
-{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
-
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
-}
-
-const struct pr_ops nvme_pr_ops = {
- .pr_register = nvme_pr_register,
- .pr_reserve = nvme_pr_reserve,
- .pr_release = nvme_pr_release,
- .pr_preempt = nvme_pr_preempt,
- .pr_clear = nvme_pr_clear,
-};
-
#ifdef CONFIG_BLK_SED_OPAL
static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bf46f122e9e1..c0762346b441 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -19,6 +19,8 @@
#include <trace/events/block.h>
+extern const struct pr_ops nvme_pr_ops;
+
extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
new file mode 100644
index 000000000000..391b1465ebfd
--- /dev/null
+++ b/drivers/nvme/host/pr.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015 Intel Corporation
+ * Keith Busch <kbusch@kernel.org>
+ */
+#include <linux/blkdev.h>
+#include <linux/pr.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
+{
+ switch (type) {
+ case PR_WRITE_EXCLUSIVE:
+ return NVME_PR_WRITE_EXCLUSIVE;
+ case PR_EXCLUSIVE_ACCESS:
+ return NVME_PR_EXCLUSIVE_ACCESS;
+ case PR_WRITE_EXCLUSIVE_REG_ONLY:
+ return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
+ case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
+ case PR_WRITE_EXCLUSIVE_ALL_REGS:
+ return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
+ case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
+ }
+
+ return 0;
+}
+
+static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
+{
+ switch (type) {
+ case NVME_PR_WRITE_EXCLUSIVE:
+ return PR_WRITE_EXCLUSIVE;
+ case NVME_PR_EXCLUSIVE_ACCESS:
+ return PR_EXCLUSIVE_ACCESS;
+ case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
+ return PR_WRITE_EXCLUSIVE_REG_ONLY;
+ case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ return PR_EXCLUSIVE_ACCESS_REG_ONLY;
+ case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
+ return PR_WRITE_EXCLUSIVE_ALL_REGS;
+ case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ return PR_EXCLUSIVE_ACCESS_ALL_REGS;
+ }
+
+ return 0;
+}
+
+static int nvme_send_ns_head_pr_command(struct block_device *bdev,
+ struct nvme_command *c, void *data, unsigned int data_len)
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EWOULDBLOCK;
+
+ if (ns) {
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
+ void *data, unsigned int data_len)
+{
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
+}
+
+static int nvme_sc_to_pr_err(int nvme_sc)
+{
+ if (nvme_is_path_error(nvme_sc))
+ return PR_STS_PATH_FAILED;
+
+ switch (nvme_sc) {
+ case NVME_SC_SUCCESS:
+ return PR_STS_SUCCESS;
+ case NVME_SC_RESERVATION_CONFLICT:
+ return PR_STS_RESERVATION_CONFLICT;
+ case NVME_SC_ONCS_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case NVME_SC_BAD_ATTRIBUTES:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
+ return -EINVAL;
+ default:
+ return PR_STS_IOERR;
+ }
+}
+
+static int nvme_send_pr_command(struct block_device *bdev,
+ struct nvme_command *c, void *data, unsigned int data_len)
+{
+ if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+ bdev->bd_disk->fops == &nvme_ns_head_ops)
+ return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
+
+ return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
+ data_len);
+}
+
+static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
+ u64 key, u64 sa_key, u8 op)
+{
+ struct nvme_command c = { };
+ u8 data[16] = { 0, };
+ int ret;
+
+ put_unaligned_le64(key, &data[0]);
+ put_unaligned_le64(sa_key, &data[8]);
+
+ c.common.opcode = op;
+ c.common.cdw10 = cpu_to_le32(cdw10);
+
+ ret = nvme_send_pr_command(bdev, &c, data, sizeof(data));
+ if (ret < 0)
+ return ret;
+
+ return nvme_sc_to_pr_err(ret);
+}
+
+static int nvme_pr_register(struct block_device *bdev, u64 old,
+ u64 new, unsigned flags)
+{
+ u32 cdw10;
+
+ if (flags & ~PR_FL_IGNORE_KEY)
+ return -EOPNOTSUPP;
+
+ cdw10 = old ? 2 : 0;
+ cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
+ cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
+ return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
+}
+
+static int nvme_pr_reserve(struct block_device *bdev, u64 key,
+ enum pr_type type, unsigned flags)
+{
+ u32 cdw10;
+
+ if (flags & ~PR_FL_IGNORE_KEY)
+ return -EOPNOTSUPP;
+
+ cdw10 = nvme_pr_type_from_blk(type) << 8;
+ cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
+ enum pr_type type, bool abort)
+{
+ u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1);
+
+ return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_clear(struct block_device *bdev, u64 key)
+{
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
+
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+}
+
+static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+ u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3);
+
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+}
+
+static int nvme_pr_resv_report(struct block_device *bdev, void *data,
+ u32 data_len, bool *eds)
+{
+ struct nvme_command c = { };
+ int ret;
+
+ c.common.opcode = nvme_cmd_resv_report;
+ c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len));
+ c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT);
+ *eds = true;
+
+retry:
+ ret = nvme_send_pr_command(bdev, &c, data, data_len);
+ if (ret == NVME_SC_HOST_ID_INCONSIST &&
+ c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) {
+ c.common.cdw11 = 0;
+ *eds = false;
+ goto retry;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return nvme_sc_to_pr_err(ret);
+}
+
+static int nvme_pr_read_keys(struct block_device *bdev,
+ struct pr_keys *keys_info)
+{
+ u32 rse_len, num_keys = keys_info->num_keys;
+ struct nvme_reservation_status_ext *rse;
+ int ret, i;
+ bool eds;
+
+ /*
+ * Assume we are using 128-bit host IDs and allocate a buffer large
+ * enough to get enough keys to fill the return keys buffer.
+ */
+ rse_len = struct_size(rse, regctl_eds, num_keys);
+ rse = kzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+ ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
+ if (ret)
+ goto free_rse;
+
+ keys_info->generation = le32_to_cpu(rse->gen);
+ keys_info->num_keys = get_unaligned_le16(&rse->regctl);
+
+ num_keys = min(num_keys, keys_info->num_keys);
+ for (i = 0; i < num_keys; i++) {
+ if (eds) {
+ keys_info->keys[i] =
+ le64_to_cpu(rse->regctl_eds[i].rkey);
+ } else {
+ struct nvme_reservation_status *rs;
+
+ rs = (struct nvme_reservation_status *)rse;
+ keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
+ }
+ }
+
+free_rse:
+ kfree(rse);
+ return ret;
+}
+
+static int nvme_pr_read_reservation(struct block_device *bdev,
+ struct pr_held_reservation *resv)
+{
+ struct nvme_reservation_status_ext tmp_rse, *rse;
+ int ret, i, num_regs;
+ u32 rse_len;
+ bool eds;
+
+get_num_regs:
+ /*
+ * Get the number of registrations so we know how big to allocate
+ * the response buffer.
+ */
+ ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
+ if (ret)
+ return ret;
+
+ num_regs = get_unaligned_le16(&tmp_rse.regctl);
+ if (!num_regs) {
+ resv->generation = le32_to_cpu(tmp_rse.gen);
+ return 0;
+ }
+
+ rse_len = struct_size(rse, regctl_eds, num_regs);
+ rse = kzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+ ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
+ if (ret)
+ goto free_rse;
+
+ if (num_regs != get_unaligned_le16(&rse->regctl)) {
+ kfree(rse);
+ goto get_num_regs;
+ }
+
+ resv->generation = le32_to_cpu(rse->gen);
+ resv->type = block_pr_type_from_nvme(rse->rtype);
+
+ for (i = 0; i < num_regs; i++) {
+ if (eds) {
+ if (rse->regctl_eds[i].rcsts) {
+ resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
+ break;
+ }
+ } else {
+ struct nvme_reservation_status *rs;
+
+ rs = (struct nvme_reservation_status *)rse;
+ if (rs->regctl_ds[i].rcsts) {
+ resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
+ break;
+ }
+ }
+ }
+
+free_rse:
+ kfree(rse);
+ return ret;
+}
+
+const struct pr_ops nvme_pr_ops = {
+ .pr_register = nvme_pr_register,
+ .pr_reserve = nvme_pr_reserve,
+ .pr_release = nvme_pr_release,
+ .pr_preempt = nvme_pr_preempt,
+ .pr_clear = nvme_pr_clear,
+ .pr_read_keys = nvme_pr_read_keys,
+ .pr_read_reservation = nvme_pr_read_reservation,
+};
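In nvme_pr_resv_report() above, cdw10 carries NUMD, the 0's-based dword count of the transfer, which is why nvme_bytes_to_numd() computes (len >> 2) - 1. A worked check of the sizing, assuming the 64-byte extended status header and 64-byte regctl_eds entries implied by struct nvme_reservation_status_ext:

    /* Header plus one extended registered-controller entry is
     * 64 + 64 = 128 bytes, so NUMD = 128/4 - 1 = 31. */
    _Static_assert(((64 + 1 * 64) >> 2) - 1 == 31, "NUMD is zero-based");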
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 9fbfce735d56..15a26b05bc55 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2737,7 +2737,12 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
else if (status == 0) {
switch (cqr->intrc) {
case -EPERM:
- error = BLK_STS_NEXUS;
+ /*
+ * DASD doesn't implement SCSI/NVMe reservations, but it
+ * implements a locking scheme similar to them. We
+ * return this error when we no longer have the lock.
+ */
+ error = BLK_STS_RESV_CONFLICT;
break;
case -ENOLINK:
error = BLK_STS_TRANSPORT;
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index df18d9d2af53..773c84af784c 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1134,7 +1134,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ strscpy(rspnid->spn, name, sizeof(rspnid->spn));
rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
@@ -1155,7 +1155,7 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ strscpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
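These strlcpy() to strscpy() conversions, repeated across the bfa files below, are drop-in replacements on the success path; the difference shows up with over-length sources: strlcpy() returns strlen(src) and always walks the entire source string, while strscpy() bounds the read and reports truncation. A small sketch of the contract:

    char buf[8];
    ssize_t n;

    n = strscpy(buf, "short", sizeof(buf));
    /* n == 5, buf == "short" */

    n = strscpy(buf, "far too long", sizeof(buf));
    /* n == -E2BIG, buf == "far too" (7 chars + NUL), no over-read */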
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index d2d396ca0e9a..5023c0ab4277 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -761,7 +761,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strlcpy(port_cfg->sym_name.symname, model,
+ strscpy(port_cfg->sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);
@@ -822,7 +822,7 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strlcpy(port_cfg->node_sym_name.symname, model,
+ strscpy(port_cfg->node_sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index b12afcc4b189..008afd817087 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strlcpy(hba_attr->driver_version, (char *)driver_info->version,
+ strscpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strlcpy(hba_attr->os_name, driver_info->host_os_name,
+ strscpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2663,13 +2663,13 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strlcpy(hba_attr->node_sym_name.symname,
+ strscpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strscpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2736,19 +2736,19 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strlcpy(port_attr->os_device_name, driver_info->os_device_name,
+ strscpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strlcpy(port_attr->host_name, driver_info->host_machine_name,
+ strscpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strlcpy(port_attr->port_sym_name.symname,
+ strscpy(port_attr->port_sym_name.symname,
bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strscpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,7 +4667,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strlcpy(symbl,
+ strscpy(symbl,
(char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
sizeof(symbl));
@@ -5194,7 +5194,7 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strscpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
sizeof(symbl));
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 5740302d83ac..e1ed1424fddb 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -2788,7 +2788,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 4e3cef02f10f..c9745c0b4eee 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -330,7 +330,7 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strlcpy(lp.log_entry.string_log, log_str,
+ strscpy(lp.log_entry.string_log, log_str,
BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 529b73a83d69..62cb7a864fd5 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -965,19 +965,19 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ strscpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version));
if (host_name)
- strlcpy(driver_info.host_machine_name, host_name,
+ strscpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name));
if (os_name)
- strlcpy(driver_info.host_os_name, os_name,
+ strscpy(driver_info.host_os_name, os_name,
sizeof(driver_info.host_os_name));
if (os_patch)
- strlcpy(driver_info.host_os_patch, os_patch,
+ strscpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch));
- strlcpy(driver_info.os_device_name, bfad->pci_name,
+ strscpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name));
/* FCS driver info init */
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 5a85401e9e2d..e96e4b6df265 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -834,7 +834,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strlcpy(symname, port_attr.port_cfg.sym_name.symname,
+ strscpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return sysfs_emit(buf, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 79d4f7ee5bcb..520f9152f3bf 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -119,7 +119,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strscpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -307,7 +307,7 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strlcpy(iocmd->attr.port_symname.symname,
+ strscpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
sizeof(iocmd->attr.port_symname.symname));
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c335f7a188d2..a9d3d8562d3c 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1046,7 +1046,7 @@ bfad_fc_host_init(struct bfad_im_port_s *im_port)
/* For fibre channel services type 0x20 */
fc_host_supported_fc4s(host)[7] = 1;
- strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
+ strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
sprintf(fc_host_symbolic_name(host), "%s", symname);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 12d588454f5d..20e1607c6282 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -30,6 +30,7 @@
#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
#define CFG_MAX_TAG 0x68
+#define TRANS_LOCK_ICT_TIME 0X70
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
#define HGC_GET_ITV_TIME 0x90
@@ -627,13 +628,15 @@ static void interrupt_enable_v3_hw(struct hisi_hba *hisi_hba)
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
+ struct pci_dev *pdev = hisi_hba->pci_dev;
int i, j;
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
- hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
+ /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */
+ hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
@@ -652,6 +655,9 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
+ if (pdev->revision < 0x30)
+ hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
+
interrupt_enable_v3_hw(hisi_hba);
for (i = 0; i < hisi_hba->n_phy; i++) {
enum sas_linkrate max;
@@ -669,7 +675,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
- hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
@@ -680,13 +685,18 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
- hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
0x30f4240);
- /* used for 12G negotiate */
- hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
+ /* set value through firmware for 920B and later version */
+ if (pdev->revision < 0x30) {
+ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
+ hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
+ /* used for 12G negotiate */
+ hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+ }
+
/* get default FFE configuration for BIST */
for (j = 0; j < FFE_CFG_MAX; j++) {
u32 val = hisi_sas_phy_read32(hisi_hba, i,
@@ -2206,6 +2216,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);
+ u32 dw0 = le32_to_cpu(complete_hdr->dw0);
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
@@ -2215,8 +2226,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
* but I/O information has been written to the host memory, we examine
* response IU.
*/
- if (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
- (complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
+ if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
+ (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
return false;
ts->residual = trans_tx_fail_type;
@@ -2232,7 +2243,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
(sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
ts->stat = SAS_PROTO_RESPONSE;
} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
@@ -2999,6 +3010,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
+ HISI_SAS_DEBUGFS_REG(TRANS_LOCK_ICT_TIME),
HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
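The TRANS_LOCK_ICT_TIME value programmed above is exactly the in-line comment's arithmetic: at a 2 ns AHB clock tick, a 2.5 s timeout is 2.5 s / 2 ns = 1,250,000,000 ticks:

    /* 2.5 s at 2 ns per AHB tick */
    _Static_assert(2500000000ULL / 2 == 0x4A817C80, "TRANS_LOCK_ICT_TIME");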
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index dc670304f181..adcac57aaee6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1198,37 +1198,37 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
sas_route_char(child, child_phy));
}
+static bool sas_eeds_valid(struct domain_device *parent,
+ struct domain_device *child)
+{
+ struct sas_discovery *disc = &parent->port->disc;
+
+ return (SAS_ADDR(disc->eeds_a) == SAS_ADDR(parent->sas_addr) ||
+ SAS_ADDR(disc->eeds_a) == SAS_ADDR(child->sas_addr)) &&
+ (SAS_ADDR(disc->eeds_b) == SAS_ADDR(parent->sas_addr) ||
+ SAS_ADDR(disc->eeds_b) == SAS_ADDR(child->sas_addr));
+}
+
static int sas_check_eeds(struct domain_device *child,
- struct ex_phy *parent_phy,
- struct ex_phy *child_phy)
+ struct ex_phy *parent_phy,
+ struct ex_phy *child_phy)
{
int res = 0;
struct domain_device *parent = child->parent;
+ struct sas_discovery *disc = &parent->port->disc;
- if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
+ if (SAS_ADDR(disc->fanout_sas_addr) != 0) {
res = -ENODEV;
pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n",
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
SAS_ADDR(child->sas_addr),
child_phy->phy_id,
- SAS_ADDR(parent->port->disc.fanout_sas_addr));
- } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
- memcpy(parent->port->disc.eeds_a, parent->sas_addr,
- SAS_ADDR_SIZE);
- memcpy(parent->port->disc.eeds_b, child->sas_addr,
- SAS_ADDR_SIZE);
- } else if (((SAS_ADDR(parent->port->disc.eeds_a) ==
- SAS_ADDR(parent->sas_addr)) ||
- (SAS_ADDR(parent->port->disc.eeds_a) ==
- SAS_ADDR(child->sas_addr)))
- &&
- ((SAS_ADDR(parent->port->disc.eeds_b) ==
- SAS_ADDR(parent->sas_addr)) ||
- (SAS_ADDR(parent->port->disc.eeds_b) ==
- SAS_ADDR(child->sas_addr))))
- ;
- else {
+ SAS_ADDR(disc->fanout_sas_addr));
+ } else if (SAS_ADDR(disc->eeds_a) == 0) {
+ memcpy(disc->eeds_a, parent->sas_addr, SAS_ADDR_SIZE);
+ memcpy(disc->eeds_b, child->sas_addr, SAS_ADDR_SIZE);
+ } else if (!sas_eeds_valid(parent, child)) {
res = -ENODEV;
pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n",
SAS_ADDR(parent->sas_addr),
@@ -1240,11 +1240,56 @@ static int sas_check_eeds(struct domain_device *child,
return res;
}
-/* Here we spill over 80 columns. It is intentional.
- */
-static int sas_check_parent_topology(struct domain_device *child)
+static int sas_check_edge_expander_topo(struct domain_device *child,
+ struct ex_phy *parent_phy)
+{
+ struct expander_device *child_ex = &child->ex_dev;
+ struct expander_device *parent_ex = &child->parent->ex_dev;
+ struct ex_phy *child_phy;
+
+ child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
+
+ if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
+ child_phy->routing_attr != TABLE_ROUTING)
+ goto error;
+ } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
+ if (child_phy->routing_attr == SUBTRACTIVE_ROUTING)
+ return sas_check_eeds(child, parent_phy, child_phy);
+ else if (child_phy->routing_attr != TABLE_ROUTING)
+ goto error;
+ } else if (parent_phy->routing_attr == TABLE_ROUTING) {
+ if (child_phy->routing_attr != SUBTRACTIVE_ROUTING &&
+ (child_phy->routing_attr != TABLE_ROUTING ||
+ !child_ex->t2t_supp || !parent_ex->t2t_supp))
+ goto error;
+ }
+
+ return 0;
+error:
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ return -ENODEV;
+}
+
+static int sas_check_fanout_expander_topo(struct domain_device *child,
+ struct ex_phy *parent_phy)
{
struct expander_device *child_ex = &child->ex_dev;
+ struct ex_phy *child_phy;
+
+ child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
+
+ if (parent_phy->routing_attr == TABLE_ROUTING &&
+ child_phy->routing_attr == SUBTRACTIVE_ROUTING)
+ return 0;
+
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+
+ return -ENODEV;
+}
+
+static int sas_check_parent_topology(struct domain_device *child)
+{
struct expander_device *parent_ex;
int i;
int res = 0;
@@ -1259,7 +1304,6 @@ static int sas_check_parent_topology(struct domain_device *child)
for (i = 0; i < parent_ex->num_phys; i++) {
struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
- struct ex_phy *child_phy;
if (parent_phy->phy_state == PHY_VACANT ||
parent_phy->phy_state == PHY_NOT_PRESENT)
@@ -1268,40 +1312,14 @@ static int sas_check_parent_topology(struct domain_device *child)
if (!sas_phy_match_dev_addr(child, parent_phy))
continue;
- child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
-
switch (child->parent->dev_type) {
case SAS_EDGE_EXPANDER_DEVICE:
- if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
- if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
- child_phy->routing_attr != TABLE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
- if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) {
- res = sas_check_eeds(child, parent_phy, child_phy);
- } else if (child_phy->routing_attr != TABLE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- } else if (parent_phy->routing_attr == TABLE_ROUTING) {
- if (child_phy->routing_attr == SUBTRACTIVE_ROUTING ||
- (child_phy->routing_attr == TABLE_ROUTING &&
- child_ex->t2t_supp && parent_ex->t2t_supp)) {
- /* All good */;
- } else {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- }
+ if (sas_check_edge_expander_topo(child, parent_phy))
+ res = -ENODEV;
break;
case SAS_FANOUT_EXPANDER_DEVICE:
- if (parent_phy->routing_attr != TABLE_ROUTING ||
- child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ if (sas_check_fanout_expander_topo(child, parent_phy))
res = -ENODEV;
- }
break;
default:
break;
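The two helpers factored out above encode the SAS expander routing rules that were previously buried in one switch statement. Summarized from the code paths (no new semantics):

    /*
     * parent (edge) phy    child phy            verdict
     * -------------------  -------------------  ------------------------
     * subtractive          subtractive          OK iff a valid EEDS pair
     * subtractive          table                OK
     * table                subtractive          OK
     * table                table                OK iff both ends t2t_supp
     * (child is fanout)    -                    needs subtractive->table
     *
     * parent (fanout) phy  child phy            verdict
     * -------------------  -------------------  ------------------------
     * table                subtractive          OK; anything else is a bug
     */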
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3863a5341782..21c7ecd3ede5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5858,8 +5858,8 @@ int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
module_param(lpfc_fabric_cgn_frequency, int, 0444);
MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
-int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
-module_param(lpfc_acqe_cgn_frequency, int, 0444);
+unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, byte, 0444);
MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b833b983e69d..d4e46a08f94d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -134,7 +134,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb);
-int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
@@ -248,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id);
int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
uint32_t len);
@@ -664,7 +664,7 @@ extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
-extern int lpfc_acqe_cgn_frequency;
+extern unsigned char lpfc_acqe_cgn_frequency;
extern int lpfc_fabric_cgn_frequency;
extern int lpfc_use_cgn_signal;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6a15f879e517..a3c8550e9985 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5205,14 +5205,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
*
* This routine is the completion callback function to the Logout (LOGO)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
- * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
- * release the ndlp if it has the last reference remaining (reference count
- * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp
- * field to NULL to inform the following lpfc_els_free_iocb() routine no
- * ndlp reference count needs to be decremented. Otherwise, the ndlp
- * reference use-count shall be decremented by the lpfc_els_free_iocb()
- * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
- * IOCB data structure.
+ * the completion of the LOGO process. If the node has transitioned to NPR,
+ * this routine unregisters the RPI if it is still registered. The
+ * lpfc_els_free_iocb() is invoked to release the IOCB data structure.
**/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -5253,19 +5248,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
goto out;
- /* NPort Recovery mode or node is just allocated */
- if (!lpfc_nlp_not_used(ndlp)) {
- /* A LOGO is completing and the node is in NPR state.
- * Just unregister the RPI because the node is still
- * required.
- */
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
lpfc_unreg_rpi(vport, ndlp);
- } else {
- /* Indicate the node has already released, should
- * not reference to it from within lpfc_els_free_iocb.
- */
- cmdiocb->ndlp = NULL;
- }
+
}
out:
/*
@@ -5285,9 +5270,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* RPI (Remote Port Index) mailbox command to the @phba. It simply releases
* the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
* decrements the ndlp reference count held for this completion callback
- * function. After that, it invokes the lpfc_nlp_not_used() to check
- * whether there is only one reference left on the ndlp. If so, it will
- * perform one more decrement and trigger the release of the ndlp.
+ * function. After that, it invokes lpfc_drop_node() to check

+ * whether it is appropriate to release the node.
**/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5ba3a9ad9501..67bfdddb897c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4333,13 +4333,14 @@ out:
/* If the node is not registered with the scsi or nvme
* transport, remove the fabric node. The failed reg_login
- * is terminal.
+ * is terminal and forces the removal of the last node
+ * reference.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
- lpfc_nlp_not_used(ndlp);
+ lpfc_nlp_put(ndlp);
}
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@@ -6704,25 +6705,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
-/* This routine free's the specified nodelist if it is not in use
- * by any other discovery thread. This routine returns 1 if the
- * ndlp has been freed. A return value of 0 indicates the ndlp is
- * not yet been released.
- */
-int
-lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
-{
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node not used: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
-
- if (kref_read(&ndlp->kref) == 1)
- if (lpfc_nlp_put(ndlp))
- return 1;
- return 0;
-}
-
/**
* lpfc_fcf_inuse - Check if FCF can be unregistered.
* @phba: Pointer to hba context object.
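Dropping lpfc_nlp_not_used() works because lpfc_nlp_put() is a plain kref_put() wrapper (visible in the unchanged hunk just above): releasing a reference unconditionally is always safe, and the node is freed only when the last reference goes away. The underlying pattern, with release_fn standing in for lpfc_nlp_release():

    struct kref ref;

    kref_init(&ref);            /* refcount = 1 */
    kref_get(&ref);             /* refcount = 2 */
    kref_put(&ref, release_fn); /* refcount = 1, release_fn not called */
    kref_put(&ref, release_fn); /* refcount = 0, release_fn(&ref) runs */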
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 58fa39c403a0..082f8a109e55 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -536,9 +536,9 @@ struct sli4_wcqe_xri_aborted {
/* completion queue entry structure for rqe completion */
struct lpfc_rcqe {
uint32_t word0;
-#define lpfc_rcqe_bindex_SHIFT 16
-#define lpfc_rcqe_bindex_MASK 0x0000FFF
-#define lpfc_rcqe_bindex_WORD word0
+#define lpfc_rcqe_iv_SHIFT 31
+#define lpfc_rcqe_iv_MASK 0x00000001
+#define lpfc_rcqe_iv_WORD word0
#define lpfc_rcqe_status_SHIFT 8
#define lpfc_rcqe_status_MASK 0x000000FF
#define lpfc_rcqe_status_WORD word0
@@ -546,6 +546,7 @@ struct lpfc_rcqe {
#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
+#define FC_STATUS_RQ_DMA_FAILURE 0x14 /* DMA failure */
uint32_t word1;
#define lpfc_rcqe_fcf_id_v1_SHIFT 0
#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
@@ -4813,8 +4814,8 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
-#define cmf_sync_period_SHIFT 16
-#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_SHIFT 24
+#define cmf_sync_period_MASK 0x000000ff
#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
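The SHIFT/MASK/WORD triplets above plug into lpfc's generic bitfield accessors defined near the top of lpfc_hw4.h, along the lines of:

    #define bf_get(name, ptr) \
    	((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

So bf_get(lpfc_rcqe_iv, rcqe) extracts bit 31 of word0 (how the new FC_STATUS_RQ_DMA_FAILURE path in lpfc_sli.c tests the IV bit), and cmf_sync_period now occupies bits 31:24 of word13 rather than 31:16.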
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 867b4c788f08..088bd75fb5d7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1279,7 +1279,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
/*
* lpfc_idle_stat_delay_work - idle_stat tracking
*
- * This routine tracks per-cq idle_stat and determines polling decisions.
+ * This routine tracks per-eq idle_stat and determines polling decisions.
*
* Return codes:
* None
@@ -1290,7 +1290,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
struct lpfc_hba *phba = container_of(to_delayed_work(work),
struct lpfc_hba,
idle_stat_delay_work);
- struct lpfc_queue *cq;
+ struct lpfc_queue *eq;
struct lpfc_sli4_hdw_queue *hdwq;
struct lpfc_idle_stat *idle_stat;
u32 i, idle_percent;
@@ -1306,10 +1306,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
- cq = hdwq->io_cq;
+ eq = hdwq->hba_eq;
- /* Skip if we've already handled this cq's primary CPU */
- if (cq->chann != i)
+ /* Skip if we've already handled this eq's primary CPU */
+ if (eq->chann != i)
continue;
idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -1333,9 +1333,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
idle_percent = 100 - idle_percent;
if (idle_percent < 15)
- cq->poll_mode = LPFC_QUEUE_WORK;
+ eq->poll_mode = LPFC_QUEUE_WORK;
else
- cq->poll_mode = LPFC_IRQ_POLL;
+ eq->poll_mode = LPFC_THREADED_IRQ;
idle_stat->prev_idle = wall_idle;
idle_stat->prev_wall = wall;
@@ -4357,6 +4357,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_io_buf *lpfc_cmd;
int idx, cnt;
+ unsigned long iflags;
qp = phba->sli4_hba.hdwq;
cnt = 0;
@@ -4371,12 +4372,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
- spin_lock(&qp->io_buf_list_put_lock);
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
qp->put_io_bufs++;
qp->total_io_bufs++;
- spin_unlock(&qp->io_buf_list_put_lock);
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
+ iflags);
}
}
return cnt;
@@ -13117,8 +13119,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
}
eqhdl->irq = rc;
- rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = request_threaded_irq(eqhdl->irq,
+ &lpfc_sli4_hba_intr_handler,
+ &lpfc_sli4_hba_intr_handler_th,
+ IRQF_ONESHOT, name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index adda70423c77..82730a89ecb5 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1893,38 +1893,38 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
pnvme_rport->port_id,
pnvme_fcreq);
- /* If the hba is getting reset, this flag is set. It is
- * cleared when the reset is complete and rings reestablished.
- */
- spin_lock_irqsave(&phba->hbalock, flags);
- /* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "6139 Driver in reset cleanup - flushing "
- "NVME Req now. hba_flag x%x\n",
- phba->hba_flag);
- return;
- }
-
lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
}
- nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/* Guard against IO completion being called at same time */
- spin_lock(&lpfc_nbuf->buf_lock);
+ spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
+
+ /* If the hba is getting reset, this flag is set. It is
+ * cleared when the reset is complete and rings reestablished.
+ */
+ spin_lock(&phba->hbalock);
+ /* driver queued commands are in process of being flushed */
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6139 Driver in reset cleanup - flushing "
+ "NVME Req now. hba_flag x%x\n",
+ phba->hba_flag);
+ return;
+ }
+
+ nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/*
* The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@@ -1971,8 +1971,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
lpfc_nvme_abort_fcreq_cmpl);
- spin_unlock(&lpfc_nbuf->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
@@ -1998,8 +1998,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
out_unlock:
- spin_unlock(&lpfc_nbuf->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
return;
}
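Net effect of the reordering above: lpfc_nvme_fcp_abort() now takes lpfc_nbuf->buf_lock (irqsave) as the outer lock and phba->hbalock as the inner one, and every exit path releases them in reverse order. Keeping a single nesting order for the pair is what rules out AB-BA deadlocks against paths that hold buf_lock while reaching for hbalock, and it lets the missing-buffer early returns bail out before hbalock is touched at all:

    /* The nesting order now enforced (sketch):
     *   spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);   outer
     *       spin_lock(&phba->hbalock);                    inner
     *       ...
     *       spin_unlock(&phba->hbalock);
     *   spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
     */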
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index e989f130434e..49aa86c477c6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4273,7 +4273,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
"x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3),
- (u64)scsi_get_lba(cmd),
+ (cmd->device->sector_size) ?
+ (u64)scsi_get_lba(cmd) : 0,
cmd->retries, scsi_get_resid(cmd));
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8693578888f1..22708f66be64 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -82,7 +82,8 @@ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_queue *eq,
- struct lpfc_eqe *eqe);
+ struct lpfc_eqe *eqe,
+ enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
@@ -629,7 +630,7 @@ lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
- uint8_t rearm)
+ u8 rearm, enum lpfc_poll_mode poll_mode)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -639,7 +640,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
- lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
+ lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
__lpfc_sli4_consume_eqe(phba, eq, eqe);
consumed++;
@@ -1931,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
- u16 warn_sync_period = 0;
+ u8 warn_sync_period = 0;
/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -7957,7 +7958,7 @@ out_rdf:
* lpfc_init_idle_stat_hb - Initialize idle_stat tracking
* @phba: pointer to lpfc hba data structure.
*
- * This routine initializes the per-cq idle_stat to dynamically dictate
+ * This routine initializes the per-eq idle_stat to dynamically dictate
* polling decisions.
*
* Return codes:
@@ -7967,16 +7968,16 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
int i;
struct lpfc_sli4_hdw_queue *hdwq;
- struct lpfc_queue *cq;
+ struct lpfc_queue *eq;
struct lpfc_idle_stat *idle_stat;
u64 wall;
for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
- cq = hdwq->io_cq;
+ eq = hdwq->hba_eq;
- /* Skip if we've already handled this cq's primary CPU */
- if (cq->chann != i)
+ /* Skip if we've already handled this eq's primary CPU */
+ if (eq->chann != i)
continue;
idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -7985,13 +7986,14 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
idle_stat->prev_wall = wall;
if (phba->nvmet_support ||
- phba->cmf_active_mode != LPFC_CFG_OFF)
- cq->poll_mode = LPFC_QUEUE_WORK;
+ phba->cmf_active_mode != LPFC_CFG_OFF ||
+ phba->intr_type != MSIX)
+ eq->poll_mode = LPFC_QUEUE_WORK;
else
- cq->poll_mode = LPFC_IRQ_POLL;
+ eq->poll_mode = LPFC_THREADED_IRQ;
}
- if (!phba->nvmet_support)
+ if (!phba->nvmet_support && phba->intr_type == MSIX)
schedule_delayed_work(&phba->idle_stat_delay_work,
msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
@@ -9218,7 +9220,8 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_QUEUE_WORK);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -11254,7 +11257,8 @@ inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
* will be handled through a sched from polling timer
* function which is currently triggered every 1msec.
*/
- lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+ lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
+ LPFC_QUEUE_WORK);
}
/**
@@ -14682,6 +14686,38 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case FC_STATUS_RQ_DMA_FAILURE:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2564 RQE DMA Error x%x, x%08x x%08x x%08x "
+ "x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+
+ /* If IV set, no further recovery */
+ if (bf_get(lpfc_rcqe_iv, rcqe))
+ break;
+
+ /* recycle consumed resource */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
+ dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ }
+ hrq->RQ_rcv_buf++;
+ hrq->RQ_buf_posted--;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_in_buf_free(phba, &dma_buf->dbuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2565 Unexpected RQE Status x%x, w0-3 x%08x "
+ "x%08x x%08x x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+ break;
}
out:
return workposted;
@@ -14803,7 +14839,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
* @cq: Pointer to CQ to be processed
* @handler: Routine to process each cqe
* @delay: Pointer to usdelay to set in case of rescheduling of the handler
- * @poll_mode: Polling mode we were called from
*
* This routine processes completion queue entries in a CQ. While a valid
* queue element is found, the handler is called. During processing checks
@@ -14821,8 +14856,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_cqe *), unsigned long *delay,
- enum lpfc_poll_mode poll_mode)
+ struct lpfc_cqe *), unsigned long *delay)
{
struct lpfc_cqe *cqe;
bool workposted = false;
@@ -14863,10 +14897,6 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
arm = false;
}
- /* Note: complete the irq_poll softirq before rearming CQ */
- if (poll_mode == LPFC_IRQ_POLL)
- irq_poll_complete(&cq->iop);
-
/* Track the max number of CQEs processed in 1 EQ */
if (count > cq->CQ_max_cqe)
cq->CQ_max_cqe = count;
@@ -14916,17 +14946,17 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
case LPFC_MCQ:
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_mcqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
break;
case LPFC_WCQ:
if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
else
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_cqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15203,6 +15233,38 @@ drop:
hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
break;
+ case FC_STATUS_RQ_DMA_FAILURE:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2575 RQE DMA Error x%x, x%08x x%08x x%08x "
+ "x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+
+ /* If IV set, no further recovery */
+ if (bf_get(lpfc_rcqe_iv, rcqe))
+ break;
+
+ /* recycle consumed resource */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
+ dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ }
+ hrq->RQ_rcv_buf++;
+ hrq->RQ_buf_posted--;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_rq_buf_free(phba, &dma_buf->hbuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2576 Unexpected RQE Status x%x, w0-3 x%08x "
+ "x%08x x%08x x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+ break;
}
out:
return workposted;
@@ -15271,45 +15333,64 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}
/**
- * lpfc_sli4_sched_cq_work - Schedules cq work
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to CQ
- * @cqid: CQ ID
- *
- * This routine checks the poll mode of the CQ corresponding to
- * cq->chann, then either schedules a softirq or queue_work to complete
- * cq work.
+ * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
+ * @cq: Pointer to CQ to be processed
*
- * queue_work path is taken if in NVMET mode, or if poll_mode is in
- * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
+ * This routine calls the cq processing routine with the handler for
+ * fast path CQEs.
*
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wake up the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and taking yet another interrupt, the CQ handler should be queued so
+ * that it is processed in a subsequent polling action. The value of
+ * the delay indicates when to reschedule it.
**/
-static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
- struct lpfc_queue *cq, uint16_t cqid)
+static void
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
- int ret = 0;
+ struct lpfc_hba *phba = cq->phba;
+ unsigned long delay;
+ bool workposted = false;
+ int ret;
- switch (cq->poll_mode) {
- case LPFC_IRQ_POLL:
- /* CGN mgmt is mutually exclusive from softirq processing */
- if (phba->cmf_active_mode == LPFC_CFG_OFF) {
- irq_poll_sched(&cq->iop);
- break;
- }
- fallthrough;
- case LPFC_QUEUE_WORK:
- default:
+ /* process and rearm the CQ */
+ workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
+ &delay);
+
+ if (delay) {
if (is_kdump_kernel())
- ret = queue_work(phba->wq, &cq->irqwork);
+ ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
+ delay);
else
- ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
+ ret = queue_delayed_work_on(cq->chann, phba->wq,
+ &cq->sched_irqwork, delay);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0383 Cannot schedule queue work "
- "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id,
- raw_smp_processor_id());
+ "0367 Cannot schedule queue work "
+ "for cqid=%d on CPU %d\n",
+ cq->queue_id, cq->chann);
}
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_hba_process_cq - fast-path work handler when started by
+ * interrupt
+ * @work: pointer to work element
+ *
+ * Translates from the work element and calls the fast-path handler.
+ **/
+static void
+lpfc_sli4_hba_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
+
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
@@ -15317,6 +15398,7 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @eq: Pointer to the queue structure.
* @eqe: Pointer to fast-path event queue entry.
+ * @poll_mode: poll_mode to execute processing the cq.
*
* This routine process a event queue entry from the fast-path event queue.
* It will check the MajorCode and MinorCode to determine this is for a
@@ -15327,11 +15409,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
**/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
- struct lpfc_eqe *eqe)
+ struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
{
struct lpfc_queue *cq = NULL;
uint32_t qidx = eq->hdwq;
uint16_t cqid, id;
+ int ret;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15391,70 +15474,25 @@ work_cq:
else
cq->isr_timestamp = 0;
#endif
- lpfc_sli4_sched_cq_work(phba, cq, cqid);
-}
-/**
- * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
- * @cq: Pointer to CQ to be processed
- * @poll_mode: Enum lpfc_poll_state to determine poll mode
- *
- * This routine calls the cq processing routine with the handler for
- * fast path CQEs.
- *
- * The CQ routine returns two values: the first is the calling status,
- * which indicates whether work was queued to the background discovery
- * thread. If true, the routine should wakeup the discovery thread;
- * the second is the delay parameter. If non-zero, rather than rearming
- * the CQ and yet another interrupt, the CQ handler should be queued so
- * that it is processed in a subsequent polling action. The value of
- * the delay indicates when to reschedule it.
- **/
-static void
-__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
- enum lpfc_poll_mode poll_mode)
-{
- struct lpfc_hba *phba = cq->phba;
- unsigned long delay;
- bool workposted = false;
- int ret = 0;
-
- /* process and rearm the CQ */
- workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
- &delay, poll_mode);
-
- if (delay) {
+ switch (poll_mode) {
+ case LPFC_THREADED_IRQ:
+ __lpfc_sli4_hba_process_cq(cq);
+ break;
+ case LPFC_QUEUE_WORK:
+ default:
if (is_kdump_kernel())
- ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
- delay);
+ ret = queue_work(phba->wq, &cq->irqwork);
else
- ret = queue_delayed_work_on(cq->chann, phba->wq,
- &cq->sched_irqwork, delay);
+ ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0367 Cannot schedule queue work "
- "for cqid=%d on CPU %d\n",
- cq->queue_id, cq->chann);
+ "0383 Cannot schedule queue work "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id,
+ raw_smp_processor_id());
+ break;
}
-
- /* wake up worker thread if there are works to be done */
- if (workposted)
- lpfc_worker_wake_up(phba);
-}
-
-/**
- * lpfc_sli4_hba_process_cq - fast-path work handler when started by
- * interrupt
- * @work: pointer to work element
- *
- * translates from the work handler and calls the fast-path handler.
- **/
-static void
-lpfc_sli4_hba_process_cq(struct work_struct *work)
-{
- struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
-
- __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}
/**
@@ -15469,7 +15507,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
struct lpfc_queue *cq = container_of(to_delayed_work(work),
struct lpfc_queue, sched_irqwork);
- __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
@@ -15495,8 +15533,9 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
* and returns for these events. This function is called without any lock
* held. It gets the hbalock to access and update SLI data structures.
*
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
+ * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
+ * when interrupt is scheduled to be handled from a threaded irq context, or
+ * else returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
@@ -15505,8 +15544,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *fpeq;
unsigned long iflag;
- int ecount = 0;
int hba_eqidx;
+ int ecount = 0;
struct lpfc_eq_intr_info *eqi;
/* Get the driver's phba structure from the dev_id */
@@ -15535,30 +15574,41 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
- eqi->icnt++;
-
- fpeq->last_cpu = raw_smp_processor_id();
+ switch (fpeq->poll_mode) {
+ case LPFC_THREADED_IRQ:
+ /* CGN mgmt is mutually exclusive from irq processing */
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ return IRQ_WAKE_THREAD;
+ fallthrough;
+ case LPFC_QUEUE_WORK:
+ default:
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+ eqi->icnt++;
- if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
- fpeq->q_flag & HBA_EQ_DELAY_CHK &&
- phba->cfg_auto_imax &&
- fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
- phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
- lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+ fpeq->last_cpu = raw_smp_processor_id();
- /* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+ phba->cfg_auto_imax &&
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
+ LPFC_MAX_AUTO_EQ_DELAY);
- if (unlikely(ecount == 0)) {
- fpeq->EQ_no_entry++;
- if (phba->intr_type == MSIX)
- /* MSI-X treated interrupt served as no EQ share INT */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0358 MSI-X interrupt with no EQE\n");
- else
- /* Non MSI-X treated on interrupt as EQ share INT */
- return IRQ_NONE;
+ /* process and rearm the EQ */
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_QUEUE_WORK);
+
+ if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0358 MSI-X interrupt with no EQE\n");
+ else
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
}
return IRQ_HANDLED;
@@ -16115,13 +16165,69 @@ out:
return status;
}
-static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
+/**
+ * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
+ * threaded irq context.
+ *
+ * Returns
+ * IRQ_HANDLED - interrupt is handled
+ * IRQ_NONE - otherwise
+ **/
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
{
- struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
+ struct lpfc_hba *phba;
+ struct lpfc_hba_eq_hdl *hba_eq_hdl;
+ struct lpfc_queue *fpeq;
+ int ecount = 0;
+ int hba_eqidx;
+ struct lpfc_eq_intr_info *eqi;
- __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
+ /* Get the driver's phba structure from the dev_id */
+ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+ phba = hba_eq_hdl->phba;
+ hba_eqidx = hba_eq_hdl->idx;
- return 1;
+ if (unlikely(!phba))
+ return IRQ_NONE;
+ if (unlikely(!phba->sli4_hba.hdwq))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
+ if (unlikely(!fpeq))
+ return IRQ_NONE;
+
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
+ eqi->icnt++;
+
+ fpeq->last_cpu = raw_smp_processor_id();
+
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+ phba->cfg_auto_imax &&
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+
+ /* process and rearm the EQ */
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_THREADED_IRQ);
+
+ if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3358 MSI-X interrupt with no EQE\n");
+ else
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
+ return IRQ_HANDLED;
}
/**
@@ -16265,8 +16371,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (cq->queue_id > phba->sli4_hba.cq_max)
phba->sli4_hba.cq_max = cq->queue_id;
-
- irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
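
This run of hunks replaces lpfc's irq_poll softirq path with the kernel's threaded-IRQ facility: when an EQ is in LPFC_THREADED_IRQ mode the hard handler returns IRQ_WAKE_THREAD, and the new lpfc_sli4_hba_intr_handler_th() drains the EQ from a sleepable kernel thread. A minimal sketch of the generic mechanism (the demo_* names are illustrative, not lpfc symbols):

#include <linux/interrupt.h>

/* Hard handler: runs with interrupts off; do the minimum and defer. */
static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/* e.g. ack/mask the source, then hand off to the thread */
	return IRQ_WAKE_THREAD;
}

/* Thread handler: runs in process context and may sleep. */
static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	/* drain the event queue, rearm the hardware, etc. */
	return IRQ_HANDLED;
}

static int demo_setup(int irq, void *dev)
{
	/* request_threaded_irq() wires both halves together. */
	return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
				    IRQF_ONESHOT, "demo", dev);
}
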
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 2a0864e6d7cd..2541a8fba093 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -140,7 +140,7 @@ struct lpfc_rqb {
enum lpfc_poll_mode {
LPFC_QUEUE_WORK,
- LPFC_IRQ_POLL
+ LPFC_THREADED_IRQ,
};
struct lpfc_idle_stat {
@@ -279,8 +279,6 @@ struct lpfc_queue {
struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
-#define LPFC_IRQ_POLL_WEIGHT 256
- struct irq_poll iop;
enum lpfc_poll_mode poll_mode;
};
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c97411b0992e..5fda8ac6b883 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.11"
+#define LPFC_DRIVER_VERSION "14.2.0.12"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 63bac3684c19..3554f6b07727 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1722,11 +1722,9 @@ struct megasas_sge_skinny {
} __packed;
union megasas_sgl {
-
- struct megasas_sge32 sge32[1];
- struct megasas_sge64 sge64[1];
- struct megasas_sge_skinny sge_skinny[1];
-
+ DECLARE_FLEX_ARRAY(struct megasas_sge32, sge32);
+ DECLARE_FLEX_ARRAY(struct megasas_sge64, sge64);
+ DECLARE_FLEX_ARRAY(struct megasas_sge_skinny, sge_skinny);
} __attribute__ ((packed));
struct megasas_header {
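
The megaraid change swaps fake one-element arrays for DECLARE_FLEX_ARRAY(), which is how a C99 flexible array member is placed inside a union (a bare flexible array is not permitted there). With real flexible arrays, FORTIFY_SOURCE and -fstrict-flex-arrays bounds checking stop treating accesses past index 0 as overflows. A sketch of the idiom with illustrative struct names:

#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY() */
#include <linux/types.h>

struct demo_sge {
	__le64 addr;
	__le32 len;
} __packed;

/* DECLARE_FLEX_ARRAY() wraps the flexible array in an anonymous
 * struct so it is legal as a union member. */
union demo_sgl {
	DECLARE_FLEX_ARRAY(struct demo_sge, sge);
	DECLARE_FLEX_ARRAY(u8, raw);
} __packed;
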
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 4d84d5bd173f..82b55e955730 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -2058,7 +2058,7 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
GFP_KERNEL);
if (!sas_expander)
- return -1;
+ return -ENOMEM;
sas_expander->handle = handle;
sas_expander->num_phys = expander_pg0.num_phys;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 8b9490011e36..2e886c1d867d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -43,7 +43,8 @@
#include "pm8001_chips.h"
#include "pm80xx_hwi.h"
-static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING |
+ PM8001_EVENT_LOGGING | PM8001_INIT_LOGGING;
module_param(logging_level, ulong, 0644);
MODULE_PARM_DESC(logging_level, " bits for enabling logging info.");
@@ -666,7 +667,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
* Currently we just set the fixed SAS address to our HBA, for manufacture,
* it should read from the EEPROM
*/
-static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
u8 i, j;
u8 sas_add[8];
@@ -679,6 +680,12 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
struct pm8001_ioctl_payload payload;
u16 deviceid;
int rc;
+ unsigned long time_remaining;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n");
+ return -EIO;
+ }
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
pm8001_ha->nvmd_completion = &completion;
@@ -703,16 +710,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
payload.offset = 0;
payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
if (!payload.func_specific) {
- pm8001_dbg(pm8001_ha, INIT, "mem alloc fail\n");
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "mem alloc fail\n");
+ return -ENOMEM;
}
rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
if (rc) {
kfree(payload.func_specific);
- pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "nvmd failed\n");
+ return -EIO;
+ }
+ time_remaining = wait_for_completion_timeout(&completion,
+			msecs_to_jiffies(60 * 1000)); /* 1 min */
+ if (!time_remaining) {
+ kfree(payload.func_specific);
+ pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
+ return -EIO;
}
- wait_for_completion(&completion);
+
for (i = 0, j = 0; i <= 7; i++, j++) {
if (pm8001_ha->chip_id == chip_8001) {
@@ -751,6 +765,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
SAS_ADDR_SIZE);
#endif
+ return 0;
}
/*
@@ -1166,7 +1181,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm80xx_set_thermal_config(pm8001_ha);
}
- pm8001_init_sas_add(pm8001_ha);
+ if (pm8001_init_sas_add(pm8001_ha))
+ goto err_out_shost;
/* phy setting support for motherboard controller */
rc = pm8001_configure_phy_settings(pm8001_ha);
if (rc)
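
pm8001_init_sas_add() now bounds its wait for the NVMD (EEPROM) response instead of blocking probe forever, and it reports failure so pm8001_pci_probe() can unwind. The underlying pattern, sketched generically:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_fw_reply(struct completion *done)
{
	unsigned long left;

	/* wait_for_completion_timeout() returns 0 on timeout,
	 * otherwise the number of jiffies remaining. */
	left = wait_for_completion_timeout(done,
					   msecs_to_jiffies(60 * 1000));
	return left ? 0 : -ETIMEDOUT;
}
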
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index dc1f4d958e03..953572fc0d9e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -71,6 +71,7 @@
#define PM8001_DEV_LOGGING 0x80 /* development message logging */
#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
+#define PM8001_EVENT_LOGGING 0x400 /* HW event logging */
#define pm8001_info(HBA, fmt, ...) \
pr_info("%s:: %s %d: " fmt, \
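
PM8001_EVENT_LOGGING claims the next free bit in the driver's logging_level mask, and the new module default enables it so the reworked HW-event messages below are emitted out of the box. How such a bit typically gates output, in the style of pm8001_dbg() (names here are illustrative):

#include <linux/module.h>

#define DEMO_FAIL_LOGGING	0x001
#define DEMO_EVENT_LOGGING	0x400	/* HW event logging */

static ulong logging_level = DEMO_FAIL_LOGGING | DEMO_EVENT_LOGGING;
module_param(logging_level, ulong, 0644);

#define demo_dbg(level, fmt, ...)				\
do {								\
	if (logging_level & DEMO_##level##_LOGGING)		\
		pr_info("demo: " fmt, ##__VA_ARGS__);		\
} while (0)

/* usage: demo_dbg(EVENT, "HW_EVENT_SAS_PHY_UP phyid:%#x\n", phy_id); */
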
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 9584cadc4201..39a12ee94a72 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3239,9 +3239,9 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- pm8001_dbg(pm8001_ha, DEVIO,
- "port id %d, phy id %d link_rate %d portstate 0x%x\n",
- port_id, phy_id, link_rate, portstate);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SATA_PHY_UP phyid:%#x port_id:%#x link_rate:%d portstate:%#x\n",
+ phy_id, port_id, link_rate, portstate);
phy->port = port;
port->port_id = port_id;
@@ -3291,10 +3291,14 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_attached = 0;
switch (portstate) {
case PORT_VALID:
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_VALID\n",
+ phy_id, port_id);
break;
case PORT_INVALID:
- pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
- port_id);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_INVALID\n",
+ phy_id, port_id);
pm8001_dbg(pm8001_ha, MSG,
" Last phy Down and port invalid\n");
if (port_sata) {
@@ -3306,18 +3310,21 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
sas_phy_disconnected(&phy->sas_phy);
break;
case PORT_IN_RESET:
- pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
- port_id);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_IN_RESET\n",
+ phy_id, port_id);
break;
case PORT_NOT_ESTABLISHED:
- pm8001_dbg(pm8001_ha, MSG,
- " Phy Down and PORT_NOT_ESTABLISHED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_NOT_ESTABLISHED\n",
+ phy_id, port_id);
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
- pm8001_dbg(pm8001_ha, MSG, " Phy Down and PORT_LOSTCOMM\n");
- pm8001_dbg(pm8001_ha, MSG,
- " Last phy Down and port invalid\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_LOSTCOMM\n",
+ phy_id, port_id);
+ pm8001_dbg(pm8001_ha, MSG, " Last phy Down and port invalid\n");
if (port_sata) {
port->port_attached = 0;
phy->phy_type = 0;
@@ -3328,9 +3335,9 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- pm8001_dbg(pm8001_ha, DEVIO,
- " Phy Down and(default) = 0x%x\n",
- portstate);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
break;
}
@@ -3410,6 +3417,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
u8 phy_id =
(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
u16 eventType =
(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
u8 status =
@@ -3425,26 +3433,29 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
switch (eventType) {
case HW_EVENT_SAS_PHY_UP:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SAS_PHY_UP phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_SPINUP_HOLD:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SATA_SPINUP_HOLD phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
GFP_ATOMIC);
break;
case HW_EVENT_PHY_DOWN:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
hw_event_phy_down(pm8001_ha, piomb);
- phy->phy_attached = 0;
phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_INVALID phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
@@ -3463,7 +3474,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_PHY_ERROR:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
@@ -3477,34 +3490,39 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_INVALID_DWORD phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_DISPARITY_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_CODE_VIOLATION phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_MALFUNCTION:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_MALFUNCTION phyid:%#x\n", phy_id);
break;
case HW_EVENT_BROADCAST_SES:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
@@ -3515,25 +3533,30 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_INBOUND_CRC_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_INBOUND_CRC_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_HARD_RESET_RECEIVED:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_HARD_RESET_RECEIVED phyid:%#x\n", phy_id);
sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_ID_FRAME_TIMEOUT phyid:%#x\n", phy_id);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_PHY_RESET_FAILED phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
@@ -3543,13 +3566,16 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RESET_TIMER_TMO phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
if (!pm8001_ha->phy[phy_id].reset_completion) {
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
}
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
+ port->port_state = portstate;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
GFP_ATOMIC);
if (pm8001_ha->phy[phy_id].reset_completion) {
@@ -3560,8 +3586,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_PORT_RECOVERY_TIMER_TMO,
port_id, phy_id, 0, 0);
@@ -3575,24 +3602,32 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVER:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RECOVER phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
hw_event_port_recover(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_RESET_COMPLETE:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RESET_COMPLETE phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
if (pm8001_ha->phy[phy_id].reset_completion) {
pm8001_ha->phy[phy_id].port_reset_status =
PORT_RESET_SUCCESS;
complete(pm8001_ha->phy[phy_id].reset_completion);
pm8001_ha->phy[phy_id].reset_completion = NULL;
}
+ phy->phy_attached = 1;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+ port->port_state = portstate;
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
break;
default:
- pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type 0x%x\n",
- eventType);
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown event portid:%d phyid:%d event:0x%x status:0x%x\n",
+ port_id, phy_id, eventType, status);
break;
}
return 0;
@@ -4726,6 +4761,9 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
+ pm8001_dbg(pm8001_ha, INIT,
+ "register device req phy_id 0x%x port_id 0x%x\n", phy_id,
+ (port->port_id & 0xFF));
rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
@@ -4815,7 +4853,7 @@ static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.ppc_phyid =
cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF));
- pm8001_dbg(pm8001_ha, INIT,
+ pm8001_dbg(pm8001_ha, DISC,
" phy profile command for phy %x ,length is %d\n",
le32_to_cpu(payload.ppc_phyid), length);
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3b64de81ea0d..2a31ddc99dde 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3041,9 +3041,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
* addresses of our queues
*/
if (!qedf->p_cpuq) {
- status = -EINVAL;
QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
- goto mem_alloc_failure;
+ return -EINVAL;
}
qedf->global_queues = kzalloc((sizeof(struct global_queue *)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 70cfc94c3d43..b00222459607 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2750,6 +2750,7 @@ static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ scsi_qla_host_t *vha;
if (!fcport)
return;
@@ -2759,9 +2760,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
+ vha = fcport->vha;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
return;
}
/*
@@ -2786,6 +2790,15 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
qla2x00_port_logout(fcport->vha, fcport);
}
}
+
+ /* check for any straggling io left behind */
+ if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
+ ql_log(ql_log_warn, vha, 0x300b,
+		    "IO did not return. Resetting.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ }
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index df5e5b7fdcfe..dfee3b41bdf1 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -465,6 +465,15 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
return res;
}
+struct tmf_arg {
+ struct qla_qpair *qpair;
+ struct fc_port *fcport;
+ struct scsi_qla_host *vha;
+ u64 lun;
+ u32 flags;
+ uint8_t modifier;
+};
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
@@ -544,6 +553,10 @@ struct srb_iocb {
uint32_t data;
struct completion comp;
__le16 comp_status;
+
+ uint8_t modifier;
+ uint8_t vp_index;
+ uint16_t loop_id;
} tmf;
struct {
#define SRB_FXDISC_REQ_DMA_VALID BIT_0
@@ -647,6 +660,7 @@ struct srb_iocb {
#define SRB_SA_UPDATE 25
#define SRB_ELS_CMD_HST_NOLOGIN 26
#define SRB_SA_REPLACE 27
+#define SRB_MARKER 28
struct qla_els_pt_arg {
u8 els_opcode;
@@ -2528,6 +2542,7 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
+ struct list_head tmf_pending;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -2548,6 +2563,8 @@ typedef struct fc_port {
unsigned int do_prli_nvme:1;
uint8_t nvme_flag;
+ uint8_t active_tmf;
+#define MAX_ACTIVE_TMF 8
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -5498,4 +5515,8 @@ struct ql_vnd_tgt_stats_resp {
_fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
_fp->flags
+#define TMF_NOT_READY(_fcport) \
+ (!_fcport || IS_SESSION_DELETED(_fcport) || atomic_read(&_fcport->state) != FCS_ONLINE || \
+ !_fcport->vha->hw->flags.fw_started)
+
#endif
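
struct tmf_arg packages everything one task-management send needs (host, session, LUN, flags, marker modifier, and the queue pair to use), and TMF_NOT_READY() centralizes the session-usable test this patch repeats before every TMF and marker submission. A hypothetical caller showing how the pieces fit together (send_lun_reset() is not a driver function, and __qla2x00_async_tm_cmd() is static to qla_init.c):

/* Illustrative caller; mirrors how __qla2x00_async_tm_cmd() is fed. */
static int send_lun_reset(fc_port_t *fcport, u64 lun)
{
	struct tmf_arg a = {
		.vha	  = fcport->vha,
		.fcport	  = fcport,
		.lun	  = lun,
		.flags	  = TCF_LUN_RESET,
		.modifier = MK_SYNC_ID_LUN,
	};

	/* Bail out early if the session or firmware went away. */
	if (TMF_NOT_READY(fcport))
		return QLA_SUSPENDED;

	a.qpair = fcport->vha->hw->base_qpair;
	return __qla2x00_async_tm_cmd(&a);
}
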
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 391c8b3623a6..ba7831f24734 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -69,7 +69,7 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ec0423ec6681..a2d48d6b1dfc 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1996,6 +1996,11 @@ qla2x00_tmf_iocb_timeout(void *data)
int rc, h;
unsigned long flags;
+ if (sp->type == SRB_MARKER) {
+ complete(&tmf->u.tmf.comp);
+ return;
+ }
+
rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
@@ -2013,24 +2018,131 @@ qla2x00_tmf_iocb_timeout(void *data)
}
}
+static void qla_marker_sp_done(srb_t *sp, int res)
+{
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+ if (res != QLA_SUCCESS)
+ ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
+ "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
+ sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
+ sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);
+
+ sp->u.iocb_cmd.u.tmf.data = res;
+ complete(&tmf->u.tmf.comp);
+}
+
+#define START_SP_W_RETRIES(_sp, _rval) \
+{\
+ int cnt = 5; \
+ do { \
+ _rval = qla2x00_start_sp(_sp); \
+ if (_rval == EAGAIN) \
+ msleep(1); \
+ else \
+ break; \
+ cnt--; \
+ } while (cnt); \
+}
+
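
START_SP_W_RETRIES() retries qla2x00_start_sp() up to five times with a 1 ms sleep between attempts, on the assumption that EAGAIN signals a transiently exhausted IOCB ring. The brace-block form works because every use site ends in a semicolon, but the conventional kernel spelling would be a do/while(0) wrapper; an equivalent sketch (not the patch's code):

#define START_SP_W_RETRIES(_sp, _rval)			\
do {							\
	int _cnt = 5;					\
	do {						\
		(_rval) = qla2x00_start_sp(_sp);	\
		if ((_rval) != EAGAIN)			\
			break;				\
		msleep(1);	/* ring full; retry */	\
	} while (--_cnt);				\
} while (0)
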
+/**
+ * qla26xx_marker - send marker IOCB and wait for its completion.
+ * @arg: pointer to argument list.
+ * It is assumed the caller will provide an fcport pointer and modifier.
+ */
+static int
+qla26xx_marker(struct tmf_arg *arg)
+{
+ struct scsi_qla_host *vha = arg->vha;
+ struct srb_iocb *tm_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ fc_port_t *fcport = arg->fcport;
+
+ if (TMF_NOT_READY(arg->fcport)) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8039,
+ "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+ fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, arg->qpair->id);
+ return QLA_SUSPENDED;
+ }
+
+ /* ref: INIT */
+ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_MARKER;
+ sp->name = "marker";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
+
+ tm_iocb = &sp->u.iocb_cmd;
+ init_completion(&tm_iocb->u.tmf.comp);
+ tm_iocb->u.tmf.modifier = arg->modifier;
+ tm_iocb->u.tmf.lun = arg->lun;
+ tm_iocb->u.tmf.loop_id = fcport->loop_id;
+ tm_iocb->u.tmf.vp_index = vha->vp_idx;
+
+ START_SP_W_RETRIES(sp, rval);
+
+ ql_dbg(ql_dbg_taskm, vha, 0x8006,
+ "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, sp->qpair->id, rval);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8031,
+ "Marker IOCB send failure (%x).\n", rval);
+ goto done_free_sp;
+ }
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+ rval = tm_iocb->u.tmf.data;
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8019,
+ "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, sp->qpair->id, rval);
+ }
+
+done_free_sp:
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+done:
+ return rval;
+}
+
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ if (res)
+ tmf->u.tmf.data = res;
complete(&tmf->u.tmf.comp);
}
-int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
- uint32_t tag)
+static int
+__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ fc_port_t *fcport = arg->fcport;
+
+ if (TMF_NOT_READY(arg->fcport)) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8032,
+ "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+ fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, arg->qpair->id);
+ return QLA_SUSPENDED;
+ }
+
/* ref: INIT */
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -2043,15 +2155,16 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb = &sp->u.iocb_cmd;
init_completion(&tm_iocb->u.tmf.comp);
- tm_iocb->u.tmf.flags = flags;
- tm_iocb->u.tmf.lun = lun;
+ tm_iocb->u.tmf.flags = arg->flags;
+ tm_iocb->u.tmf.lun = arg->lun;
+
+ START_SP_W_RETRIES(sp, rval);
ql_dbg(ql_dbg_taskm, vha, 0x802f,
- "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->flags, arg->lun, sp->qpair->id, rval);
- rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
wait_for_completion(&tm_iocb->u.tmf.comp);
@@ -2063,15 +2176,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
"TM IOCB failed (%x).\n", rval);
}
- if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
- flags = tm_iocb->u.tmf.flags;
- lun = (uint16_t)tm_iocb->u.tmf.lun;
-
- /* Issue Marker IOCB */
- qla2x00_marker(vha, vha->hw->base_qpair,
- fcport->loop_id, lun,
- flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
- }
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
+ rval = qla26xx_marker(arg);
done_free_sp:
/* ref: INIT */
@@ -2080,6 +2186,115 @@ done:
return rval;
}
+static void qla_put_tmf(fc_port_t *fcport)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ fcport->active_tmf--;
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+static
+int qla_get_tmf(fc_port_t *fcport)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ int rc = 0;
+ LIST_HEAD(tmf_elem);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ list_add_tail(&tmf_elem, &fcport->tmf_pending);
+
+ while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ msleep(1);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (TMF_NOT_READY(fcport)) {
+ ql_log(ql_log_warn, vha, 0x802c,
+ "Unable to acquire TM resource due to disruption.\n");
+ rc = EIO;
+ break;
+ }
+ if (fcport->active_tmf < MAX_ACTIVE_TMF &&
+ list_is_first(&tmf_elem, &fcport->tmf_pending))
+ break;
+ }
+
+ list_del(&tmf_elem);
+
+ if (!rc)
+ fcport->active_tmf++;
+
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ return rc;
+}
+
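
qla_get_tmf()/qla_put_tmf() cap in-flight task-management commands per port at MAX_ACTIVE_TMF, using an on-stack list_head threaded onto fcport->tmf_pending as a FIFO ticket so waiters are admitted in arrival order rather than racing on the counter. The technique distilled with generic names (the TMF_NOT_READY disruption bail-out is omitted for brevity):

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct port {
	spinlock_t lock;
	struct list_head waiters;	/* FIFO of on-stack tickets */
	int active;			/* current in-flight count  */
};

static int port_get_slot(struct port *p, int max)
{
	LIST_HEAD(ticket);		/* lives on this stack frame */
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	list_add_tail(&ticket, &p->waiters);
	/* Admitted only when under the cap AND first in line. */
	while (p->active >= max || !list_is_first(&ticket, &p->waiters)) {
		spin_unlock_irqrestore(&p->lock, flags);
		msleep(1);		/* polling wait, as in the patch */
		spin_lock_irqsave(&p->lock, flags);
	}
	list_del(&ticket);
	p->active++;
	spin_unlock_irqrestore(&p->lock, flags);
	return 0;
}
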
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+ uint32_t tag)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_qpair *qpair;
+ struct tmf_arg a;
+ int i, rval = QLA_SUCCESS;
+
+ if (TMF_NOT_READY(fcport))
+ return QLA_SUSPENDED;
+
+ a.vha = fcport->vha;
+ a.fcport = fcport;
+ a.lun = lun;
+	if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET | TCF_CLEAR_ACA)) {
+ a.modifier = MK_SYNC_ID_LUN;
+
+ if (qla_get_tmf(fcport))
+ return QLA_FUNCTION_FAILED;
+ } else {
+ a.modifier = MK_SYNC_ID;
+ }
+
+ if (vha->hw->mqenable) {
+ for (i = 0; i < vha->hw->num_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+
+ if (TMF_NOT_READY(fcport)) {
+ ql_log(ql_log_warn, vha, 0x8026,
+ "Unable to send TM due to disruption.\n");
+ rval = QLA_SUSPENDED;
+ break;
+ }
+
+ a.qpair = qpair;
+			a.flags = flags | TCF_NOTMCMD_TO_TARGET;
+ rval = __qla2x00_async_tm_cmd(&a);
+ if (rval)
+ break;
+ }
+ }
+
+ if (rval)
+ goto bailout;
+
+ a.qpair = vha->hw->base_qpair;
+ a.flags = flags;
+ rval = __qla2x00_async_tm_cmd(&a);
+
+bailout:
+ if (a.modifier == MK_SYNC_ID_LUN)
+ qla_put_tmf(fcport);
+
+ return rval;
+}
+
int
qla24xx_async_abort_command(srb_t *sp)
{
@@ -4861,7 +5076,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
if (use_tbl &&
ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES)
- strlcpy(ha->model_desc,
+ strscpy(ha->model_desc,
qla2x00_model_name[index * 2 + 1],
sizeof(ha->model_desc));
} else {
@@ -4869,14 +5084,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
if (use_tbl &&
ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES) {
- strlcpy(ha->model_number,
+ strscpy(ha->model_number,
qla2x00_model_name[index * 2],
sizeof(ha->model_number));
- strlcpy(ha->model_desc,
+ strscpy(ha->model_desc,
qla2x00_model_name[index * 2 + 1],
sizeof(ha->model_desc));
} else {
- strlcpy(ha->model_number, def,
+ strscpy(ha->model_number, def,
sizeof(ha->model_number));
}
}
@@ -5291,6 +5506,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
+ INIT_LIST_HEAD(&fcport->tmf_pending);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
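
The strlcpy() -> strscpy() conversions here (and in qla_mr.c and ql4_mbx.c further down) replace a routine that always walks the full source string to compute its return value, and can therefore read past an unterminated source, with one that stops at the destination size and reports truncation as -E2BIG. Sketch of the preferred call:

#include <linux/string.h>

static void copy_model(char *dst, size_t dst_sz, const char *src)
{
	ssize_t n;

	/* strscpy() copies at most dst_sz - 1 bytes, always
	 * NUL-terminates, and never reads src beyond what it copies. */
	n = strscpy(dst, src, dst_sz);
	if (n == -E2BIG)
		pr_debug("model string truncated\n");
}
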
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b9b3e6f80ea9..6acfdcc48b16 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -522,21 +522,25 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
return (QLA_FUNCTION_FAILED);
}
+ mrk24 = (struct mrk_entry_24xx *)mrk;
+
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
if (IS_FWI2_CAPABLE(ha)) {
- mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
- mrk24->handle = make_handle(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16((uint16_t)lun);
}
}
+
+ if (IS_FWI2_CAPABLE(ha))
+ mrk24->handle = QLA_SKIP_HANDLE;
+
wmb();
qla2x00_start_iocbs(vha, req);
@@ -2541,7 +2545,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
- struct req_que *req = vha->req;
+ struct req_que *req = sp->qpair->req;
flags = iocb->u.tmf.flags;
lun = iocb->u.tmf.lun;
@@ -2557,7 +2561,8 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->port_id[2] = fcport->d_id.b.domain;
tsk->vp_index = fcport->vha->vp_idx;
- if (flags == TCF_LUN_RESET) {
+	if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET |
+	    TCF_CLEAR_TASK_SET | TCF_CLEAR_ACA)) {
int_to_scsilun(lun, &tsk->lun);
host_to_fcp_swap((uint8_t *)&tsk->lun,
sizeof(tsk->lun));
@@ -3852,9 +3857,9 @@ static int qla_get_iocbs_resource(struct srb *sp)
case SRB_NACK_LOGO:
case SRB_LOGOUT_CMD:
case SRB_CTRL_VP:
- push_it_through = true;
- fallthrough;
+ case SRB_MARKER:
default:
+ push_it_through = true;
get_exch = false;
}
@@ -3870,6 +3875,19 @@ static int qla_get_iocbs_resource(struct srb *sp)
return qla_get_fw_resources(sp->qpair, &sp->iores);
}
+static void
+qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
+{
+ mrk->entry_type = MARKER_TYPE;
+ mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
+ if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
+ mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
+ int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
+ host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
+ mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
+ }
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3973,6 +3991,9 @@ qla2x00_start_sp(srb_t *sp)
case SRB_SA_REPLACE:
qla24xx_sa_replace_iocb(sp, pkt);
break;
+ case SRB_MARKER:
+ qla_marker_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 71feda2cdb63..a07c010b0843 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1862,9 +1862,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
}
}
-srb_t *
-qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
- struct req_que *req, void *iocb)
+static srb_t *
+qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ struct req_que *req, void *iocb, u16 *ret_index)
{
struct qla_hw_data *ha = vha->hw;
sts_entry_t *pkt = iocb;
@@ -1899,12 +1899,25 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
return NULL;
}
- req->outstanding_cmds[index] = NULL;
-
+ *ret_index = index;
qla_put_fw_resources(sp->qpair, &sp->iores);
return sp;
}
+srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ struct req_que *req, void *iocb)
+{
+ uint16_t index;
+ srb_t *sp;
+
+ sp = qla_get_sp_from_handle(vha, func, req, iocb, &index);
+ if (sp)
+ req->outstanding_cmds[index] = NULL;
+
+ return sp;
+}
+
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct mbx_entry *mbx)
@@ -3237,13 +3250,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
- req->outstanding_cmds[handle] = NULL;
cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
sts->handle, sp);
+ req->outstanding_cmds[handle] = NULL;
return;
}
@@ -3514,6 +3527,9 @@ out:
if (rsp->status_srb == NULL)
sp->done(sp, res);
+
+ /* for io's, clearing of outstanding_cmds[handle] means scsi_done was called */
+ req->outstanding_cmds[handle] = NULL;
}
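
Reordering the NULL-ing of outstanding_cmds[handle] to after sp->done() turns the slot into a completion flag: as the new comment says, an empty slot now means scsi_done() has already run. The eh-wait rework in qla_os.c further down depends on exactly this publish-after-complete ordering; a hypothetical wrapper spelling out the contract:

/* Sketch: complete first, then publish the empty slot, so a waiter
 * that polls outstanding_cmds[] never sees the slot cleared before
 * scsi_done() has run. */
static void complete_and_publish(struct req_que *req, srb_t *sp,
				 u16 handle, int res)
{
	sp->done(sp, res);			/* invokes scsi_done()     */
	WRITE_ONCE(req->outstanding_cmds[handle], NULL);
}
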
/**
@@ -3590,6 +3606,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
uint16_t que = MSW(pkt->handle);
struct req_que *req = NULL;
int res = DID_ERROR << 16;
+ u16 index;
ql_dbg(ql_dbg_async, vha, 0x502a,
"iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
@@ -3608,7 +3625,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
switch (pkt->entry_type) {
case NOTIFY_ACK_TYPE:
- case STATUS_TYPE:
case STATUS_CONT_TYPE:
case LOGINOUT_PORT_IOCB_TYPE:
case CT_IOCB_TYPE:
@@ -3628,6 +3644,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case CTIO_TYPE7:
case CTIO_CRC2:
return 1;
+ case STATUS_TYPE:
+ sp = qla_get_sp_from_handle(vha, func, req, pkt, &index);
+ if (sp) {
+ sp->done(sp, res);
+ req->outstanding_cmds[index] = NULL;
+ return 0;
+ }
+ break;
}
fatal:
ql_log(ql_log_warn, vha, 0x5030,
@@ -3750,6 +3774,28 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
return rc;
}
+static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mrk_entry_24xx *pkt)
+{
+ const char func[] = "MRK-IOCB";
+ srb_t *sp;
+ int res = QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ if (pkt->entry_status) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
+ res = QLA_COMMAND_ERROR;
+ }
+ sp->u.iocb_cmd.u.tmf.data = res;
+ sp->done(sp, res);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context
@@ -3863,9 +3909,7 @@ process_err:
(struct nack_to_isp *)pkt);
break;
case MARKER_TYPE:
- /* Do nothing in this case, this check is to prevent it
- * from falling into default case
- */
+ qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt);
break;
case ABORT_IOCB_TYPE:
qla24xx_abort_iocb_entry(vha, rsp->req,
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index f726eb8449c5..083f94e43fba 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -691,7 +691,7 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
struct qla_hw_data *ha = vha->hw;
if (pci_is_pcie(ha->pdev))
- strlcpy(str, "PCIe iSA", str_len);
+ strscpy(str, "PCIe iSA", str_len);
return str;
}
@@ -1850,21 +1850,21 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
phost_info = &preg_hsi->hsi;
memset(preg_hsi, 0, sizeof(struct register_host_info));
phost_info->os_type = OS_TYPE_LINUX;
- strlcpy(phost_info->sysname, p_sysid->sysname,
+ strscpy(phost_info->sysname, p_sysid->sysname,
sizeof(phost_info->sysname));
- strlcpy(phost_info->nodename, p_sysid->nodename,
+ strscpy(phost_info->nodename, p_sysid->nodename,
sizeof(phost_info->nodename));
if (!strcmp(phost_info->nodename, "(none)"))
ha->mr.host_info_resend = true;
- strlcpy(phost_info->release, p_sysid->release,
+ strscpy(phost_info->release, p_sysid->release,
sizeof(phost_info->release));
- strlcpy(phost_info->version, p_sysid->version,
+ strscpy(phost_info->version, p_sysid->version,
sizeof(phost_info->version));
- strlcpy(phost_info->machine, p_sysid->machine,
+ strscpy(phost_info->machine, p_sysid->machine,
sizeof(phost_info->machine));
- strlcpy(phost_info->domainname, p_sysid->domainname,
+ strscpy(phost_info->domainname, p_sysid->domainname,
sizeof(phost_info->domainname));
- strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
+ strscpy(phost_info->hostdriver, QLA2XXX_VERSION,
sizeof(phost_info->hostdriver));
preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
ql_dbg(ql_dbg_init, vha, 0x0149,
@@ -1909,9 +1909,9 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
if (fx_type == FXDISC_GET_CONFIG_INFO) {
struct config_info_data *pinfo =
(struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
- strlcpy(vha->hw->model_number, pinfo->model_num,
+ strscpy(vha->hw->model_number, pinfo->model_num,
ARRAY_SIZE(vha->hw->model_number));
- strlcpy(vha->hw->model_desc, pinfo->model_description,
+ strscpy(vha->hw->model_desc, pinfo->model_description,
ARRAY_SIZE(vha->hw->model_desc));
memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
sizeof(vha->hw->mr.symbolic_name));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2fa695bf38b7..bc89d3da8fd0 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1079,43 +1079,6 @@ qc24_fail_command:
}
/*
- * qla2x00_eh_wait_on_command
- * Waits for the command to be returned by the Firmware for some
- * max time.
- *
- * Input:
- * cmd = Scsi Command to wait on.
- *
- * Return:
- * Completed in time : QLA_SUCCESS
- * Did not complete in time : QLA_FUNCTION_FAILED
- */
-static int
-qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
-{
-#define ABORT_POLLING_PERIOD 1000
-#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
- unsigned long wait_iter = ABORT_WAIT_ITER;
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp = scsi_cmd_priv(cmd);
- int ret = QLA_SUCCESS;
-
- if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
- ql_dbg(ql_dbg_taskm, vha, 0x8005,
- "Return:eh_wait.\n");
- return ret;
- }
-
- while (sp->type && wait_iter--)
- msleep(ABORT_POLLING_PERIOD);
- if (sp->type)
- ret = QLA_FUNCTION_FAILED;
-
- return ret;
-}
-
-/*
* qla2x00_wait_for_hba_online
* Wait till the HBA is online after going through
* <= MAX_RETRIES_OF_ISP_ABORT or
@@ -1365,6 +1328,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
}
+#define ABORT_POLLING_PERIOD 1000
+#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
+
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
@@ -1378,41 +1344,73 @@ __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
+ unsigned long wait_iter = ABORT_WAIT_ITER;
+ bool found;
+ struct qla_hw_data *ha = vha->hw;
status = QLA_SUCCESS;
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- for (cnt = 1; status == QLA_SUCCESS &&
- cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (!sp)
- continue;
- if (sp->type != SRB_SCSI_CMD)
- continue;
- if (vha->vp_idx != sp->vha->vp_idx)
- continue;
- match = 0;
- cmd = GET_CMD_SP(sp);
- switch (type) {
- case WAIT_HOST:
- match = 1;
- break;
- case WAIT_TARGET:
- match = cmd->device->id == t;
- break;
- case WAIT_LUN:
- match = (cmd->device->id == t &&
- cmd->device->lun == l);
- break;
- }
- if (!match)
- continue;
+ while (wait_iter--) {
+ found = false;
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- status = qla2x00_eh_wait_on_command(cmd);
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->type != SRB_SCSI_CMD)
+ continue;
+ if (vha->vp_idx != sp->vha->vp_idx)
+ continue;
+ match = 0;
+ cmd = GET_CMD_SP(sp);
+ switch (type) {
+ case WAIT_HOST:
+ match = 1;
+ break;
+ case WAIT_TARGET:
+ if (sp->fcport)
+ match = sp->fcport->d_id.b24 == t;
+ else
+ match = 0;
+ break;
+ case WAIT_LUN:
+ if (sp->fcport)
+ match = (sp->fcport->d_id.b24 == t &&
+ cmd->device->lun == l);
+ else
+ match = 0;
+ break;
+ }
+ if (!match)
+ continue;
+
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (unlikely(pci_channel_offline(ha->pdev)) ||
+ ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8005,
+ "Return:eh_wait.\n");
+ return status;
+ }
+
+			/*
+			 * The SRB_SCSI_CMD is still in the outstanding_cmds
+			 * array, which means scsi_done() has not been called
+			 * yet. Wait for it to clear from outstanding_cmds.
+			 */
+ msleep(ABORT_POLLING_PERIOD);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ found = true;
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (!found)
+ break;
}
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (!wait_iter && found)
+ status = QLA_FUNCTION_FAILED;
return status;
}
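The rework above folds the removed qla2x00_eh_wait_on_command() helper into a single bounded polling loop over the whole outstanding_cmds table: rescan, sleep while any match remains, and give up after a fixed number of passes. A standalone sketch of that pattern, with illustrative names rather than driver code:

#include <stdbool.h>

#define POLL_PERIOD_MS	1000
#define MAX_POLLS	((2 * 1000) / POLL_PERIOD_MS)

extern bool scan_for_match(void);	/* hypothetical outstanding_cmds walk */
extern void sleep_ms(unsigned int ms);	/* hypothetical msleep() stand-in */

static int wait_for_pending_cmds(void)
{
	unsigned long polls = MAX_POLLS;
	bool found = false;

	while (polls--) {
		found = scan_for_match();
		if (!found)
			break;			/* table drained: success */
		sleep_ms(POLL_PERIOD_MS);
	}
	return found ? -1 : 0;			/* -1: timed out while pending */
}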
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 42d69d89834f..4d6f06fb156b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.200-k"
+#define QLA2XXX_VERSION "10.02.08.300-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 200
+#define QLA_DRIVER_BETA_VER 300
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index cd71074f3abe..249f1d7021d4 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1611,8 +1611,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
goto exit_get_chap;
}
- strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
- strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
exit_get_chap:
@@ -1732,8 +1732,8 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
goto exit_unlock_uni_chap;
}
- strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
- strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
+ strscpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
+ strscpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
rval = QLA_SUCCESS;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee6d784c095c..b2a3988e1e15 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -798,9 +798,9 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
continue;
chap_rec->chap_tbl_idx = i;
- strlcpy(chap_rec->username, chap_table->name,
+ strscpy(chap_rec->username, chap_table->name,
ISCSI_CHAP_AUTH_NAME_MAX_LEN);
- strlcpy(chap_rec->password, chap_table->secret,
+ strscpy(chap_rec->password, chap_table->secret,
QL4_CHAP_MAX_SECRET_LEN);
chap_rec->password_length = chap_table->secret_len;
@@ -6052,8 +6052,8 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
if (!(chap_table->flags & BIT_6)) /* Not BIDI */
continue;
- strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
- strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
ret = 0;
break;
}
@@ -6281,8 +6281,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
tddb->tpgt = sess->tpgt;
tddb->port = conn->persistent_port;
- strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
- strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+ strscpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strscpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
}
static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
@@ -7781,7 +7781,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
goto exit_ddb_logout;
}
- strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
+ strscpy(flash_tddb->iscsi_name, fnode_sess->targetname,
ISCSI_NAME_SIZE);
if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 6e50e81a8216..b7a7a2eea887 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <uapi/linux/pr.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
@@ -63,6 +64,48 @@ const char *scsi_device_type(unsigned type)
}
EXPORT_SYMBOL(scsi_device_type);
+enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type)
+{
+ switch (type) {
+ case SCSI_PR_WRITE_EXCLUSIVE:
+ return PR_WRITE_EXCLUSIVE;
+ case SCSI_PR_EXCLUSIVE_ACCESS:
+ return PR_EXCLUSIVE_ACCESS;
+ case SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY:
+ return PR_WRITE_EXCLUSIVE_REG_ONLY;
+ case SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ return PR_EXCLUSIVE_ACCESS_REG_ONLY;
+ case SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS:
+ return PR_WRITE_EXCLUSIVE_ALL_REGS;
+ case SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ return PR_EXCLUSIVE_ACCESS_ALL_REGS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_pr_type_to_block);
+
+enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type)
+{
+ switch (type) {
+ case PR_WRITE_EXCLUSIVE:
+ return SCSI_PR_WRITE_EXCLUSIVE;
+ case PR_EXCLUSIVE_ACCESS:
+ return SCSI_PR_EXCLUSIVE_ACCESS;
+ case PR_WRITE_EXCLUSIVE_REG_ONLY:
+ return SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY;
+ case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ return SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY;
+ case PR_WRITE_EXCLUSIVE_ALL_REGS:
+ return SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS;
+ case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ return SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(block_pr_type_to_scsi);
+
/**
* scsilun_to_int - convert a scsi_lun to an int
* @scsilun: struct scsi_lun to be converted.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b894432ca0b9..25489fbd94c6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -594,7 +594,7 @@ static blk_status_t scsi_result_to_blk_status(int result)
case SCSIML_STAT_OK:
break;
case SCSIML_STAT_RESV_CONFLICT:
- return BLK_STS_NEXUS;
+ return BLK_STS_RESV_CONFLICT;
case SCSIML_STAT_NOSPC:
return BLK_STS_NOSPC;
case SCSIML_STAT_MED_ERROR:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3825e4d159fc..84edbc0a5747 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -67,6 +67,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>
+#include <scsi/scsi_common.h>
#include "sd.h"
#include "scsi_priv.h"
@@ -1719,26 +1720,6 @@ out_unlock:
return ret;
}
-static char sd_pr_type(enum pr_type type)
-{
- switch (type) {
- case PR_WRITE_EXCLUSIVE:
- return 0x01;
- case PR_EXCLUSIVE_ACCESS:
- return 0x03;
- case PR_WRITE_EXCLUSIVE_REG_ONLY:
- return 0x05;
- case PR_EXCLUSIVE_ACCESS_REG_ONLY:
- return 0x06;
- case PR_WRITE_EXCLUSIVE_ALL_REGS:
- return 0x07;
- case PR_EXCLUSIVE_ACCESS_ALL_REGS:
- return 0x08;
- default:
- return 0;
- }
-};
-
static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
{
switch (host_byte(result)) {
@@ -1769,8 +1750,97 @@ static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
}
}
-static int sd_pr_command(struct block_device *bdev, u8 sa,
- u64 key, u64 sa_key, u8 type, u8 flags)
+static int sd_pr_in_command(struct block_device *bdev, u8 sa,
+ unsigned char *data, int data_len)
+{
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
+ struct scsi_sense_hdr sshdr;
+ u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa };
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
+ int result;
+
+ put_unaligned_be16(data_len, &cmd[7]);
+
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len,
+ SD_TIMEOUT, sdkp->max_retries, &exec_args);
+ if (scsi_status_is_check_condition(result) &&
+ scsi_sense_valid(&sshdr)) {
+ sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
+ scsi_print_sense_hdr(sdev, NULL, &sshdr);
+ }
+
+ if (result <= 0)
+ return result;
+
+ return sd_scsi_to_pr_err(&sshdr, result);
+}
+
+static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info)
+{
+ int result, i, data_offset, num_copy_keys;
+ u32 num_keys = keys_info->num_keys;
+ int data_len = num_keys * 8 + 8;
+ u8 *data;
+
+ data = kzalloc(data_len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ result = sd_pr_in_command(bdev, READ_KEYS, data, data_len);
+ if (result)
+ goto free_data;
+
+ keys_info->generation = get_unaligned_be32(&data[0]);
+ keys_info->num_keys = get_unaligned_be32(&data[4]) / 8;
+
+ data_offset = 8;
+ num_copy_keys = min(num_keys, keys_info->num_keys);
+
+ for (i = 0; i < num_copy_keys; i++) {
+ keys_info->keys[i] = get_unaligned_be64(&data[data_offset]);
+ data_offset += 8;
+ }
+
+free_data:
+ kfree(data);
+ return result;
+}
+
+static int sd_pr_read_reservation(struct block_device *bdev,
+ struct pr_held_reservation *rsv)
+{
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
+ u8 data[24] = { };
+ int result, len;
+
+ result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data));
+ if (result)
+ return result;
+
+ len = get_unaligned_be32(&data[4]);
+ if (!len)
+ return 0;
+
+ /* Make sure we have at least the key and type */
+ if (len < 14) {
+ sdev_printk(KERN_INFO, sdev,
+ "READ RESERVATION failed due to short return buffer of %d bytes\n",
+ len);
+ return -EINVAL;
+ }
+
+ rsv->generation = get_unaligned_be32(&data[0]);
+ rsv->key = get_unaligned_be64(&data[8]);
+ rsv->type = scsi_pr_type_to_block(data[21] & 0x0f);
+ return 0;
+}
+
+static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key,
+ u64 sa_key, enum scsi_pr_type type, u8 flags)
{
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdev = sdkp->device;
@@ -1812,7 +1882,7 @@ static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
{
if (flags & ~PR_FL_IGNORE_KEY)
return -EOPNOTSUPP;
- return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
+ return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
old_key, new_key, 0,
(1 << 0) /* APTPL */);
}
@@ -1822,24 +1892,26 @@ static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
{
if (flags)
return -EOPNOTSUPP;
- return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
+ return sd_pr_out_command(bdev, 0x01, key, 0,
+ block_pr_type_to_scsi(type), 0);
}
static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
+ return sd_pr_out_command(bdev, 0x02, key, 0,
+ block_pr_type_to_scsi(type), 0);
}
static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort)
{
- return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
- sd_pr_type(type), 0);
+ return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
+ block_pr_type_to_scsi(type), 0);
}
static int sd_pr_clear(struct block_device *bdev, u64 key)
{
- return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
+ return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0);
}
static const struct pr_ops sd_pr_ops = {
@@ -1848,6 +1920,8 @@ static const struct pr_ops sd_pr_ops = {
.pr_release = sd_pr_release,
.pr_preempt = sd_pr_preempt,
.pr_clear = sd_pr_clear,
+ .pr_read_keys = sd_pr_read_keys,
+ .pr_read_reservation = sd_pr_read_reservation,
};
static void scsi_disk_free_disk(struct gendisk *disk)
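The sd pr_ops registered above back the block layer's persistent-reservation ioctls; the new pr_read_keys/pr_read_reservation callbacks are consumed in-kernel by the target passthrough code later in this series rather than being exposed to userspace. A sketch of driving the OUT-direction ops from userspace through the existing <linux/pr.h> ioctls (the device path and key are examples only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg = { .old_key = 0, .new_key = 0x1234 };
	struct pr_reservation rsv = { .key = 0x1234, .type = PR_WRITE_EXCLUSIVE };
	int fd = open("/dev/sdb", O_RDWR);	/* example device */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOC_PR_REGISTER, &reg))	/* reaches sd_pr_register() */
		perror("IOC_PR_REGISTER");
	if (ioctl(fd, IOC_PR_RESERVE, &rsv))	/* reaches sd_pr_reserve() */
		perror("IOC_PR_RESERVE");
	return 0;
}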
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 973d240649ab..789460b0a342 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 228838eb3686..f960b5095d09 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -1108,6 +1108,7 @@ struct pqi_scsi_dev {
u8 volume_offline : 1;
u8 rescan : 1;
u8 ignore_device : 1;
+ u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
bool device_offline;
@@ -1147,7 +1148,7 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
- atomic_t raid_bypass_cnt;
+ unsigned int raid_bypass_cnt;
};
/* VPD inquiry pages */
@@ -1357,6 +1358,7 @@ struct pqi_ctrl_info {
u32 max_write_raid_5_6;
u32 max_write_raid_1_10_2drive;
u32 max_write_raid_1_10_3drive;
+ int numa_node;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 03de97cd72c2..3669affd114b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.20-035"
+#define DRIVER_VERSION "2.1.22-040"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 20
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 22
+#define DRIVER_REVISION 40
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -519,6 +519,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
writeb(status, ctrl_info->soft_reset_status);
}
+static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+ bool io_high_prio;
+ int priority_class;
+
+ io_high_prio = false;
+
+ if (device->ncq_prio_enable) {
+ priority_class =
+ IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+ if (priority_class == IOPRIO_CLASS_RT) {
+ /* Set NCQ priority for read/write commands. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_16:
+ case READ_16:
+ case WRITE_12:
+ case READ_12:
+ case WRITE_10:
+ case READ_10:
+ case WRITE_6:
+ case READ_6:
+ io_high_prio = true;
+ break;
+ }
+ }
+ }
+
+ return io_high_prio;
+}
+
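pqi_is_io_high_priority() only flags a command when ncq_prio_enable has been turned on via the sas_ncq_prio_enable sysfs attribute and the request carries a realtime I/O priority class. A userspace sketch of tagging a process's I/O that way; glibc has no ioprio wrapper, so the raw syscall is used, and the macro values mirror <linux/ioprio.h>:

#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_RT		1
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* all subsequent I/O from this process carries RT priority */
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0));
}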
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
size_t buffer_length, enum dma_data_direction data_direction)
@@ -578,10 +608,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb = request->cdb;
switch (cmd) {
- case TEST_UNIT_READY:
- request->data_direction = SOP_READ_FLAG;
- cdb[0] = TEST_UNIT_READY;
- break;
case INQUIRY:
request->data_direction = SOP_READ_FLAG;
cdb[0] = INQUIRY;
@@ -708,7 +734,8 @@ static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *
}
}
- pqi_reinit_io_request(io_request);
+ if (io_request)
+ pqi_reinit_io_request(io_request);
return io_request;
}
@@ -1588,6 +1615,7 @@ no_buffer:
#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
+#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
@@ -1636,6 +1664,8 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
PQI_DEVICE_NCQ_PRIO_SUPPORTED);
+ device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
+
return 0;
}
@@ -1681,7 +1711,7 @@ out:
/*
* Prevent adding drive to OS for some corner cases such as a drive
- * undergoing a sanitize operation. Some OSes will continue to poll
+ * undergoing a sanitize (erase) operation. Some OSes will continue to poll
* the drive until the sanitize completes, which can take hours,
* resulting in long bootup delays. Commands such as TUR, READ_CAP
* are allowed, but READ/WRITE cause check condition. So the OS
@@ -1689,73 +1719,9 @@ out:
* Note: devices that have completed sanitize must be re-enabled
* using the management utility.
*/
-static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{
- u8 scsi_status;
- int rc;
- enum dma_data_direction dir;
- char *buffer;
- int buffer_length = 64;
- size_t sense_data_length;
- struct scsi_sense_hdr sshdr;
- struct pqi_raid_path_request request;
- struct pqi_raid_error_info error_info;
- bool offline = false; /* Assume keep online */
-
- /* Do not check controllers. */
- if (pqi_is_hba_lunid(device->scsi3addr))
- return false;
-
- /* Do not check LVs. */
- if (pqi_is_logical_device(device))
- return false;
-
- buffer = kmalloc(buffer_length, GFP_KERNEL);
- if (!buffer)
- return false; /* Assume not offline */
-
- /* Check for SANITIZE in progress using TUR */
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &dir);
- if (rc)
- goto out; /* Assume not offline */
-
- memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
-
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
-
- if (rc)
- goto out; /* Assume not offline */
-
- scsi_status = error_info.status;
- sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
- if (sense_data_length == 0)
- sense_data_length =
- get_unaligned_le16(&error_info.response_data_length);
- if (sense_data_length) {
- if (sense_data_length > sizeof(error_info.data))
- sense_data_length = sizeof(error_info.data);
-
- /*
- * Check for sanitize in progress: asc:0x04, ascq: 0x1b
- */
- if (scsi_status == SAM_STAT_CHECK_CONDITION &&
- scsi_normalize_sense(error_info.data,
- sense_data_length, &sshdr) &&
- sshdr.sense_key == NOT_READY &&
- sshdr.asc == 0x04 &&
- sshdr.ascq == 0x1b) {
- device->device_offline = true;
- offline = true;
- goto out; /* Keep device offline */
- }
- }
-
-out:
- kfree(buffer);
- return offline;
+ return device->erase_in_progress;
}
static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
@@ -2499,10 +2465,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (!pqi_is_supported_device(device))
continue;
- /* Do not present disks that the OS cannot fully probe */
- if (pqi_keep_device_offline(ctrl_info, device))
- continue;
-
/* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
@@ -2525,6 +2487,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue;
}
+ /* Do not present disks that the OS cannot fully probe. */
+ if (pqi_keep_device_offline(device))
+ continue;
+
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
@@ -5504,15 +5470,19 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static int pqi_raid_submit_scsi_cmd_with_io_request(
- struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
- struct pqi_queue_group *queue_group)
+ struct pqi_queue_group *queue_group, bool io_high_prio)
{
int rc;
size_t cdb_length;
+ struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_raid_io_complete;
io_request->scmd = scmd;
@@ -5522,6 +5492,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
@@ -5587,14 +5558,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
- struct pqi_io_request *io_request;
+ bool io_high_prio;
- io_request = pqi_alloc_io_request(ctrl_info, scmd);
- if (!io_request)
- return SCSI_MLQUEUE_HOST_BUSY;
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
- return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
- device, scmd, queue_group);
+ return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
}
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
@@ -5639,44 +5607,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
-{
- bool io_high_prio;
- int priority_class;
-
- io_high_prio = false;
-
- if (device->ncq_prio_enable) {
- priority_class =
- IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
- if (priority_class == IOPRIO_CLASS_RT) {
- /* Set NCQ priority for read/write commands. */
- switch (scmd->cmnd[0]) {
- case WRITE_16:
- case READ_16:
- case WRITE_12:
- case READ_12:
- case WRITE_10:
- case READ_10:
- case WRITE_6:
- case READ_6:
- io_high_prio = true;
- break;
- }
- }
- }
-
- return io_high_prio;
-}
-
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
bool io_high_prio;
- io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
scmd->cmnd, scmd->cmd_len, queue_group, NULL,
@@ -5694,10 +5631,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request;
struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass;
@@ -5712,6 +5649,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ device = scmd->device->hostdata;
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
@@ -6052,7 +5990,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- atomic_inc(&device->raid_bypass_cnt);
+ device->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -7288,7 +7226,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- int raid_bypass_cnt;
+ unsigned int raid_bypass_cnt;
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -7304,7 +7242,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return -ENODEV;
}
- raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
+ raid_bypass_cnt = device->raid_bypass_cnt;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -7366,8 +7304,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return -ENODEV;
}
- if (!device->ncq_prio_support ||
- !device->is_physical_device) {
+ if (!device->ncq_prio_support) {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -EINVAL;
}
@@ -7379,6 +7316,18 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return strlen(buf);
}
+static ssize_t pqi_numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct scsi_device *sdev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
+}
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7388,6 +7337,7 @@ static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
+static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
@@ -7398,6 +7348,7 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_raid_level.attr,
&dev_attr_raid_bypass_cnt.attr,
&dev_attr_sas_ncq_prio_enable.attr,
+ &dev_attr_numa_node.attr,
NULL
};
@@ -7716,8 +7667,8 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
features_requested_iomem_addr +
(le16_to_cpu(firmware_features->num_elements) * 2) +
sizeof(__le16);
- writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
- host_max_known_feature_iomem_addr);
+ writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
+ writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
}
return pqi_config_table_update(ctrl_info,
@@ -8560,7 +8511,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0),
- sizeof(struct pqi_ctrl_registers));
+ pci_resource_len(ctrl_info->pci_dev, 0));
if (!ctrl_info->iomem_base) {
dev_err(&ctrl_info->pci_dev->dev,
"failed to map memory for controller registers\n");
@@ -9018,6 +8969,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
"failed to allocate controller info block\n");
return -ENOMEM;
}
+ ctrl_info->numa_node = node;
ctrl_info->pci_dev = pci_dev;
@@ -9929,6 +9881,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0804)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0805)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0806)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x5445)
},
{
@@ -9965,6 +9929,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54da)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54db)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54dc)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0b27)
},
{
@@ -10017,6 +9993,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1014, 0x0718)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10029,6 +10009,50 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1001)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1003)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1004)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1006)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1007)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1008)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1009)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 13e8c539010e..a981d0377948 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -92,25 +92,23 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
identify = &rphy->identify;
identify->sas_address = pqi_sas_port->sas_address;
+ identify->phy_identifier = pqi_sas_port->device->phy_id;
identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
identify->target_port_protocols = SAS_PROTOCOL_STP;
- if (pqi_sas_port->device) {
- identify->phy_identifier = pqi_sas_port->device->phy_id;
- switch (pqi_sas_port->device->device_type) {
- case SA_DEVICE_TYPE_SAS:
- case SA_DEVICE_TYPE_SES:
- case SA_DEVICE_TYPE_NVME:
- identify->target_port_protocols = SAS_PROTOCOL_SSP;
- break;
- case SA_DEVICE_TYPE_EXPANDER_SMP:
- identify->target_port_protocols = SAS_PROTOCOL_SMP;
- break;
- case SA_DEVICE_TYPE_SATA:
- default:
- break;
- }
+ switch (pqi_sas_port->device->device_type) {
+ case SA_DEVICE_TYPE_SAS:
+ case SA_DEVICE_TYPE_SES:
+ case SA_DEVICE_TYPE_NVME:
+ identify->target_port_protocols = SAS_PROTOCOL_SSP;
+ break;
+ case SA_DEVICE_TYPE_EXPANDER_SMP:
+ identify->target_port_protocols = SAS_PROTOCOL_SMP;
+ break;
+ case SA_DEVICE_TYPE_SATA:
+ default:
+ break;
}
return sas_rphy_add(rphy);
@@ -295,10 +293,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy);
if (rc)
- goto free_sas_port;
+ goto free_sas_rphy;
return 0;
+free_sas_rphy:
+ sas_rphy_free(rphy);
free_sas_port:
pqi_free_sas_port(pqi_sas_port);
device->sas_port = NULL;
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5811fb3c22a9..673437c7152b 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 9dcbae96a5c6..0c97626d87d4 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 557516c642c3..5b90c22ee3dc 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -726,8 +726,8 @@ static int iscsi_add_notunderstood_response(
}
INIT_LIST_HEAD(&extra_response->er_list);
- strlcpy(extra_response->key, key, sizeof(extra_response->key));
- strlcpy(extra_response->value, NOTUNDERSTOOD,
+ strscpy(extra_response->key, key, sizeof(extra_response->key));
+ strscpy(extra_response->value, NOTUNDERSTOOD,
sizeof(extra_response->value));
list_add_tail(&extra_response->er_list,
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 26dc8ed3045b..dc1ac5a0f806 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1321,7 +1321,7 @@ void iscsit_collect_login_stats(
if (conn->param_list)
intrname = iscsi_find_param_from_key(INITIATORNAME,
conn->param_list);
- strlcpy(ls->last_intr_fail_name,
+ strscpy(ls->last_intr_fail_name,
(intrname ? intrname->value : "Unknown"),
sizeof(ls->last_intr_fail_name));
@@ -1360,7 +1360,7 @@ void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess)
return;
spin_lock_bh(&tiqn->sess_err_stats.lock);
- strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
sess->sess_ops->InitiatorName,
sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
tiqn->sess_err_stats.last_sess_failure_type =
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 74b67c346dfe..936e5ff1b209 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -649,7 +649,7 @@ static void dev_set_t10_wwn_model_alias(struct se_device *dev)
* here without potentially breaking existing setups, so continue to
* truncate one byte shorter than what can be carried in INQUIRY.
*/
- strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
+ strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}
static ssize_t emulate_model_alias_store(struct config_item *item,
@@ -675,7 +675,7 @@ static ssize_t emulate_model_alias_store(struct config_item *item,
if (flag) {
dev_set_t10_wwn_model_alias(dev);
} else {
- strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+ strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
sizeof(dev->t10_wwn.model));
}
da->emulate_model_alias = flag;
@@ -1426,7 +1426,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
- strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
+ strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
" %s\n", dev->t10_wwn.vendor);
@@ -1482,7 +1482,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
- strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
+ strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
dev->t10_wwn.model);
@@ -1538,7 +1538,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
- strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
+ strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
dev->t10_wwn.revision);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 90f3f4926172..b7ac60f4a219 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -789,10 +789,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
xcopy_lun->lun_tpg = &xcopy_pt_tpg;
/* Preload the default INQUIRY const values */
- strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
- strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+ strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
+ strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
sizeof(dev->t10_wwn.model));
- strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
+ strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
sizeof(dev->t10_wwn.revision));
return dev;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index ce0e000b74fc..4d447520bab8 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -896,7 +896,7 @@ static void fd_free_prot(struct se_device *dev)
fd_dev->fd_prot_file = NULL;
}
-static struct sbc_ops fd_sbc_ops = {
+static struct exec_cmd_ops fd_exec_cmd_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
.execute_write_same = fd_execute_write_same,
@@ -906,7 +906,7 @@ static struct sbc_ops fd_sbc_ops = {
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &fd_sbc_ops);
+ return sbc_parse_cdb(cmd, &fd_exec_cmd_ops);
}
static const struct target_backend_ops fileio_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index cc838ffd1294..e6029ea87e2f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -23,13 +23,16 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/pr.h>
#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_iblock.h"
+#include "target_core_pr.h"
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE 128
@@ -310,7 +313,7 @@ static sector_t iblock_get_blocks(struct se_device *dev)
return blocks_long;
}
-static void iblock_complete_cmd(struct se_cmd *cmd)
+static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
{
struct iblock_req *ibr = cmd->priv;
u8 status;
@@ -318,7 +321,9 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
if (!refcount_dec_and_test(&ibr->pending))
return;
- if (atomic_read(&ibr->ib_bio_err_cnt))
+ if (blk_status == BLK_STS_RESV_CONFLICT)
+ status = SAM_STAT_RESERVATION_CONFLICT;
+ else if (atomic_read(&ibr->ib_bio_err_cnt))
status = SAM_STAT_CHECK_CONDITION;
else
status = SAM_STAT_GOOD;
@@ -331,6 +336,7 @@ static void iblock_bio_done(struct bio *bio)
{
struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv;
+ blk_status_t blk_status = bio->bi_status;
if (bio->bi_status) {
pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
@@ -343,7 +349,7 @@ static void iblock_bio_done(struct bio *bio)
bio_put(bio);
- iblock_complete_cmd(cmd);
+ iblock_complete_cmd(cmd, blk_status);
}
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
@@ -759,7 +765,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!sgl_nents) {
refcount_set(&ibr->pending, 1);
- iblock_complete_cmd(cmd);
+ iblock_complete_cmd(cmd, BLK_STS_OK);
return 0;
}
@@ -817,7 +823,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
iblock_submit_bios(&list);
- iblock_complete_cmd(cmd);
+ iblock_complete_cmd(cmd, BLK_STS_OK);
return 0;
fail_put_bios:
@@ -829,6 +835,258 @@ fail:
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
+ u64 sa_key, u8 type, bool aptpl)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int ret;
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ switch (sa) {
+ case PRO_REGISTER:
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ if (!ops->pr_register) {
+ pr_err("block device does not support pr_register.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+		/* The block layer pr_ops always enable APTPL. */
+ if (!aptpl)
+ pr_info("APTPL not set by initiator, but will be used.\n");
+
+ ret = ops->pr_register(bdev, key, sa_key,
+ sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
+ break;
+ case PRO_RESERVE:
+ if (!ops->pr_reserve) {
+ pr_err("block_device does not support pr_reserve.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
+ break;
+ case PRO_CLEAR:
+ if (!ops->pr_clear) {
+ pr_err("block_device does not support pr_clear.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_clear(bdev, key);
+ break;
+ case PRO_PREEMPT:
+ case PRO_PREEMPT_AND_ABORT:
+		if (!ops->pr_preempt) {
+ pr_err("block_device does not support pr_preempt.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_preempt(bdev, key, sa_key,
+ scsi_pr_type_to_block(type),
+				      sa != PRO_PREEMPT);
+ break;
+ case PRO_RELEASE:
+		if (!ops->pr_release) {
+			pr_err("block_device does not support pr_release.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ret)
+ return TCM_NO_SENSE;
+ else if (ret == PR_STS_RESERVATION_CONFLICT)
+ return TCM_RESERVATION_CONFLICT;
+ else
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
+static void iblock_pr_report_caps(unsigned char *param_data)
+{
+ u16 len = 8;
+
+ put_unaligned_be16(len, &param_data[0]);
+ /*
+ * When using the pr_ops passthrough method we only support exporting
+ * the device through one target port because from the backend module
+ * level we can't see the target port config. As a result we only
+ * support registration directly from the I_T nexus the cmd is sent
+ * through and do not set ATP_C here.
+ *
+ * The block layer pr_ops do not support passing in initiators so
+ * we don't set SIP_C here.
+ */
+ /* PTPL_C: Persistence across Target Power Loss bit */
+ param_data[2] |= 0x01;
+ /*
+ * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+ * set the TMV: Task Mask Valid bit.
+ */
+ param_data[3] |= 0x80;
+ /*
+	 * The ALLOW COMMANDS field may later be changed to 010b (0x20) or
+	 * 100b (0x40) per spc4r37 Table 166.
+ */
+ param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+ /*
+	 * PTPL_A: Persistence across Target Power Loss Active bit. The block
+	 * layer pr_ops always enable this, so report it as active.
+ */
+ param_data[3] |= 0x01;
+ /*
+ * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
+ */
+	param_data[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */
+ param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+ param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+ param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+ param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+}
+
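For reference, a sketch of how an initiator could decode the REPORT CAPABILITIES data built above; offsets follow spc4r37 (flag bits in bytes 2 and 3, the type mask in bytes 4-5), and the struct and function names are illustrative:

#include <stdbool.h>
#include <stdint.h>

struct pr_caps {
	bool ptpl_c;		/* persistence across power loss supported */
	bool ptpl_a;		/* ...and currently active */
	bool tmv;		/* type mask valid */
	uint16_t type_mask;	/* supported PR types, bytes 4-5 */
};

static void decode_pr_caps(const uint8_t *p, struct pr_caps *c)
{
	c->ptpl_c = p[2] & 0x01;
	c->tmv = p[3] & 0x80;
	c->ptpl_a = p[3] & 0x01;
	c->type_mask = ((uint16_t)p[4] << 8) | p[5];
}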
+static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
+ unsigned char *param_data)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int i, len, paths, data_offset;
+ struct pr_keys *keys;
+ sense_reason_t ret;
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ops->pr_read_keys) {
+ pr_err("Block device does not support read_keys.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ /*
+ * We don't know what's under us, but dm-multipath will register every
+	 * path with the same key, so start off with enough space for 16
+	 * paths, which is not a lot of memory and should normally be enough.
+ */
+ paths = 16;
+retry:
+ len = 8 * paths;
+ keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
+ if (!keys)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ keys->num_keys = paths;
+ if (!ops->pr_read_keys(bdev, keys)) {
+ if (keys->num_keys > paths) {
+ kfree(keys);
+ paths *= 2;
+ goto retry;
+ }
+ } else {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto free_keys;
+ }
+
+ ret = TCM_NO_SENSE;
+
+ put_unaligned_be32(keys->generation, &param_data[0]);
+ if (!keys->num_keys) {
+ put_unaligned_be32(0, &param_data[4]);
+ goto free_keys;
+ }
+
+ put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
+
+ data_offset = 8;
+ for (i = 0; i < keys->num_keys; i++) {
+ if (data_offset + 8 > cmd->data_length)
+ break;
+
+ put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
+ data_offset += 8;
+ }
+
+free_keys:
+ kfree(keys);
+ return ret;
+}
+
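The retry loop above is a grow-and-retry sizing pattern: guess a capacity, let the provider report the real count, and double the buffer when the guess was short. A standalone sketch under that assumption, with fetch_keys() standing in for ops->pr_read_keys():

#include <stdlib.h>

struct keys_buf {
	unsigned int num_keys;		/* in: capacity, out: keys available */
	unsigned long long keys[];
};

extern int fetch_keys(struct keys_buf *kb);	/* hypothetical provider */

static struct keys_buf *read_all_keys(void)
{
	unsigned int cap = 16;		/* dm-multipath: one slot per path */
	struct keys_buf *kb;

	for (;;) {
		kb = calloc(1, sizeof(*kb) + cap * sizeof(kb->keys[0]));
		if (!kb)
			return NULL;
		kb->num_keys = cap;
		if (fetch_keys(kb)) {	/* hard error */
			free(kb);
			return NULL;
		}
		if (kb->num_keys <= cap)
			return kb;	/* everything fit */
		cap *= 2;		/* guess too small: grow and retry */
		free(kb);
	}
}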
+static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
+ unsigned char *param_data)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_held_reservation rsv = { };
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ops->pr_read_reservation) {
+		pr_err("Block device does not support read_reservation.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (ops->pr_read_reservation(bdev, &rsv))
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ put_unaligned_be32(rsv.generation, &param_data[0]);
+ if (!block_pr_type_to_scsi(rsv.type)) {
+ put_unaligned_be32(0, &param_data[4]);
+ return TCM_NO_SENSE;
+ }
+
+ put_unaligned_be32(16, &param_data[4]);
+
+ if (cmd->data_length < 16)
+ return TCM_NO_SENSE;
+ put_unaligned_be64(rsv.key, &param_data[8]);
+
+ if (cmd->data_length < 22)
+ return TCM_NO_SENSE;
+ param_data[21] = block_pr_type_to_scsi(rsv.type);
+
+ return TCM_NO_SENSE;
+}
+
+static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
+ unsigned char *param_data)
+{
+ sense_reason_t ret = TCM_NO_SENSE;
+
+ switch (sa) {
+ case PRI_REPORT_CAPABILITIES:
+ iblock_pr_report_caps(param_data);
+ break;
+ case PRI_READ_KEYS:
+ ret = iblock_pr_read_keys(cmd, param_data);
+ break;
+ case PRI_READ_RESERVATION:
+ ret = iblock_pr_read_reservation(cmd, param_data);
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return ret;
+}
+
static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -869,17 +1127,19 @@ static unsigned int iblock_get_io_opt(struct se_device *dev)
return bdev_io_opt(bd);
}
-static struct sbc_ops iblock_sbc_ops = {
+static struct exec_cmd_ops iblock_exec_cmd_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
.execute_unmap = iblock_execute_unmap,
+ .execute_pr_out = iblock_execute_pr_out,
+ .execute_pr_in = iblock_execute_pr_in,
};
static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+ return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
}
static bool iblock_get_write_cache(struct se_device *dev)
@@ -890,6 +1150,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
static const struct target_backend_ops iblock_ops = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
.inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE,
.attach_hba = iblock_attach_hba,
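With sbc_ops renamed to exec_cmd_ops and the two PR hooks added, a backend opts into passthrough roughly as sketched below; the my_* names are hypothetical, not an in-tree backend:

#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>

static sense_reason_t my_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl,
				    u32 sgl_nents, enum dma_data_direction dir)
{
	return TCM_NO_SENSE;	/* would issue the I/O to the device */
}

static sense_reason_t my_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
					u64 sa_key, u8 type, bool aptpl)
{
	return TCM_NO_SENSE;	/* would forward PR OUT, as iblock does via pr_ops */
}

static sense_reason_t my_execute_pr_in(struct se_cmd *cmd, u8 sa,
				       unsigned char *param_data)
{
	return TCM_NO_SENSE;	/* would fill param_data from the device */
}

static struct exec_cmd_ops my_exec_cmd_ops = {
	.execute_rw	= my_execute_rw,
	.execute_pr_out	= my_execute_pr_out,
	.execute_pr_in	= my_execute_pr_in,
};

/* and in the backend's target_backend_ops:
 *	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
 */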
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d19ec4e6a4c0..49d9167bb263 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -3538,6 +3538,37 @@ out_put_pr_reg:
return ret;
}
+static sense_reason_t
+target_try_pr_out_pt(struct se_cmd *cmd, u8 sa, u64 res_key, u64 sa_res_key,
+ u8 type, bool aptpl, bool all_tg_pt, bool spec_i_pt)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+
+ if (!cmd->se_sess || !cmd->se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (!ops->execute_pr_out) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ switch (sa) {
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_REPLACE_LOST_RESERVATION:
+ pr_err("SPC-3 PR: PRO_REGISTER_AND_MOVE and PRO_REPLACE_LOST_RESERVATION are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (spec_i_pt || all_tg_pt) {
+ pr_err("SPC-3 PR: SPEC_I_PT and ALL_TG_PT are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return ops->execute_pr_out(cmd, sa, res_key, sa_res_key, type, aptpl);
+}
+
/*
* See spc4r17 section 6.14 Table 170
*/
@@ -3641,6 +3672,12 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+ ret = target_try_pr_out_pt(cmd, sa, res_key, sa_res_key, type,
+ aptpl, all_tg_pt, spec_i_pt);
+ goto done;
+ }
+
/*
* (core_scsi3_emulate_pro_* function parameters
* are defined by spc4r17 Table 174:
@@ -3682,6 +3719,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
+done:
if (!ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
@@ -4039,9 +4077,42 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
+static sense_reason_t target_try_pr_in_pt(struct se_cmd *cmd, u8 sa)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ unsigned char *buf;
+ sense_reason_t ret;
+
+ if (cmd->data_length < 8) {
+ pr_err("PRIN SA SCSI Data Length: %u too small\n",
+ cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (!ops->execute_pr_in) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (sa == PRI_READ_FULL_STATUS) {
+ pr_err("SPC-3 PR: PRI_READ_FULL_STATUS is not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ ret = ops->execute_pr_in(cmd, sa, buf);
+
+ transport_kunmap_data_sg(cmd);
+ return ret;
+}
+
sense_reason_t
target_scsi3_emulate_pr_in(struct se_cmd *cmd)
{
+ u8 sa = cmd->t_task_cdb[1] & 0x1f;
sense_reason_t ret;
/*
@@ -4060,7 +4131,12 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return TCM_RESERVATION_CONFLICT;
}
- switch (cmd->t_task_cdb[1] & 0x1f) {
+ if (cmd->se_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+ ret = target_try_pr_in_pt(cmd, sa);
+ goto done;
+ }
+
+ switch (sa) {
case PRI_READ_KEYS:
ret = core_scsi3_pri_read_keys(cmd);
break;
@@ -4079,6 +4155,7 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
+done:
if (!ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 6648c1c90e19..6f67cc09c2b5 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -643,14 +643,14 @@ static void rd_free_prot(struct se_device *dev)
rd_release_prot_space(rd_dev);
}
-static struct sbc_ops rd_sbc_ops = {
+static struct exec_cmd_ops rd_exec_cmd_ops = {
.execute_rw = rd_execute_rw,
};
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &rd_sbc_ops);
+ return sbc_parse_cdb(cmd, &rd_exec_cmd_ops);
}
static const struct target_backend_ops rd_mcp_ops = {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 7536ca797606..6a02561cc20c 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -192,7 +192,7 @@ EXPORT_SYMBOL(sbc_get_write_same_sectors);
static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
sector_t nolb = sbc_get_write_same_sectors(cmd);
sense_reason_t ret;
@@ -271,7 +271,8 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb)
}
static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags,
+ struct exec_cmd_ops *ops)
{
struct se_device *dev = cmd->se_dev;
sector_t end_lba = dev->transport->get_blocks(dev) + 1;
@@ -340,7 +341,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
cmd->data_direction);
@@ -566,7 +567,7 @@ out:
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
sense_reason_t ret;
int rc;
@@ -764,7 +765,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
}
sense_reason_t
-sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
@@ -1076,7 +1077,7 @@ EXPORT_SYMBOL(sbc_get_device_type);
static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
unsigned char *buf, *ptr = NULL;
sector_t lba;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 89c0d56294cc..50290abc07bc 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1424,9 +1424,10 @@ static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static bool tcm_is_ws_enabled(struct se_cmd *cmd)
+static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
@@ -1451,7 +1452,8 @@ static struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
-static bool tcm_is_caw_enabled(struct se_cmd *cmd)
+static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1491,7 +1493,8 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd)
+static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1502,7 +1505,6 @@ static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd)
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return true;
-
}
static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
@@ -1537,9 +1539,10 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
-static bool tcm_is_unmap_enabled(struct se_cmd *cmd)
+static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
- struct sbc_ops *ops = cmd->protocol_data;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
@@ -1659,11 +1662,46 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_pr_enabled(struct se_cmd *cmd)
+static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- return dev->dev_attrib.emulate_pr;
+ if (!dev->dev_attrib.emulate_pr)
+ return false;
+
+ if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ return true;
+
+ switch (descr->opcode) {
+ case RESERVE:
+ case RESERVE_10:
+ case RELEASE:
+ case RELEASE_10:
+ /*
+ * The pr_ops used by the backend modules don't
+ * support these commands.
+ */
+ return false;
+ case PERSISTENT_RESERVE_OUT:
+ switch (descr->service_action) {
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_REPLACE_LOST_RESERVATION:
+ /*
+ * The backend modules don't have access to ports and
+ * I_T nexuses, so they can't handle these types of
+ * requests.
+ */
+ return false;
+ }
+ break;
+ case PERSISTENT_RESERVE_IN:
+ if (descr->service_action == PRI_READ_FULL_STATUS)
+ return false;
+ break;
+ }
+
+ return true;
}
static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
@@ -1788,20 +1826,13 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
.enabled = tcm_is_pr_enabled,
};
-static bool tcm_is_scsi2_reservations_enabled(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
-
- return dev->dev_attrib.emulate_pr;
-}
-
static struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE,
.cdb_size = 6,
.usage_bits = {RELEASE, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_release10 = {
@@ -1811,7 +1842,7 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_reserve = {
@@ -1820,7 +1851,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve = {
.cdb_size = 6,
.usage_bits = {RESERVE, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_reserve10 = {
@@ -1830,7 +1861,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve10 = {
.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
- .enabled = tcm_is_scsi2_reservations_enabled,
+ .enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_request_sense = {
@@ -1849,7 +1880,8 @@ static struct target_opcode_descriptor tcm_opcode_inquiry = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_3pc_enabled(struct se_cmd *cmd)
+static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1910,8 +1942,8 @@ static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-
-static bool spc_rsoc_enabled(struct se_cmd *cmd)
+static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1931,7 +1963,8 @@ static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
.enabled = spc_rsoc_enabled,
};
-static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd)
+static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
{
struct t10_alua_tg_pt_gp *l_tg_pt_gp;
struct se_lun *l_lun = cmd->se_lun;
@@ -2118,7 +2151,7 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
- if (!descr->enabled || descr->enabled(cmd))
+ if (!descr->enabled || descr->enabled(descr, cmd))
*opcode = descr;
break;
case 0x2:
@@ -2132,7 +2165,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
*/
if (descr->serv_action_valid &&
descr->service_action == requested_sa) {
- if (!descr->enabled || descr->enabled(cmd))
+ if (!descr->enabled || descr->enabled(descr,
+ cmd))
*opcode = descr;
} else if (!descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
@@ -2145,7 +2179,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
* be returned in the one_command parameter data format.
*/
if (descr->service_action == requested_sa)
- if (!descr->enabled || descr->enabled(cmd))
+ if (!descr->enabled || descr->enabled(descr,
+ cmd))
*opcode = descr;
break;
}
@@ -2202,7 +2237,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
descr = tcm_supported_opcodes[i];
- if (descr->enabled && !descr->enabled(cmd))
+ if (descr->enabled && !descr->enabled(descr, cmd))
continue;
response_length += spc_rsoc_encode_command_descriptor(
@@ -2231,12 +2266,22 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
- if (!dev->dev_attrib.emulate_pr &&
- ((cdb[0] == PERSISTENT_RESERVE_IN) ||
- (cdb[0] == PERSISTENT_RESERVE_OUT) ||
- (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
- (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
- return TCM_UNSUPPORTED_SCSI_OPCODE;
+ switch (cdb[0]) {
+ case RESERVE:
+ case RESERVE_10:
+ case RELEASE:
+ case RELEASE_10:
+ if (!dev->dev_attrib.emulate_pr)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ if (!dev->dev_attrib.emulate_pr)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
}
switch (cdb[0]) {
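The descriptor argument added throughout this file lets a single enabled callback serve many opcode descriptors by inspecting descr->opcode and descr->service_action, which is how the reworked tcm_is_pr_enabled() absorbs the old tcm_is_scsi2_reservations_enabled(). A minimal sketch of the new callback shape, with a hypothetical callback name used purely for illustration:

#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>

/* Hypothetical callback: with the descriptor passed in, one function
 * can gate several opcodes instead of needing a dedicated callback
 * per descriptor.
 */
static bool example_enabled(struct target_opcode_descriptor *descr,
			    struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (descr->opcode == UNMAP)
		return dev->dev_attrib.emulate_tpu;

	return true;
}
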
diff --git a/drivers/ufs/core/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c
index 4c6a872b7a7c..101d7082446f 100644
--- a/drivers/ufs/core/ufs-hwmon.c
+++ b/drivers/ufs/core/ufs-hwmon.c
@@ -146,7 +146,7 @@ static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types t
return 0;
}
-static const struct hwmon_channel_info *ufs_hwmon_info[] = {
+static const struct hwmon_channel_info *const ufs_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT),
NULL
};
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 17d7bb875fee..3f362232d5ee 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -98,7 +98,7 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
-/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */
+/* UFSHC 4.0 compliant HCs support this mode. */
static bool use_mcq_mode = true;
static bool is_mcq_supported(struct ufs_hba *hba)
@@ -106,23 +106,7 @@ static bool is_mcq_supported(struct ufs_hba *hba)
return hba->mcq_sup && use_mcq_mode;
}
-static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
-{
- int ret;
-
- ret = param_set_bool(val, kp);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct kernel_param_ops mcq_mode_ops = {
- .set = param_set_mcq_mode,
- .get = param_get_bool,
-};
-
-module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 73e217260390..a054810e321d 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -410,9 +410,6 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
- if (val == state)
- return 0;
-
return -ETIMEDOUT;
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index dfdcd218aaac..bb573f87c8d1 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -101,7 +101,7 @@ typedef u16 blk_short_t;
#define BLK_STS_NOSPC ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT ((__force blk_status_t)4)
#define BLK_STS_TARGET ((__force blk_status_t)5)
-#define BLK_STS_NEXUS ((__force blk_status_t)6)
+#define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6)
#define BLK_STS_MEDIUM ((__force blk_status_t)7)
#define BLK_STS_PROTECTION ((__force blk_status_t)8)
#define BLK_STS_RESOURCE ((__force blk_status_t)9)
@@ -195,7 +195,7 @@ static inline bool blk_path_error(blk_status_t error)
case BLK_STS_NOTSUPP:
case BLK_STS_NOSPC:
case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
+ case BLK_STS_RESV_CONFLICT:
case BLK_STS_MEDIUM:
case BLK_STS_PROTECTION:
return false;
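The rename reflects what this status has actually been used for: reservation conflicts rather than generic nexus failures. As a hedged illustration (helper name hypothetical), a SCSI completion path would map the SAM status along these lines:

#include <linux/blk_types.h>
#include <scsi/scsi_proto.h>

static blk_status_t example_sam_to_blk_status(int sam_status)
{
	/* A reservation conflict now maps to the descriptive name. */
	if (sam_status == SAM_STAT_RESERVATION_CONFLICT)
		return BLK_STS_RESV_CONFLICT;

	return BLK_STS_OK;
}
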
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 779507ac750b..182b6d614eb1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -759,20 +759,55 @@ enum {
NVME_LBART_ATTRIB_HIDE = 1 << 1,
};
+enum nvme_pr_type {
+ NVME_PR_WRITE_EXCLUSIVE = 1,
+ NVME_PR_EXCLUSIVE_ACCESS = 2,
+ NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
+ NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
+ NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
+ NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
+};
+
+enum nvme_eds {
+ NVME_EXTENDED_DATA_STRUCT = 0x1,
+};
+
+struct nvme_registered_ctrl {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 hostid;
+ __le64 rkey;
+};
+
struct nvme_reservation_status {
__le32 gen;
__u8 rtype;
__u8 regctl[2];
__u8 resv5[2];
__u8 ptpls;
- __u8 resv10[13];
- struct {
- __le16 cntlid;
- __u8 rcsts;
- __u8 resv3[5];
- __le64 hostid;
- __le64 rkey;
- } regctl_ds[];
+ __u8 resv10[14];
+ struct nvme_registered_ctrl regctl_ds[];
+};
+
+struct nvme_registered_ctrl_ext {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 rkey;
+ __u8 hostid[16];
+ __u8 rsvd32[32];
+};
+
+struct nvme_reservation_status_ext {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 resv5[2];
+ __u8 ptpls;
+ __u8 resv10[14];
+ __u8 rsvd24[40];
+ struct nvme_registered_ctrl_ext regctl_eds[];
};
enum nvme_async_event_type {
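In both report formats the registered-controller count lives in the two-byte little-endian regctl field, with the per-controller entries following the fixed header. A hedged sketch of walking the short (non-extended) format, with a hypothetical helper name:

#include <linux/nvme.h>
#include <linux/printk.h>
#include <asm/unaligned.h>

static void example_dump_regctl(struct nvme_reservation_status *status)
{
	u16 nr = get_unaligned_le16(status->regctl);
	u16 i;

	for (i = 0; i < nr; i++)
		pr_info("cntlid %u rkey %llu\n",
			le16_to_cpu(status->regctl_ds[i].cntlid),
			(unsigned long long)
			le64_to_cpu(status->regctl_ds[i].rkey));
}
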
diff --git a/include/linux/pr.h b/include/linux/pr.h
index 94ceec713afe..3003daec28a5 100644
--- a/include/linux/pr.h
+++ b/include/linux/pr.h
@@ -4,6 +4,18 @@
#include <uapi/linux/pr.h>
+struct pr_keys {
+ u32 generation;
+ u32 num_keys;
+ u64 keys[];
+};
+
+struct pr_held_reservation {
+ u64 key;
+ u32 generation;
+ enum pr_type type;
+};
+
struct pr_ops {
int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
u32 flags);
@@ -14,6 +26,19 @@ struct pr_ops {
int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort);
int (*pr_clear)(struct block_device *bdev, u64 key);
+ /*
+ * pr_read_keys - Read the registered keys and return them in the
+ * pr_keys->keys array. The keys array will have been allocated at the
+ * end of the pr_keys struct, and pr_keys->num_keys must be set to the
+ * number of keys the array can hold. If there are more than can fit
+ * in the array, success will still be returned and pr_keys->num_keys
+ * will reflect the total number of keys the device contains, so the
+ * caller can retry with a larger array.
+ */
+ int (*pr_read_keys)(struct block_device *bdev,
+ struct pr_keys *keys_info);
+ int (*pr_read_reservation)(struct block_device *bdev,
+ struct pr_held_reservation *rsv);
};
#endif /* LINUX_PR_H */
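A hedged caller sketch for the pr_read_keys() contract documented above (the function name and initial size guess are illustrative): allocate room for a guess, and when the device holds more keys than fit, num_keys reports the real total so the caller can retry with a larger array:

#include <linux/overflow.h>
#include <linux/pr.h>
#include <linux/slab.h>

static struct pr_keys *example_read_all_keys(struct block_device *bdev,
					     const struct pr_ops *ops)
{
	u32 want = 16;	/* initial guess */
	struct pr_keys *keys;

retry:
	keys = kzalloc(struct_size(keys, keys, want), GFP_KERNEL);
	if (!keys)
		return NULL;
	keys->num_keys = want;

	if (ops->pr_read_keys(bdev, keys)) {
		kfree(keys);
		return NULL;
	}

	if (keys->num_keys > want) {
		/* Device has more keys than fit; grow and retry. */
		want = keys->num_keys;
		kfree(keys);
		goto retry;
	}

	return keys;
}
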
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 5b567b43e1b1..fb58715fac86 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -7,8 +7,21 @@
#define _SCSI_COMMON_H_
#include <linux/types.h>
+#include <uapi/linux/pr.h>
#include <scsi/scsi_proto.h>
+enum scsi_pr_type {
+ SCSI_PR_WRITE_EXCLUSIVE = 0x01,
+ SCSI_PR_EXCLUSIVE_ACCESS = 0x03,
+ SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 0x05,
+ SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 0x06,
+ SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 0x07,
+ SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 0x08,
+};
+
+enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type);
+enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type);
+
static inline unsigned
scsi_varlen_cdb_length(const void *hdr)
{
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index fbe5bdfe4d6e..07d65c1f59db 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -151,6 +151,11 @@
#define ZO_FINISH_ZONE 0x02
#define ZO_OPEN_ZONE 0x03
#define ZO_RESET_WRITE_POINTER 0x04
+/* values for PR in service action */
+#define READ_KEYS 0x00
+#define READ_RESERVATION 0x01
+#define REPORT_CAPABILITES 0x02
+#define READ_FULL_STATUS 0x03
/* values for variable length command */
#define XDREAD_32 0x03
#define XDWRITE_32 0x04
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index a3c193df25b3..739df993aa5e 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -62,13 +62,17 @@ struct target_backend_ops {
struct configfs_attribute **tb_dev_action_attrs;
};
-struct sbc_ops {
+struct exec_cmd_ops {
sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *,
u32, enum dma_data_direction);
sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
sector_t lba, sector_t nolb);
+ sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key,
+ u64 sa_key, u8 type, bool aptpl);
+ sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa,
+ unsigned char *param_data);
};
int transport_backend_register(const struct target_backend_ops *);
@@ -86,7 +90,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *);
-sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
+sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops);
u32 sbc_get_device_rev(struct se_device *dev);
u32 sbc_get_device_type(struct se_device *dev);
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
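With the rename, backends gain two optional hooks for Persistent Reservations next to their I/O handlers. A backend that forwards PR commands to the underlying device (as the iblock backend in this series does via the block layer's pr_ops) would wire them roughly like this; handler names are hypothetical and the bodies are stubs:

#include <target/target_core_backend.h>

static sense_reason_t example_execute_pr_out(struct se_cmd *cmd, u8 sa,
					     u64 key, u64 sa_key, u8 type,
					     bool aptpl)
{
	/* Translate the PROUT service action to the device's pr_ops. */
	return TCM_NO_SENSE;
}

static sense_reason_t example_execute_pr_in(struct se_cmd *cmd, u8 sa,
					    unsigned char *param_data)
{
	/* Fill param_data from pr_read_keys()/pr_read_reservation(). */
	return TCM_NO_SENSE;
}

static struct exec_cmd_ops example_exec_cmd_ops = {
	.execute_pr_out	= example_execute_pr_out,
	.execute_pr_in	= example_execute_pr_in,
};
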
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5f8e96f1516f..159567359bbb 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -880,7 +880,8 @@ struct target_opcode_descriptor {
u8 specific_timeout;
u16 nominal_timeout;
u16 recommended_timeout;
- bool (*enabled)(struct se_cmd *cmd);
+ bool (*enabled)(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd);
void (*update_usage_bits)(u8 *usage_bits,
struct se_device *dev);
u8 usage_bits[];