-rw-r--r-- Documentation/ABI/testing/sysfs-class-fc | 27
-rw-r--r-- Documentation/ABI/testing/sysfs-driver-ufs | 2
-rw-r--r-- drivers/ata/ahci.h | 8
-rw-r--r-- drivers/ata/ata_piix.c | 8
-rw-r--r-- drivers/ata/libahci.c | 52
-rw-r--r-- drivers/ata/libata-sata.c | 21
-rw-r--r-- drivers/ata/libata-scsi.c | 29
-rw-r--r-- drivers/ata/pata_macio.c | 2
-rw-r--r-- drivers/ata/sata_mv.c | 2
-rw-r--r-- drivers/ata/sata_nv.c | 4
-rw-r--r-- drivers/ata/sata_sil24.c | 2
-rw-r--r-- drivers/firewire/sbp2.c | 10
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 59
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 38
-rw-r--r-- drivers/message/fusion/mptfc.c | 8
-rw-r--r-- drivers/message/fusion/mptsas.c | 4
-rw-r--r-- drivers/message/fusion/mptscsih.c | 46
-rw-r--r-- drivers/message/fusion/mptscsih.h | 2
-rw-r--r-- drivers/message/fusion/mptspi.c | 6
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 8
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 52
-rw-r--r-- drivers/scsi/3w-9xxx.c | 18
-rw-r--r-- drivers/scsi/3w-sas.c | 18
-rw-r--r-- drivers/scsi/3w-xxxx.c | 26
-rw-r--r-- drivers/scsi/53c700.c | 20
-rw-r--r-- drivers/scsi/BusLogic.c | 13
-rw-r--r-- drivers/scsi/NCR5380.c | 12
-rw-r--r-- drivers/scsi/a100u2w.c | 5
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 53
-rw-r--r-- drivers/scsi/aacraid/linit.c | 38
-rw-r--r-- drivers/scsi/advansys.c | 14
-rw-r--r-- drivers/scsi/aha152x.c | 29
-rw-r--r-- drivers/scsi/aha1542.c | 16
-rw-r--r-- drivers/scsi/aha1740.c | 4
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.c | 6
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.h | 2
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 6
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r-- drivers/scsi/arcmsr/arcmsr.h | 2
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_attr.c | 33
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 22
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 20
-rw-r--r-- drivers/scsi/arm/arxescsi.c | 1
-rw-r--r-- drivers/scsi/arm/cumana_2.c | 1
-rw-r--r-- drivers/scsi/arm/eesox.c | 1
-rw-r--r-- drivers/scsi/arm/fas216.c | 26
-rw-r--r-- drivers/scsi/arm/fas216.h | 10
-rw-r--r-- drivers/scsi/arm/powertec.c | 2
-rw-r--r-- drivers/scsi/atp870u.c | 17
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 21
-rw-r--r-- drivers/scsi/bfa/bfad_attr.c | 68
-rw-r--r-- drivers/scsi/bfa/bfad_im.c | 16
-rw-r--r-- drivers/scsi/bfa/bfad_im.h | 4
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 8
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 8
-rw-r--r-- drivers/scsi/bnx2i/bnx2i.h | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_sysfs.c | 15
-rw-r--r-- drivers/scsi/csiostor/csio_lnode.c | 2
-rw-r--r-- drivers/scsi/csiostor/csio_scsi.c | 32
-rw-r--r-- drivers/scsi/cxlflash/main.c | 46
-rw-r--r-- drivers/scsi/dc395x.c | 12
-rw-r--r-- drivers/scsi/dpt_i2o.c | 13
-rw-r--r-- drivers/scsi/elx/efct/efct_driver.c | 6
-rw-r--r-- drivers/scsi/elx/efct/efct_lio.c | 4
-rw-r--r-- drivers/scsi/elx/efct/efct_scsi.c | 3
-rw-r--r-- drivers/scsi/elx/libefc/efc.h | 2
-rw-r--r-- drivers/scsi/elx/libefc/efc_cmds.c | 7
-rw-r--r-- drivers/scsi/elx/libefc/efc_fabric.c | 2
-rw-r--r-- drivers/scsi/elx/libefc/efclib.h | 1
-rw-r--r-- drivers/scsi/esas2r/esas2r_main.c | 8
-rw-r--r-- drivers/scsi/esp_scsi.c | 12
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 2
-rw-r--r-- drivers/scsi/fdomain.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic.h | 2
-rw-r--r-- drivers/scsi/fnic/fnic_attrs.c | 17
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 122
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 3
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 113
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 23
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 35
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 62
-rw-r--r-- drivers/scsi/hosts.c | 17
-rw-r--r-- drivers/scsi/hpsa.c | 56
-rw-r--r-- drivers/scsi/hptiop.c | 20
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 30
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 28
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 42
-rw-r--r-- drivers/scsi/imm.c | 6
-rw-r--r-- drivers/scsi/initio.c | 7
-rw-r--r-- drivers/scsi/ipr.c | 48
-rw-r--r-- drivers/scsi/ips.c | 31
-rw-r--r-- drivers/scsi/isci/init.c | 8
-rw-r--r-- drivers/scsi/isci/task.h | 4
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 6
-rw-r--r-- drivers/scsi/libiscsi.c | 7
-rw-r--r-- drivers/scsi/libsas/sas_init.c | 8
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 27
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 314
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 12
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 61
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 144
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 135
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 70
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 44
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 131
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 197
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/mac53c94.c | 6
-rw-r--r-- drivers/scsi/megaraid.c | 24
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 28
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 4
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 40
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 56
-rw-r--r-- drivers/scsi/mesh.c | 18
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 32
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 26
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 4
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 84
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 26
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 12
-rw-r--r-- drivers/scsi/mvumi.c | 4
-rw-r--r-- drivers/scsi/myrb.c | 60
-rw-r--r-- drivers/scsi/myrs.c | 50
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 16
-rw-r--r-- drivers/scsi/nsp32.c | 7
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 7
-rw-r--r-- drivers/scsi/pcmcia/sym53c500_cs.c | 14
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 70
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 12
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 14
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 15
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 8
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 63
-rw-r--r-- drivers/scsi/pmcraid.c | 27
-rw-r--r-- drivers/scsi/ppa.c | 6
-rw-r--r-- drivers/scsi/ps3rom.c | 8
-rw-r--r-- drivers/scsi/qedf/qedf.h | 2
-rw-r--r-- drivers/scsi/qedf/qedf_attr.c | 15
-rw-r--r-- drivers/scsi/qedf/qedf_io.c | 19
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r-- drivers/scsi/qedi/qedi_gbl.h | 2
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 2
-rw-r--r-- drivers/scsi/qedi/qedi_sysfs.c | 15
-rw-r--r-- drivers/scsi/qla1280.c | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 149
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 48
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.h | 7
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 17
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 35
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 20
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 103
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 73
-rw-r--r-- drivers/scsi/qla4xxx/ql4_attr.c | 41
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 6
-rw-r--r-- drivers/scsi/qlogicfas408.c | 7
-rw-r--r-- drivers/scsi/qlogicpti.c | 7
-rw-r--r-- drivers/scsi/scsi.c | 8
-rw-r--r-- drivers/scsi/scsi_debug.c | 19
-rw-r--r-- drivers/scsi/scsi_error.c | 17
-rw-r--r-- drivers/scsi/scsi_lib.c | 64
-rw-r--r-- drivers/scsi/scsi_pm.c | 105
-rw-r--r-- drivers/scsi/scsi_priv.h | 7
-rw-r--r-- drivers/scsi/scsi_scan.c | 74
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 54
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 1
-rw-r--r-- drivers/scsi/sd.c | 38
-rw-r--r-- drivers/scsi/smartpqi/smartpqi.h | 61
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 588
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sas_transport.c | 6
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sis.c | 60
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sis.h | 4
-rw-r--r-- drivers/scsi/snic/snic.h | 2
-rw-r--r-- drivers/scsi/snic/snic_attrs.c | 19
-rw-r--r-- drivers/scsi/snic/snic_main.c | 2
-rw-r--r-- drivers/scsi/snic/snic_scsi.c | 33
-rw-r--r-- drivers/scsi/sr.c | 7
-rw-r--r-- drivers/scsi/stex.c | 10
-rw-r--r-- drivers/scsi/storvsc_drv.c | 4
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 6
-rw-r--r-- drivers/scsi/ufs/Kconfig | 19
-rw-r--r-- drivers/scsi/ufs/Makefile | 1
-rw-r--r-- drivers/scsi/ufs/ufs-exynos.c | 6
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.c | 6
-rw-r--r-- drivers/scsi/ufs/ufs-hwmon.c | 210
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.c | 111
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.h | 27
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 21
-rw-r--r-- drivers/scsi/ufs/ufs.h | 7
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 4
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 341
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 43
-rw-r--r-- drivers/scsi/ufs/ufshpb.c | 7
-rw-r--r-- drivers/scsi/virtio_scsi.c | 7
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 9
-rw-r--r-- drivers/scsi/wd33c93.c | 18
-rw-r--r-- drivers/scsi/wd719x.c | 4
-rw-r--r-- drivers/scsi/xen-scsifront.c | 4
-rw-r--r-- drivers/staging/rts5208/rtsx.c | 9
-rw-r--r-- drivers/staging/unisys/visorhba/visorhba_main.c | 20
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_cm.c | 8
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_main.c | 17
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_target.c | 28
-rw-r--r-- drivers/target/iscsi/iscsi_target_configfs.c | 91
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 4
-rw-r--r-- drivers/target/sbp/sbp_target.c | 30
-rw-r--r-- drivers/target/target_core_alua.c | 83
-rw-r--r-- drivers/target/target_core_configfs.c | 1
-rw-r--r-- drivers/target/target_core_device.c | 2
-rw-r--r-- drivers/target/target_core_fabric_configfs.c | 78
-rw-r--r-- drivers/target/target_core_iblock.c | 4
-rw-r--r-- drivers/target/target_core_internal.h | 1
-rw-r--r-- drivers/target/target_core_transport.c | 94
-rw-r--r-- drivers/target/target_core_user.c | 7
-rw-r--r-- drivers/target/target_core_xcopy.c | 14
-rw-r--r-- drivers/usb/gadget/function/f_tcm.c | 31
-rw-r--r-- drivers/usb/image/microtek.c | 5
-rw-r--r-- drivers/usb/storage/scsiglue.c | 13
-rw-r--r-- drivers/usb/storage/uas.c | 13
-rw-r--r-- drivers/usb/storage/usb.c | 4
-rw-r--r-- include/linux/libata.h | 8
-rw-r--r-- include/scsi/libsas.h | 1
-rw-r--r-- include/scsi/scsi_cmnd.h | 14
-rw-r--r-- include/scsi/scsi_device.h | 6
-rw-r--r-- include/scsi/scsi_transport_sas.h | 1
-rw-r--r-- include/scsi/scsi_host.h | 27
-rw-r--r-- include/target/target_core_base.h | 9
-rw-r--r-- include/target/target_core_fabric.h | 1
240 files changed, 4172 insertions, 2788 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-fc b/Documentation/ABI/testing/sysfs-class-fc
new file mode 100644
index 000000000000..3057a6d3b8cf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-fc
@@ -0,0 +1,27 @@
+What: /sys/class/fc/fc_udev_device/appid_store
+Date: Aug 2021
+Contact: Muneendra Kumar <muneendra.kumar@broadcom.com>
+Description:
+ This interface allows an admin to set an FC application
+ identifier in the blkcg associated with a cgroup id. The
+ identifier is typically a UUID that is associated with
+ an application or logical entity such as a virtual
+ machine or container group. The application or logical
+ entity utilizes a block device via the cgroup id.
+ FC adapter drivers may query the identifier and tag FC
+ traffic based on the identifier. FC host and FC fabric
+ entities can utilize the application id and FC traffic
+ tag to identify traffic sources.
+
+ The interface expects a string "<cgroupid>:<appid>" where:
+ <cgroupid> is inode of the cgroup in hexadecimal
+ <appid> is user provided string upto 128 characters
+ in length.
+
+ If an appid_store is done for a cgroup id that already
+ has an appid set, the new value will override the
+ previous value.
+
+ If an admin wants to remove an FC application identifier
+ from a cgroup, an appid_store should be done with the
+ following string: "<cgroupid>:"
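(Illustration only, not part of the patch.) A minimal user-space sketch of the appid_store interface described above; the cgroup path "/sys/fs/cgroup/vm1" and the appid value are assumptions made for the example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	char buf[160];
	int fd, len;

	/* The cgroup id is the inode number of the cgroup directory. */
	if (stat("/sys/fs/cgroup/vm1", &st))		/* assumed cgroup path */
		return 1;

	/* Build "<cgroupid>:<appid>" with the cgroup id in hexadecimal. */
	len = snprintf(buf, sizeof(buf), "%llx:%s",
		       (unsigned long long)st.st_ino, "example-app-id");

	fd = open("/sys/class/fc/fc_udev_device/appid_store", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, buf, len) != len)
		perror("appid_store");
	close(fd);
	return 0;
}

Writing "<cgroupid>:" with an empty appid, as noted above, removes the identifier again.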
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index 863cc4897277..a44ef8bfbadf 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -983,7 +983,7 @@ Description: This file shows the amount of data that the host plans to
What: /sys/class/scsi_device/*/device/dyn_cap_needed
Date: February 2018
Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
-Description: This file shows the The amount of physical memory needed
+Description: This file shows the amount of physical memory needed
to be removed from the physical memory resources pool of
the particular logical unit. The full information about
the attribute could be found at UFS specifications 2.1.
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 2e89499bd9c3..eeac5482f1d1 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -376,8 +376,8 @@ struct ahci_host_priv {
extern int ahci_ignore_sss;
-extern struct device_attribute *ahci_shost_attrs[];
-extern struct device_attribute *ahci_sdev_attrs[];
+extern const struct attribute_group *ahci_shost_groups[];
+extern const struct attribute_group *ahci_sdev_groups[];
/*
* This must be instantiated by the edge drivers. Read the comments
@@ -388,8 +388,8 @@ extern struct device_attribute *ahci_sdev_attrs[];
.can_queue = AHCI_MAX_CMDS, \
.sg_tablesize = AHCI_MAX_SG, \
.dma_boundary = AHCI_DMA_BOUNDARY, \
- .shost_attrs = ahci_shost_attrs, \
- .sdev_attrs = ahci_sdev_attrs, \
+ .shost_groups = ahci_shost_groups, \
+ .sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.slave_configure = ata_scsi_slave_config
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3ca7720e7d8f..0b2fcf0d1d6c 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1085,14 +1085,16 @@ static struct ata_port_operations ich_pata_ops = {
.set_dmamode = ich_set_dmamode,
};
-static struct device_attribute *piix_sidpr_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
+static struct attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_policy.attr,
NULL
};
+ATTRIBUTE_GROUPS(piix_sidpr_shost);
+
static struct scsi_host_template piix_sidpr_sht = {
ATA_BMDMA_SHT(DRV_NAME),
- .shost_attrs = piix_sidpr_shost_attrs,
+ .shost_groups = piix_sidpr_shost_groups,
};
static struct ata_port_operations piix_sidpr_sata_ops = {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 5b3fa2cbe722..28430c093a7f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -108,28 +108,46 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
-struct device_attribute *ahci_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- &dev_attr_em_message_type,
- &dev_attr_em_message,
- &dev_attr_ahci_host_caps,
- &dev_attr_ahci_host_cap2,
- &dev_attr_ahci_host_version,
- &dev_attr_ahci_port_cmd,
- &dev_attr_em_buffer,
- &dev_attr_em_message_supported,
+static struct attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_policy.attr,
+ &dev_attr_em_message_type.attr,
+ &dev_attr_em_message.attr,
+ &dev_attr_ahci_host_caps.attr,
+ &dev_attr_ahci_host_cap2.attr,
+ &dev_attr_ahci_host_version.attr,
+ &dev_attr_ahci_port_cmd.attr,
+ &dev_attr_em_buffer.attr,
+ &dev_attr_em_message_supported.attr,
NULL
};
-EXPORT_SYMBOL_GPL(ahci_shost_attrs);
-struct device_attribute *ahci_sdev_attrs[] = {
- &dev_attr_sw_activity,
- &dev_attr_unload_heads,
- &dev_attr_ncq_prio_supported,
- &dev_attr_ncq_prio_enable,
+static const struct attribute_group ahci_shost_attr_group = {
+ .attrs = ahci_shost_attrs
+};
+
+const struct attribute_group *ahci_shost_groups[] = {
+ &ahci_shost_attr_group,
+ NULL
+};
+EXPORT_SYMBOL_GPL(ahci_shost_groups);
+
+struct attribute *ahci_sdev_attrs[] = {
+ &dev_attr_sw_activity.attr,
+ &dev_attr_unload_heads.attr,
+ &dev_attr_ncq_prio_supported.attr,
+ &dev_attr_ncq_prio_enable.attr,
+ NULL
+};
+
+static const struct attribute_group ahci_sdev_attr_group = {
+ .attrs = ahci_sdev_attrs
+};
+
+const struct attribute_group *ahci_sdev_groups[] = {
+ &ahci_sdev_attr_group,
NULL
};
-EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
+EXPORT_SYMBOL_GPL(ahci_sdev_groups);
struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,
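(Illustration only, not part of the patch.) The ahci.h and libahci.c hunks above show the conversion that recurs throughout this series: a plain array of struct attribute pointers is wrapped in an attribute_group and published through the new shost_groups/sdev_groups members of struct scsi_host_template, replacing the removed shost_attrs/sdev_attrs arrays of struct device_attribute pointers. A minimal sketch of the pattern for a hypothetical driver "foo":

#include <linux/device.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <scsi/scsi_host.h>

static ssize_t foo_stats_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR_RO(foo_stats);

static struct attribute *foo_host_attrs[] = {
	&dev_attr_foo_stats.attr,
	NULL
};
ATTRIBUTE_GROUPS(foo_host);	/* emits foo_host_group and foo_host_groups[] */

static struct scsi_host_template foo_sht = {
	.module		= THIS_MODULE,
	.name		= "foo",
	.shost_groups	= foo_host_groups,
};

Drivers that export their groups to other modules (libahci, libata-sata, libata-scsi, mptscsih below) spell out the attribute_group and groups arrays by hand instead of using the ATTRIBUTE_GROUPS() helper, which generates static symbols.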
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 8f3ff830ab0c..4e88597aa9df 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -922,13 +922,22 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
-struct device_attribute *ata_ncq_sdev_attrs[] = {
- &dev_attr_unload_heads,
- &dev_attr_ncq_prio_enable,
- &dev_attr_ncq_prio_supported,
+struct attribute *ata_ncq_sdev_attrs[] = {
+ &dev_attr_unload_heads.attr,
+ &dev_attr_ncq_prio_enable.attr,
+ &dev_attr_ncq_prio_supported.attr,
NULL
};
-EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs);
+
+static const struct attribute_group ata_ncq_sdev_attr_group = {
+ .attrs = ata_ncq_sdev_attrs
+};
+
+const struct attribute_group *ata_ncq_sdev_groups[] = {
+ &ata_ncq_sdev_attr_group,
+ NULL
+};
+EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups);
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
@@ -1258,7 +1267,7 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
else {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
return rc;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8a6b7b913d64..1b84d5526d77 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -234,11 +234,20 @@ static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
field, 0xff, 0);
}
-struct device_attribute *ata_common_sdev_attrs[] = {
- &dev_attr_unload_heads,
+static struct attribute *ata_common_sdev_attrs[] = {
+ &dev_attr_unload_heads.attr,
NULL
};
-EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
+
+static const struct attribute_group ata_common_sdev_attr_group = {
+ .attrs = ata_common_sdev_attrs
+};
+
+const struct attribute_group *ata_common_sdev_groups[] = {
+ &ata_common_sdev_attr_group,
+ NULL
+};
+EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
@@ -634,7 +643,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
if (qc) {
qc->scsicmd = cmd;
- qc->scsidone = cmd->scsi_done;
+ qc->scsidone = scsi_done;
qc->sg = scsi_sglist(cmd);
qc->n_elem = scsi_sg_count(cmd);
@@ -643,7 +652,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
qc->flags |= ATA_QCFLAG_QUIET;
} else {
cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
return qc;
@@ -1738,14 +1747,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
early_finish:
ata_qc_free(qc);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
DPRINTK("EXIT - early finish (good or error)\n");
return 0;
err_did:
ata_qc_free(qc);
cmd->result = (DID_ERROR << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
err_mem:
DPRINTK("EXIT - internal\n");
return 0;
@@ -4042,7 +4051,7 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
scmd->cmd_len, scsi_op, dev->cdb_len);
scmd->result = DID_ERROR << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -4084,7 +4093,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
rc = __ata_scsi_queuecmd(cmd, dev);
else {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
spin_unlock_irqrestore(ap->lock, irq_flags);
@@ -4218,7 +4227,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index be0ca8d5b345..16e8aa184a75 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -923,7 +923,7 @@ static struct scsi_host_template pata_macio_sht = {
*/
.max_segment_size = MAX_DBDMA_SEG,
.slave_configure = pata_macio_slave_config,
- .sdev_attrs = ata_common_sdev_attrs,
+ .sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c53633d47bfb..cae4c1eab102 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -670,7 +670,7 @@ static struct scsi_host_template mv6_sht = {
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
- .sdev_attrs = ata_ncq_sdev_attrs,
+ .sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.slave_configure = ata_scsi_slave_config
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index c385d18ce87b..16272c111208 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -380,7 +380,7 @@ static struct scsi_host_template nv_adma_sht = {
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
.slave_configure = nv_adma_slave_config,
- .sdev_attrs = ata_ncq_sdev_attrs,
+ .sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
@@ -391,7 +391,7 @@ static struct scsi_host_template nv_swncq_sht = {
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = nv_swncq_slave_config,
- .sdev_attrs = ata_ncq_sdev_attrs,
+ .sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 06a1e27c4f84..f99ec6f7d7c0 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -379,7 +379,7 @@ static struct scsi_host_template sil24_sht = {
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
- .sdev_attrs = ata_ncq_sdev_attrs,
+ .sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.slave_configure = ata_scsi_slave_config
};
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 4d5054211550..85cd379fd383 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1375,7 +1375,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
sbp2_unmap_scatterlist(device->card->device, orb);
orb->cmd->result = result;
- orb->cmd->scsi_done(orb->cmd);
+ scsi_done(orb->cmd);
}
static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
@@ -1578,11 +1578,13 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
-static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
- &dev_attr_ieee1394_id,
+static struct attribute *sbp2_scsi_sysfs_attrs[] = {
+ &dev_attr_ieee1394_id.attr,
NULL
};
+ATTRIBUTE_GROUPS(sbp2_scsi_sysfs);
+
static struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
@@ -1595,7 +1597,7 @@ static struct scsi_host_template scsi_driver_template = {
.sg_tablesize = SG_ALL,
.max_segment_size = SBP2_MAX_SEG_SIZE,
.can_queue = 1,
- .sdev_attrs = sbp2_scsi_sysfs_attrs,
+ .sdev_groups = sbp2_scsi_sysfs_groups,
};
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 71eda91e810c..e174e853f8a4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1026,10 +1026,17 @@ out:
*/
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
- struct device_attribute **attr;
+ const struct attribute_group **g;
+ struct attribute **attr;
- for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
- device_remove_file(&shost->shost_dev, *attr);
+ for (g = shost->hostt->shost_groups; *g; ++g) {
+ for (attr = (*g)->attrs; *attr; ++attr) {
+ struct device_attribute *dev_attr =
+ container_of(*attr, typeof(*dev_attr), attr);
+
+ device_remove_file(&shost->shost_dev, dev_attr);
+ }
+ }
}
static void srp_remove_target(struct srp_target_port *target)
@@ -1266,7 +1273,7 @@ static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
if (scmnd) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = result;
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
}
}
@@ -1987,7 +1994,7 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
srp_free_req(ch, req, scmnd,
be32_to_cpu(rsp->req_lim_delta));
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
}
}
@@ -2239,7 +2246,7 @@ err_iu:
err:
if (scmnd->result) {
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
ret = 0;
} else {
ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -2811,7 +2818,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
if (ret == SUCCESS) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = DID_ABORT << 16;
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
}
return ret;
@@ -3050,26 +3057,28 @@ static ssize_t allow_ext_sg_show(struct device *dev,
static DEVICE_ATTR_RO(allow_ext_sg);
-static struct device_attribute *srp_host_attrs[] = {
- &dev_attr_id_ext,
- &dev_attr_ioc_guid,
- &dev_attr_service_id,
- &dev_attr_pkey,
- &dev_attr_sgid,
- &dev_attr_dgid,
- &dev_attr_orig_dgid,
- &dev_attr_req_lim,
- &dev_attr_zero_req_lim,
- &dev_attr_local_ib_port,
- &dev_attr_local_ib_device,
- &dev_attr_ch_count,
- &dev_attr_comp_vector,
- &dev_attr_tl_retry_count,
- &dev_attr_cmd_sg_entries,
- &dev_attr_allow_ext_sg,
+static struct attribute *srp_host_attrs[] = {
+ &dev_attr_id_ext.attr,
+ &dev_attr_ioc_guid.attr,
+ &dev_attr_service_id.attr,
+ &dev_attr_pkey.attr,
+ &dev_attr_sgid.attr,
+ &dev_attr_dgid.attr,
+ &dev_attr_orig_dgid.attr,
+ &dev_attr_req_lim.attr,
+ &dev_attr_zero_req_lim.attr,
+ &dev_attr_local_ib_port.attr,
+ &dev_attr_local_ib_device.attr,
+ &dev_attr_ch_count.attr,
+ &dev_attr_comp_vector.attr,
+ &dev_attr_tl_retry_count.attr,
+ &dev_attr_cmd_sg_entries.attr,
+ &dev_attr_allow_ext_sg.attr,
NULL
};
+ATTRIBUTE_GROUPS(srp_host);
+
static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
@@ -3090,7 +3099,7 @@ static struct scsi_host_template srp_template = {
.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
- .shost_attrs = srp_host_attrs,
+ .shost_groups = srp_host_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct srp_request),
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3cadf1295417..f86ee1c4b970 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3705,47 +3705,17 @@ static struct configfs_attribute *srpt_da_attrs[] = {
NULL,
};
-static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
+static int srpt_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- return sysfs_emit(page, "%d\n", sport->enabled);
-}
-
-static ssize_t srpt_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- unsigned long tmp;
- int ret;
-
- ret = kstrtoul(page, 0, &tmp);
- if (ret < 0) {
- pr_err("Unable to extract srpt_tpg_store_enable\n");
- return -EINVAL;
- }
-
- if ((tmp != 0) && (tmp != 1)) {
- pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
- return -EINVAL;
- }
-
mutex_lock(&sport->mutex);
- srpt_set_enabled(sport, tmp);
+ srpt_set_enabled(sport, enable);
mutex_unlock(&sport->mutex);
- return count;
+ return 0;
}
-CONFIGFS_ATTR(srpt_tpg_, enable);
-
-static struct configfs_attribute *srpt_tpg_attrs[] = {
- &srpt_tpg_attr_enable,
- NULL,
-};
-
/**
* srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* @wwn: Corresponds to $driver/$port.
@@ -3856,12 +3826,12 @@ static const struct target_core_fabric_ops srpt_template = {
.fabric_make_wwn = srpt_make_tport,
.fabric_drop_wwn = srpt_drop_tport,
.fabric_make_tpg = srpt_make_tpg,
+ .fabric_enable_tpg = srpt_enable_tpg,
.fabric_drop_tpg = srpt_drop_tpg,
.fabric_init_nodeacl = srpt_init_nodeacl,
.tfc_discovery_attrs = srpt_da_attrs,
.tfc_wwn_attrs = srpt_wwn_attrs,
- .tfc_tpg_base_attrs = srpt_tpg_attrs,
.tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
};
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 572333fadd68..fac747109209 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -129,7 +129,7 @@ static struct scsi_host_template mptfc_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
- .shost_attrs = mptscsih_host_attrs,
+ .shost_groups = mptscsih_host_attr_groups,
};
/****************************************************************************
@@ -649,14 +649,14 @@ mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
err = fc_remote_port_chkready(rport);
if (unlikely(err)) {
SCpnt->result = err;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
@@ -664,7 +664,7 @@ mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (unlikely(!ri)) {
SCpnt->result = DID_IMM_RETRY << 16;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 85285ba8e817..091b45024d34 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1929,7 +1929,7 @@ mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
SCpnt->result = DID_NO_CONNECT << 16;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
@@ -2020,7 +2020,7 @@ static struct scsi_host_template mptsas_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
- .shost_attrs = mptscsih_host_attrs,
+ .shost_groups = mptscsih_host_attr_groups,
.no_write_same = 1,
};
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index ce2e5b21978e..276084ed04a6 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1009,7 +1009,7 @@ out:
/* Unmap the DMA buffers, if any. */
scsi_dma_unmap(sc);
- sc->scsi_done(sc); /* Issue the command callback */
+ scsi_done(sc); /* Issue the command callback */
/* Free Chain buffers */
mptscsih_freeChainBuffers(ioc, req_idx);
@@ -1054,7 +1054,7 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
"completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
"idx=%x\n", ioc->name, channel, id, sc, mf, ii));
- sc->scsi_done(sc);
+ scsi_done(sc);
}
}
EXPORT_SYMBOL(mptscsih_flush_running_cmds);
@@ -1118,7 +1118,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
"fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
vdevice->vtarget->channel, vdevice->vtarget->id,
sc, mf, ii));
- sc->scsi_done(sc);
+ scsi_done(sc);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
}
}
@@ -1693,7 +1693,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL) {
SCpnt->result = DID_RESET << 16;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
printk(KERN_ERR MYNAM ": task abort: "
"can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
@@ -1710,7 +1710,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
"task abort: device has been deleted (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
retval = SUCCESS;
goto out;
}
@@ -3218,23 +3218,31 @@ mptscsih_debug_level_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR,
mptscsih_debug_level_show, mptscsih_debug_level_store);
-struct device_attribute *mptscsih_host_attrs[] = {
- &dev_attr_version_fw,
- &dev_attr_version_bios,
- &dev_attr_version_mpi,
- &dev_attr_version_product,
- &dev_attr_version_nvdata_persistent,
- &dev_attr_version_nvdata_default,
- &dev_attr_board_name,
- &dev_attr_board_assembly,
- &dev_attr_board_tracer,
- &dev_attr_io_delay,
- &dev_attr_device_delay,
- &dev_attr_debug_level,
+static struct attribute *mptscsih_host_attrs[] = {
+ &dev_attr_version_fw.attr,
+ &dev_attr_version_bios.attr,
+ &dev_attr_version_mpi.attr,
+ &dev_attr_version_product.attr,
+ &dev_attr_version_nvdata_persistent.attr,
+ &dev_attr_version_nvdata_default.attr,
+ &dev_attr_board_name.attr,
+ &dev_attr_board_assembly.attr,
+ &dev_attr_board_tracer.attr,
+ &dev_attr_io_delay.attr,
+ &dev_attr_device_delay.attr,
+ &dev_attr_debug_level.attr,
NULL,
};
-EXPORT_SYMBOL(mptscsih_host_attrs);
+static const struct attribute_group mptscsih_host_attr_group = {
+ .attrs = mptscsih_host_attrs
+};
+
+const struct attribute_group *mptscsih_host_attr_groups[] = {
+ &mptscsih_host_attr_group,
+ NULL
+};
+EXPORT_SYMBOL(mptscsih_host_attr_groups);
EXPORT_SYMBOL(mptscsih_remove);
EXPORT_SYMBOL(mptscsih_shutdown);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 2baeefd9be7a..a22c5eaf703c 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -131,7 +131,7 @@ extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
-extern struct device_attribute *mptscsih_host_attrs[];
+extern const struct attribute_group *mptscsih_host_attr_groups[];
extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
extern void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index af0ce5611e4a..acd4805dcf83 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -782,14 +782,14 @@ mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
if (SCpnt->device->channel == 1 &&
mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) {
SCpnt->result = DID_NO_CONNECT << 16;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
@@ -843,7 +843,7 @@ static struct scsi_host_template mptspi_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
- .shost_attrs = mptscsih_host_attrs,
+ .shost_groups = mptscsih_host_attr_groups,
};
static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 6bc96d70254d..c302cbb18a55 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -184,8 +184,8 @@ extern const struct attribute_group *zfcp_sysfs_adapter_attr_groups[];
extern const struct attribute_group *zfcp_unit_attr_groups[];
extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
-extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
-extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+extern const struct attribute_group *zfcp_sysfs_sdev_attr_groups[];
+extern const struct attribute_group *zfcp_sysfs_shost_attr_groups[];
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c1f979296c1a..4f1e4385ce58 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2501,7 +2501,7 @@ skip_fsfstatus:
zfcp_dbf_scsi_result(scpnt, req);
scpnt->host_scribble = NULL;
- (scpnt->scsi_done) (scpnt);
+ scsi_done(scpnt);
/*
* We must hold this lock until scsi_done has been called.
* Otherwise we may call scsi_done after abort regarding this
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9da9b2b2a580..526ac240d9fe 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -60,7 +60,7 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(scpnt, result);
zfcp_dbf_scsi_fail_send(scpnt);
- scpnt->scsi_done(scpnt);
+ scsi_done(scpnt);
}
static
@@ -78,7 +78,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
zfcp_dbf_scsi_fail_send(scpnt);
- scpnt->scsi_done(scpnt);
+ scsi_done(scpnt);
return 0;
}
@@ -444,8 +444,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
/* report size limit per scatter-gather segment */
.max_segment_size = ZFCP_QDIO_SBALE_LEN,
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .shost_attrs = zfcp_sysfs_shost_attrs,
- .sdev_attrs = zfcp_sysfs_sdev_attrs,
+ .shost_groups = zfcp_sysfs_shost_attr_groups,
+ .sdev_groups = zfcp_sysfs_sdev_attr_groups,
.track_queue_depth = 1,
.supported_mode = MODE_INITIATOR,
};
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b8cd75a872ee..dbf3e50444e6 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -672,17 +672,26 @@ ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
atomic_read(&zfcp_sdev->status));
-struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
- &dev_attr_fcp_lun,
- &dev_attr_wwpn,
- &dev_attr_hba_id,
- &dev_attr_read_latency,
- &dev_attr_write_latency,
- &dev_attr_cmd_latency,
- &dev_attr_zfcp_access_denied,
- &dev_attr_zfcp_failed,
- &dev_attr_zfcp_in_recovery,
- &dev_attr_zfcp_status,
+struct attribute *zfcp_sdev_attrs[] = {
+ &dev_attr_fcp_lun.attr,
+ &dev_attr_wwpn.attr,
+ &dev_attr_hba_id.attr,
+ &dev_attr_read_latency.attr,
+ &dev_attr_write_latency.attr,
+ &dev_attr_cmd_latency.attr,
+ &dev_attr_zfcp_access_denied.attr,
+ &dev_attr_zfcp_failed.attr,
+ &dev_attr_zfcp_in_recovery.attr,
+ &dev_attr_zfcp_status.attr,
+ NULL
+};
+
+static const struct attribute_group zfcp_sysfs_sdev_attr_group = {
+ .attrs = zfcp_sdev_attrs
+};
+
+const struct attribute_group *zfcp_sysfs_sdev_attr_groups[] = {
+ &zfcp_sysfs_sdev_attr_group,
NULL
};
@@ -783,12 +792,21 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
-struct device_attribute *zfcp_sysfs_shost_attrs[] = {
- &dev_attr_utilization,
- &dev_attr_requests,
- &dev_attr_megabytes,
- &dev_attr_seconds_active,
- &dev_attr_queue_full,
+static struct attribute *zfcp_sysfs_shost_attrs[] = {
+ &dev_attr_utilization.attr,
+ &dev_attr_requests.attr,
+ &dev_attr_megabytes.attr,
+ &dev_attr_seconds_active.attr,
+ &dev_attr_queue_full.attr,
+ NULL
+};
+
+static const struct attribute_group zfcp_sysfs_shost_attr_group = {
+ .attrs = zfcp_sysfs_shost_attrs
+};
+
+const struct attribute_group *zfcp_sysfs_shost_attr_groups[] = {
+ &zfcp_sysfs_shost_attr_group,
NULL
};
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index e41cc354cc8a..cd823ff5deab 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -197,11 +197,13 @@ static struct device_attribute twa_host_stats_attr = {
};
/* Host attributes initializer */
-static struct device_attribute *twa_host_attrs[] = {
- &twa_host_stats_attr,
+static struct attribute *twa_host_attrs[] = {
+ &twa_host_stats_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(twa_host);
+
/* File operations struct for character device */
static const struct file_operations twa_fops = {
.owner = THIS_MODULE,
@@ -1352,7 +1354,7 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
/* Now complete the io */
if (twa_command_mapped(cmd))
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
@@ -1596,7 +1598,7 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
cmd->result = (DID_RESET << 16);
if (twa_command_mapped(cmd))
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
}
@@ -1744,8 +1746,9 @@ out:
} /* End twa_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@@ -1763,9 +1766,6 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
goto out;
}
- /* Save done function into scsi_cmnd struct */
- SCpnt->scsi_done = done;
-
/* Get a free request id */
twa_get_request_id(tw_dev, &request_id);
@@ -1990,7 +1990,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .shost_attrs = twa_host_attrs,
+ .shost_groups = twa_host_groups,
.emulated = 1,
.no_write_same = 1,
};
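(Illustration only, not part of the patch.) The 3w-9xxx.c hunks above also show the other change that runs through this series: ->queuecommand_lck() implementations no longer receive a done callback, and completion goes through the new scsi_done() helper instead of the removed cmd->scsi_done pointer. A hedged sketch of the resulting shape for a hypothetical driver "foo":

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int foo_queuecommand_lck(struct scsi_cmnd *cmd)
{
	if (cmd->device->lun > 0) {
		/* Fail the command directly; no saved callback needed. */
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);		/* was cmd->scsi_done(cmd) */
		return 0;
	}
	/* Hand off to hardware here; the completion path (for example
	 * the IRQ handler) likewise ends in scsi_done(cmd). */
	return 0;
}

static DEF_SCSI_QCMD(foo_queuecommand)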
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 4fde39da54e4..b9482da79512 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -198,11 +198,13 @@ static struct device_attribute twl_host_stats_attr = {
};
/* Host attributes initializer */
-static struct device_attribute *twl_host_attrs[] = {
- &twl_host_stats_attr,
+static struct attribute *twl_host_attrs[] = {
+ &twl_host_stats_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(twl_host);
+
/* This function will look up an AEN severity string */
static char *twl_aen_severity_lookup(unsigned char severity_code)
{
@@ -1216,7 +1218,7 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
/* Now complete the io */
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twl_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
@@ -1369,7 +1371,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
if (cmd) {
cmd->result = (DID_RESET << 16);
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
}
@@ -1450,8 +1452,9 @@ out:
} /* End twl_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@@ -1461,9 +1464,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
goto out;
}
- /* Save done function into scsi_cmnd struct */
- SCpnt->scsi_done = done;
-
/* Get a free request id */
twl_get_request_id(tw_dev, &request_id);
@@ -1544,7 +1544,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .shost_attrs = twl_host_attrs,
+ .shost_groups = twl_host_groups,
.emulated = 1,
.no_write_same = 1,
};
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 4ee485ab2714..a853c5497af6 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -532,11 +532,13 @@ static struct device_attribute tw_host_stats_attr = {
};
/* Host attributes initializer */
-static struct device_attribute *tw_host_attrs[] = {
- &tw_host_stats_attr,
+static struct attribute *tw_host_attrs[] = {
+ &tw_host_stats_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(tw_host);
+
/* This function will read the aen queue from the isr */
static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
@@ -1160,7 +1162,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->srb[request_id]->result = (DID_OK << 16);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
}
command_packet->byte8.param.sgl[0].address = param_value;
command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
@@ -1305,7 +1307,7 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
if (srb != NULL) {
srb->result = (DID_RESET << 16);
scsi_dma_unmap(srb);
- srb->scsi_done(srb);
+ scsi_done(srb);
}
}
}
@@ -1505,7 +1507,7 @@ static int tw_scsiop_mode_sense(TW_Device_Extension *tw_dev, int request_id)
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->srb[request_id]->result = (DID_OK << 16);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
return 0;
}
@@ -1796,7 +1798,7 @@ static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id)
/* If we got a request_sense, we probably want a reset, return error */
tw_dev->srb[request_id]->result = (DID_ERROR << 16);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
return 0;
} /* End tw_scsiop_request_sense() */
@@ -1918,8 +1920,9 @@ static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int r
} /* End tw_scsiop_test_unit_ready_complete() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
unsigned char *command = SCpnt->cmnd;
int request_id = 0;
int retval = 1;
@@ -1929,9 +1932,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
if (test_bit(TW_IN_RESET, &tw_dev->flags))
return SCSI_MLQUEUE_HOST_BUSY;
- /* Save done function into struct scsi_cmnd */
- SCpnt->scsi_done = done;
-
/* Queue the command and get a request id */
tw_state_request_start(tw_dev, &request_id);
@@ -2165,7 +2165,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
/* Now complete the io */
if ((error != TW_ISR_DONT_COMPLETE)) {
scsi_dma_unmap(tw_dev->srb[request_id]);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->posted_request_count--;
@@ -2242,7 +2242,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .shost_attrs = tw_host_attrs,
+ .shost_groups = tw_host_groups,
.emulated = 1,
.no_write_same = 1,
};
@@ -2252,7 +2252,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
- int retval = -ENODEV;
+ int retval;
retval = pci_enable_device(pdev);
if (retval) {
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index a12e3525977d..3ad3ebaca8e9 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -163,7 +163,7 @@ STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
-STATIC struct device_attribute *NCR_700_dev_attrs[];
+STATIC const struct attribute_group *NCR_700_dev_groups[];
STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
@@ -300,8 +300,8 @@ NCR_700_detect(struct scsi_host_template *tpnt,
static int banner = 0;
int j;
- if(tpnt->sdev_attrs == NULL)
- tpnt->sdev_attrs = NCR_700_dev_attrs;
+ if (tpnt->sdev_groups == NULL)
+ tpnt->sdev_groups = NCR_700_dev_groups;
memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
if (!memory) {
@@ -634,7 +634,7 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
SCp->host_scribble = NULL;
SCp->result = result;
- SCp->scsi_done(SCp);
+ scsi_done(SCp);
} else {
printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
}
@@ -1571,7 +1571,7 @@ NCR_700_intr(int irq, void *dev_id)
* deadlock on the
* hostdata->state_lock */
SCp->result = DID_RESET << 16;
- SCp->scsi_done(SCp);
+ scsi_done(SCp);
}
mdelay(25);
NCR_700_chip_setup(host);
@@ -1751,8 +1751,7 @@ NCR_700_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int
-NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
+static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
@@ -1792,7 +1791,6 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
slot->cmnd = SCp;
- SCp->scsi_done = done;
SCp->host_scribble = (unsigned char *)slot;
SCp->SCp.ptr = NULL;
SCp->SCp.buffer = NULL;
@@ -2087,11 +2085,13 @@ static struct device_attribute NCR_700_active_tags_attr = {
.show = NCR_700_show_active_tags,
};
-STATIC struct device_attribute *NCR_700_dev_attrs[] = {
- &NCR_700_active_tags_attr,
+STATIC struct attribute *NCR_700_dev_attrs[] = {
+ &NCR_700_active_tags_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(NCR_700_dev);
+
EXPORT_SYMBOL(NCR_700_detect);
EXPORT_SYMBOL(NCR_700_release);
EXPORT_SYMBOL(NCR_700_intr);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 40088dcb98cd..a897c8f914cf 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2624,7 +2624,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
command->reset_chain;
command->reset_chain = NULL;
command->result = DID_RESET << 16;
- command->scsi_done(command);
+ scsi_done(command);
command = nxt_cmd;
}
#endif
@@ -2641,7 +2641,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
blogic_dealloc_ccb(ccb, 1);
adapter->active_cmds[tgt_id]--;
command->result = DID_RESET << 16;
- command->scsi_done(command);
+ scsi_done(command);
}
adapter->bdr_pend[tgt_id] = NULL;
} else {
@@ -2713,7 +2713,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/*
Call the SCSI Command Completion Routine.
*/
- command->scsi_done(command);
+ scsi_done(command);
}
}
adapter->processing_ccbs = false;
@@ -2866,9 +2866,9 @@ static int blogic_hostreset(struct scsi_cmnd *SCpnt)
Outgoing Mailbox for execution by the associated Host Adapter.
*/
-static int blogic_qcmd_lck(struct scsi_cmnd *command,
- void (*comp_cb) (struct scsi_cmnd *))
+static int blogic_qcmd_lck(struct scsi_cmnd *command)
{
+ void (*comp_cb)(struct scsi_cmnd *) = scsi_done;
struct blogic_adapter *adapter =
(struct blogic_adapter *) command->device->host->hostdata;
struct blogic_tgt_flags *tgt_flags =
@@ -3038,7 +3038,6 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
return SCSI_MLQUEUE_HOST_BUSY;
}
ccb->sensedata = sense_buf;
- command->scsi_done = comp_cb;
if (blogic_multimaster_type(adapter)) {
/*
Place the CCB in an Outgoing Mailbox. The higher levels
@@ -3060,7 +3059,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter);
blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16;
- command->scsi_done(command);
+ scsi_done(command);
}
}
} else {
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index a85589a2a8af..55af3e245a92 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -547,7 +547,7 @@ static void complete_cmd(struct Scsi_Host *instance,
hostdata->sensing = NULL;
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/**
@@ -573,7 +573,7 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
case WRITE_10:
shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n");
cmd->result = (DID_ERROR << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
@@ -960,7 +960,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
* hostdata->connected will be set to cmd.
* SELECT interrupt will be disabled.
*
- * If failed (no target) : cmd->scsi_done() will be called, and the
+ * If failed (no target) : scsi_done() will be called, and the
* cmd->result host byte set to DID_BAD_TARGET.
*/
@@ -2262,7 +2262,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ scsi_done(cmd); /* No tag or busy flag to worry about */
goto out;
}
@@ -2357,7 +2357,7 @@ static void bus_reset_cleanup(struct Scsi_Host *instance)
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
INIT_LIST_HEAD(&hostdata->autosense);
@@ -2400,7 +2400,7 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
scmd->result = DID_RESET << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
INIT_LIST_HEAD(&hostdata->unissued);
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 028af6b1057c..564ade03b530 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -911,13 +911,12 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
* queue the command down to the controller
*/
-static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
+static int inia100_queue_lck(struct scsi_cmnd *cmd)
{
struct orc_scb *scb;
struct orc_host *host; /* Point to Host adapter control block */
host = (struct orc_host *) cmd->device->host->hostdata;
- cmd->scsi_done = done;
/* Get free SCSI control block */
if ((scb = orc_alloc_scb(host)) == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1042,7 +1041,7 @@ static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
}
cmd->result = scb->tastat | (scb->hastat << 16);
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd); /* Notify system DONE */
+ scsi_done(cmd); /* Notify system DONE */
orc_release_scb(host, scb); /* Release SCB for current channel */
}
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index c2d6f0a9e0b1..59f6b7b2a70a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -223,6 +223,7 @@ static long aac_build_sghba(struct scsi_cmnd *scsicmd,
int sg_max, u64 sg_address);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
int pages, int nseg, int nseg_new);
+static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
@@ -332,7 +333,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
struct fib *fibptr) {
struct scsi_device *device;
- if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
+ if (unlikely(!scsicmd)) {
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
aac_fib_complete(fibptr);
return 0;
@@ -517,6 +518,17 @@ int aac_get_containers(struct aac_dev *dev)
return status;
}
+static void aac_scsi_done(struct scsi_cmnd *scmd)
+{
+ if (scmd->device->request_queue) {
+ /* SCSI command has been submitted by the SCSI mid-layer. */
+ scsi_done(scmd);
+ } else {
+ /* SCSI command has been submitted by aac_probe_container(). */
+ aac_probe_container_scsi_done(scmd);
+ }
+}
+
static void get_container_name_callback(void *context, struct fib * fibptr)
{
struct aac_get_name_resp * get_name_reply;
@@ -558,7 +570,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
/*
@@ -614,7 +626,7 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
return aac_scsi_cmd(scsicmd);
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -804,8 +816,8 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
int aac_probe_container(struct aac_dev *dev, int cid)
{
- struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
- struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
+ struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
+ struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
int status;
if (!scsicmd || !scsidev) {
@@ -813,7 +825,6 @@ int aac_probe_container(struct aac_dev *dev, int cid)
kfree(scsidev);
return -ENOMEM;
}
- scsicmd->scsi_done = aac_probe_container_scsi_done;
scsicmd->device = scsidev;
scsidev->sdev_state = 0;
@@ -1094,7 +1105,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
/*
@@ -1197,7 +1208,7 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- cmd->scsi_done(cmd);
+ aac_scsi_done(cmd);
return 1;
}
return 0;
@@ -2392,7 +2403,7 @@ static void io_callback(void *context, struct fib * fibptr)
}
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
static int aac_read(struct scsi_cmnd * scsicmd)
@@ -2463,7 +2474,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -2489,7 +2500,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
return 0;
@@ -2554,7 +2565,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -2580,7 +2591,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
@@ -2621,7 +2632,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
- cmd->scsi_done(cmd);
+ aac_scsi_done(cmd);
}
static int aac_synchronize(struct scsi_cmnd *scsicmd)
@@ -2688,7 +2699,7 @@ static void aac_start_stop_callback(void *context, struct fib *fibptr)
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
static int aac_start_stop(struct scsi_cmnd *scsicmd)
@@ -2702,7 +2713,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
if (!(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)) {
scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -3237,7 +3248,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_done_ret:
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -3546,7 +3557,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
static void hba_resp_task_complete(struct aac_dev *dev,
@@ -3686,7 +3697,7 @@ out:
if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
scsicmd->SCp.sent_command = 1;
else
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
/**
@@ -3706,7 +3717,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > 7) {
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -3747,7 +3758,7 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > AAC_MAX_LUN - 1) {
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3168915adaa7..a911252075a6 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -605,12 +605,14 @@ static struct device_attribute aac_unique_id_attr = {
-static struct device_attribute *aac_dev_attrs[] = {
- &aac_raid_level_attr,
- &aac_unique_id_attr,
+static struct attribute *aac_dev_attrs[] = {
+ &aac_raid_level_attr.attr,
+ &aac_unique_id_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(aac_dev);
+
static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
@@ -1442,21 +1444,23 @@ static struct device_attribute aac_reset = {
.show = aac_show_reset_adapter,
};
-static struct device_attribute *aac_attrs[] = {
- &aac_model,
- &aac_vendor,
- &aac_flags,
- &aac_kernel_version,
- &aac_monitor_version,
- &aac_bios_version,
- &aac_lld_version,
- &aac_serial_number,
- &aac_max_channel,
- &aac_max_id,
- &aac_reset,
+static struct attribute *aac_host_attrs[] = {
+ &aac_model.attr,
+ &aac_vendor.attr,
+ &aac_flags.attr,
+ &aac_kernel_version.attr,
+ &aac_monitor_version.attr,
+ &aac_bios_version.attr,
+ &aac_lld_version.attr,
+ &aac_serial_number.attr,
+ &aac_max_channel.attr,
+ &aac_max_id.attr,
+ &aac_reset.attr,
NULL
};
+ATTRIBUTE_GROUPS(aac_host);
+
ssize_t aac_get_serial_number(struct device *device, char *buf)
{
return aac_show_serial_number(device, &aac_serial_number, buf);
@@ -1483,10 +1487,10 @@ static struct scsi_host_template aac_driver_template = {
#endif
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
- .shost_attrs = aac_attrs,
+ .shost_groups = aac_host_groups,
.slave_configure = aac_slave_configure,
.change_queue_depth = aac_change_queue_depth,
- .sdev_attrs = aac_dev_attrs,
+ .sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
.eh_device_reset_handler = aac_eh_dev_reset,
.eh_target_reset_handler = aac_eh_target_reset,
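
As an aside (illustrative sketch, not from the patch): what the ATTRIBUTE_GROUPS() shorthand used above amounts to, for a made-up "foo" host attribute. The macro takes foo_host_attrs[] and emits foo_host_group plus foo_host_groups[], which is what .shost_groups/.sdev_groups expect; arcmsr, bnx2i and fnic later in this series spell out the same wrappers by hand.

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "1.0\n");	/* made-up value */
}
static DEVICE_ATTR_RO(foo_version);

static struct attribute *foo_host_attrs[] = {
	&dev_attr_foo_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo_host);	/* emits foo_host_group and foo_host_groups[] */

/* open-coded equivalent of the two definitions the macro generates: */
static const struct attribute_group foo_alt_group = {
	.attrs = foo_host_attrs,
};
static const struct attribute_group *foo_alt_groups[] = {
	&foo_alt_group,
	NULL,
};

/* wired up via .shost_groups = foo_host_groups in the scsi_host_template */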
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ffb391967573..ace5eff828e9 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -3308,8 +3308,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
- (unsigned long)v->iop_base,
+ " iop_base 0x%p, cable_detect: %X, err_code %u\n",
+ v->iop_base,
AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT,
v->err_code);
@@ -3592,7 +3592,7 @@ static void asc_scsi_done(struct scsi_cmnd *scp)
{
scsi_dma_unmap(scp);
ASC_STATS(scp->device->host, done);
- scp->scsi_done(scp);
+ scsi_done(scp);
}
static void AscSetBank(PortAddr iop_base, uchar bank)
@@ -7477,8 +7477,8 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
return ASC_ERROR;
}
- asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) +
- use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC);
+ asc_sg_head = kzalloc(struct_size(asc_sg_head, sg_list, use_sg),
+ GFP_ATOMIC);
if (!asc_sg_head) {
scsi_dma_unmap(scp);
set_host_byte(scp, DID_SOFT_ERROR);
@@ -8453,14 +8453,12 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
* This function always returns 0. Command return status is saved
* in the 'scp' result field.
*/
-static int
-advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+static int advansys_queuecommand_lck(struct scsi_cmnd *scp)
{
struct Scsi_Host *shost = scp->device->host;
int asc_res, result = 0;
ASC_STATS(shost, queuecommand);
- scp->scsi_done = done;
asc_res = asc_execute_scsi_cmnd(scp);
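
For reference (a sketch under assumed types, not from the patch): struct_size() from <linux/overflow.h>, as used in the advansys hunk above, computes sizeof(*head) plus n trailing elements and saturates on overflow, so the allocation fails instead of being silently undersized.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_sg_head {			/* hypothetical header with a flexible array */
	unsigned short entry_cnt;
	struct foo_sg_entry {
		u32 addr;
		u32 bytes;
	} sg_list[];
};

static struct foo_sg_head *foo_alloc_sg(unsigned short use_sg)
{
	struct foo_sg_head *head;

	/* sizeof(*head) + use_sg * sizeof(head->sg_list[0]), overflow-checked */
	head = kzalloc(struct_size(head, sg_list, use_sg), GFP_ATOMIC);
	if (head)
		head->entry_cnt = use_sg;
	return head;
}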
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index b13b5c85f3de..d17880b57d17 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -905,13 +905,11 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
* Queue a command and setup interrupts for a free bus.
*/
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
- struct completion *complete,
- int phase, void (*done)(struct scsi_cmnd *))
+ struct completion *complete, int phase)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
- SCpnt->scsi_done = done;
SCpnt->SCp.phase = not_issued | phase;
SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
SCpnt->SCp.Message = 0;
@@ -977,10 +975,9 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
* queue a command
*
*/
-static int aha152x_queue_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int aha152x_queue_lck(struct scsi_cmnd *SCpnt)
{
- return aha152x_internal_queue(SCpnt, NULL, 0, done);
+ return aha152x_internal_queue(SCpnt, NULL, 0);
}
static DEF_SCSI_QCMD(aha152x_queue)
@@ -998,6 +995,14 @@ static void reset_done(struct scsi_cmnd *SCpnt)
}
}
+static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
+{
+ if (SCpnt->SCp.phase & resetting)
+ reset_done(SCpnt);
+ else
+ scsi_done(SCpnt);
+}
+
/*
* Abort a command
*
@@ -1064,7 +1069,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
SCpnt->cmd_len = 0;
- aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
+ aha152x_internal_queue(SCpnt, &done, resetting);
timeleft = wait_for_completion_timeout(&done, 100*HZ);
if (!timeleft) {
@@ -1439,12 +1444,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0);
DO_UNLOCK(flags);
- aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
+ aha152x_internal_queue(ptr, NULL, check_condition);
DO_LOCK(flags);
}
}
- if(DONE_SC && DONE_SC->scsi_done) {
+ if (DONE_SC) {
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -1453,13 +1458,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- if(ptr->scsi_done != reset_done) {
+ if (!(ptr->SCp.phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
DO_UNLOCK(flags);
- ptr->scsi_done(ptr);
+ aha152x_scsi_done(ptr);
DO_LOCK(flags);
}
@@ -2258,7 +2263,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
ptr->host_scribble=NULL;
set_host_byte(ptr, DID_RESET);
- ptr->scsi_done(ptr);
+ aha152x_scsi_done(ptr);
}
ptr = next;
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 584a59522038..f0e8ae9f5e40 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -268,8 +268,7 @@ static void aha1542_free_cmd(struct scsi_cmnd *cmd)
struct bio_vec bv;
rq_for_each_segment(bv, rq, iter) {
- memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
- bv.bv_len);
+ memcpy_to_bvec(&bv, buf);
buf += bv.bv_len;
}
}
@@ -281,7 +280,6 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
{
struct Scsi_Host *sh = dev_id;
struct aha1542_hostdata *aha1542 = shost_priv(sh);
- void (*my_done)(struct scsi_cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
int number_serviced;
unsigned long flags;
@@ -369,14 +367,13 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
tmp_cmd = aha1542->int_cmds[mbo];
- if (!tmp_cmd || !tmp_cmd->scsi_done) {
+ if (!tmp_cmd) {
spin_unlock_irqrestore(sh->host_lock, flags);
shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n");
shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat,
ccb[mbo].hastat, ccb[mbo].idlun, mbo);
return IRQ_HANDLED;
}
- my_done = tmp_cmd->scsi_done;
aha1542_free_cmd(tmp_cmd);
/*
* Fetch the sense data, and tuck it away, in the required slot. The
@@ -410,7 +407,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as
* far as queuecommand is concerned
*/
- my_done(tmp_cmd);
+ scsi_done(tmp_cmd);
number_serviced++;
};
}
@@ -431,7 +428,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (*cmd->cmnd == REQUEST_SENSE) {
/* Don't do the command - we have the sense data already */
cmd->result = 0;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
#ifdef DEBUG
@@ -454,8 +451,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
struct bio_vec bv;
rq_for_each_segment(bv, rq, iter) {
- memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
- bv.bv_len);
+ memcpy_from_bvec(buf, &bv);
buf += bv.bv_len;
}
}
@@ -488,7 +484,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
aha1542->aha1542_last_mbo_used = mbo;
#ifdef DEBUG
- shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
+ shost_printk(KERN_DEBUG, sh, "Sending command (%d)...", mbo);
#endif
/* This gets trashed for some reason */
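
A side note (sketch with placeholder names, not from the patch): memcpy_from_bvec()/memcpy_to_bvec() from <linux/bvec.h>, used in the aha1542 hunks, wrap the per-segment kmap/memcpy/kunmap dance, so bouncing a request through a flat driver buffer reduces to a loop over its segments.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bvec.h>

/* copy all of a request's data into a contiguous bounce buffer */
static void foo_copy_in(struct request *rq, char *buf)
{
	struct req_iterator iter;
	struct bio_vec bv;

	rq_for_each_segment(bv, rq, iter) {
		memcpy_from_bvec(buf, &bv);
		buf += bv.bv_len;
	}
}

/* ...and copy it back out on completion */
static void foo_copy_out(struct request *rq, char *buf)
{
	struct req_iterator iter;
	struct bio_vec bv;

	rq_for_each_segment(bv, rq, iter) {
		memcpy_to_bvec(&bv, buf);
		buf += bv.bv_len;
	}
}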
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 39d8759fe558..18eb4cfcef9a 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -315,9 +315,9 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
unchar target = scmd_id(SCpnt);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 92ea24a075b8..5d566d2b2997 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -572,8 +572,7 @@ ahd_linux_info(struct Scsi_Host *host)
/*
* Queue an SCB to the controller.
*/
-static int
-ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+static int ahd_linux_queue_lck(struct scsi_cmnd *cmd)
{
struct ahd_softc *ahd;
struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
@@ -581,7 +580,6 @@ ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
- cmd->scsi_done = scsi_done;
cmd->result = CAM_REQ_INPROG << 16;
rtn = ahd_linux_run_command(ahd, dev, cmd);
@@ -2111,7 +2109,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
ahd_cmd_set_transaction_status(cmd, new_status);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static void
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 35ec24f28d2c..679a4fd13874 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -196,7 +196,7 @@ int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
/*
* XXX
* ahd_dmamap_sync is only used on buffers allocated with
- * the pci_alloc_consistent() API. Although I'm not sure how
+ * the dma_alloc_coherent() API. Although I'm not sure how
* this works on architectures with a write buffer, Linux does
* not have an API to sync "coherent" memory. Perhaps we need
* to do an mb()?
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 8b3d472aa3cc..d3b1082654d5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -518,8 +518,7 @@ ahc_linux_info(struct Scsi_Host *host)
/*
* Queue an SCB to the controller.
*/
-static int
-ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+static int ahc_linux_queue_lck(struct scsi_cmnd *cmd)
{
struct ahc_softc *ahc;
struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
@@ -530,7 +529,6 @@ ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd
ahc_lock(ahc, &flags);
if (ahc->platform_data->qfrozen == 0) {
- cmd->scsi_done = scsi_done;
cmd->result = CAM_REQ_INPROG << 16;
rtn = ahc_linux_run_command(ahc, dev, cmd);
}
@@ -1986,7 +1984,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd)
ahc_cmd_set_transaction_status(cmd, new_status);
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static void
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 53240f53b654..4782a304e93c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -209,7 +209,7 @@ int ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
/*
* XXX
* ahc_dmamap_sync is only used on buffers allocated with
- * the pci_alloc_consistent() API. Although I'm not sure how
+ * the dma_alloc_coherent() API. Although I'm not sure how
* this works on architectures with a write buffer, Linux does
* not have an API to sync "coherent" memory. Perhaps we need
* to do an mb()?
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 6ce57f031df5..07df255c4b1b 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -1041,6 +1041,6 @@ extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
struct QBUFFER __iomem *);
extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *);
extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
-extern struct device_attribute *arcmsr_host_attrs[];
+extern const struct attribute_group *arcmsr_host_groups[];
extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 57be9609d504..baeb5e795690 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -58,8 +58,6 @@
#include <scsi/scsi_transport.h>
#include "arcmsr.h"
-struct device_attribute *arcmsr_host_attrs[];
-
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin,
@@ -389,16 +387,25 @@ static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_q
static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
-struct device_attribute *arcmsr_host_attrs[] = {
- &dev_attr_host_driver_version,
- &dev_attr_host_driver_posted_cmd,
- &dev_attr_host_driver_reset,
- &dev_attr_host_driver_abort,
- &dev_attr_host_fw_model,
- &dev_attr_host_fw_version,
- &dev_attr_host_fw_request_len,
- &dev_attr_host_fw_numbers_queue,
- &dev_attr_host_fw_sdram_size,
- &dev_attr_host_fw_hd_channels,
+static struct attribute *arcmsr_host_attrs[] = {
+ &dev_attr_host_driver_version.attr,
+ &dev_attr_host_driver_posted_cmd.attr,
+ &dev_attr_host_driver_reset.attr,
+ &dev_attr_host_driver_abort.attr,
+ &dev_attr_host_fw_model.attr,
+ &dev_attr_host_fw_version.attr,
+ &dev_attr_host_fw_request_len.attr,
+ &dev_attr_host_fw_numbers_queue.attr,
+ &dev_attr_host_fw_sdram_size.attr,
+ &dev_attr_host_fw_hd_channels.attr,
NULL,
};
+
+static const struct attribute_group arcmsr_host_attr_group = {
+ .attrs = arcmsr_host_attrs,
+};
+
+const struct attribute_group *arcmsr_host_groups[] = {
+ &arcmsr_host_attr_group,
+ NULL
+};
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ec1a834c922d..d3fb8a9c1c39 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -167,7 +167,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
.cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
- .shost_attrs = arcmsr_host_attrs,
+ .shost_groups = arcmsr_host_groups,
.no_write_same = 1,
};
@@ -1318,7 +1318,7 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
spin_lock_irqsave(&acb->ccblist_lock, flags);
list_add_tail(&ccb->list, &acb->ccb_free_list);
spin_unlock_irqrestore(&acb->ccblist_lock, flags);
- pcmd->scsi_done(pcmd);
+ scsi_done(pcmd);
}
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
@@ -1598,7 +1598,7 @@ static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_pci_unmap_dma(ccb);
- ccb->pcmd->scsi_done(ccb->pcmd);
+ scsi_done(ccb->pcmd);
}
}
for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
@@ -3192,7 +3192,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
if (cmd->device->lun) {
cmd->result = (DID_TIME_OUT << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return;
}
inqdata[0] = TYPE_PROCESSOR;
@@ -3216,23 +3216,22 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
break;
case WRITE_BUFFER:
case READ_BUFFER: {
if (arcmsr_iop_message_xfer(acb, cmd))
cmd->result = (DID_ERROR << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
break;
default:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
-static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
- void (* done)(struct scsi_cmnd *))
+static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
@@ -3241,10 +3240,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
cmd->result = (DID_NO_CONNECT << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
if (target == 16) {
@@ -3257,7 +3255,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
return SCSI_MLQUEUE_HOST_BUSY;
if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
arcmsr_post_ccb(acb, ccb);
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 0cc62c1b0825..81eb3bbdfc51 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -841,13 +841,10 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
}
}
- if (!SCpnt->scsi_done)
- panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no);
-
clear_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x7), host->busyluns);
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
} else
printk("scsi%d: null command in acornscsi_done", host->host->host_no);
@@ -2400,24 +2397,16 @@ acornscsi_intr(int irq, void *dev_id)
*/
/*
- * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd)
* Purpose : queues a SCSI command
* Params : cmd - SCSI command
- * done - function called on completion, with pointer to command descriptor
* Returns : 0, or < 0 on error.
*/
-static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
- if (!done) {
- /* there should be some way of rejecting errors like this without panicing... */
- panic("scsi%d: queuecommand called with NULL done function [cmd=%p]",
- host->host->host_no, SCpnt);
- return -EINVAL;
- }
-
#if (DEBUG & DEBUG_NO_WRITE)
if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
@@ -2428,7 +2417,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
}
#endif
- SCpnt->scsi_done = done;
SCpnt->host_scribble = NULL;
SCpnt->result = 0;
SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 591414120754..7f667c198f6d 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -243,6 +243,7 @@ static struct scsi_host_template arxescsi_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 0,
.this_id = 7,
.sg_tablesize = SG_ALL,
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 9dcd912267e6..3c00d7773876 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -363,6 +363,7 @@ static struct scsi_host_template cumanascsi2_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 5eb2415dda9d..1394590eecea 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -480,6 +480,7 @@ static struct scsi_host_template eesox_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index cf71ef488e36..7019b91f0ce6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2015,7 +2015,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* correctly by fas216_std_done.
*/
scsi_eh_restore_cmnd(SCpnt, &info->ses);
- SCpnt->scsi_done(SCpnt);
+ fas216_cmd_priv(SCpnt)->scsi_done(SCpnt);
}
/**
@@ -2086,8 +2086,8 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
}
done:
- if (SCpnt->scsi_done) {
- SCpnt->scsi_done(SCpnt);
+ if (fas216_cmd_priv(SCpnt)->scsi_done) {
+ fas216_cmd_priv(SCpnt)->scsi_done(SCpnt);
return;
}
@@ -2184,7 +2184,7 @@ no_command:
}
/**
- * fas216_queue_command - queue a command for adapter to process.
+ * fas216_queue_command_internal - queue a command for the adapter to process
* @SCpnt: Command to queue
* @done: done function to call once command is complete
*
@@ -2192,8 +2192,8 @@ no_command:
* Returns: 0 on success, else error.
* Notes: io_request_lock is held, interrupts are disabled.
*/
-static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int fas216_queue_command_internal(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
int result;
@@ -2203,7 +2203,7 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
fas216_log_command(info, LOG_CONNECT, SCpnt,
"received command (%p)", SCpnt);
- SCpnt->scsi_done = done;
+ fas216_cmd_priv(SCpnt)->scsi_done = done;
SCpnt->host_scribble = (void *)fas216_std_done;
SCpnt->result = 0;
@@ -2233,6 +2233,11 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
return result;
}
+static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt)
+{
+ return fas216_queue_command_internal(SCpnt, scsi_done);
+}
+
DEF_SCSI_QCMD(fas216_queue_command)
/**
@@ -2258,8 +2263,7 @@ static void fas216_internal_done(struct scsi_cmnd *SCpnt)
* Returns: scsi result code.
* Notes: io_request_lock is held, interrupts are disabled.
*/
-static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt)
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
@@ -2272,7 +2276,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
BUG_ON(info->scsi.irq);
info->internal_done = 0;
- fas216_queue_command_lck(SCpnt, fas216_internal_done);
+ fas216_queue_command_internal(SCpnt, fas216_internal_done);
/*
* This wastes time, since we can't return until the command is
@@ -2300,7 +2304,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
spin_lock_irq(info->host->host_lock);
- done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 847413ce14cf..abf960487314 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -310,6 +310,16 @@ typedef struct {
unsigned long magic_end;
} FAS216_Info;
+/* driver-private data per SCSI command. */
+struct fas216_cmd_priv {
+ void (*scsi_done)(struct scsi_cmnd *cmd);
+};
+
+static inline struct fas216_cmd_priv *fas216_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/* Function: int fas216_init (struct Scsi_Host *instance)
* Purpose : initialise FAS/NCR/AMD SCSI structures.
* Params : instance - a driver-specific filled-out structure
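
A brief aside (illustrative, with a made-up driver): the fas216 change relies on the midlayer's per-command private area. Setting .cmd_size in the scsi_host_template makes the core allocate that many extra bytes behind every scsi_cmnd, and scsi_cmd_priv() returns a pointer to them, which is where fas216 now parks its private done callback.

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct foo_cmd_priv {			/* hypothetical per-command state */
	void (*scsi_done)(struct scsi_cmnd *cmd);
	int phase;
};

static inline struct foo_cmd_priv *foo_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);	/* points just past the scsi_cmnd */
}

static struct scsi_host_template foo_template = {
	.module		= THIS_MODULE,
	.name		= "foo",
	.cmd_size	= sizeof(struct foo_cmd_priv),
	/* .queuecommand, eh_* handlers, etc. */
};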
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 9cc73da4e876..8fec435cee18 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -286,7 +286,7 @@ static struct scsi_host_template powertecscsi_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
-
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 8,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 9d179cd15bb8..dcd6fae65a88 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -512,7 +512,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
scsi_dma_unmap(workreq);
spin_lock_irqsave(dev->host->host_lock, flags);
- (*workreq->scsi_done) (workreq);
+ scsi_done(workreq);
#ifdef ED_DBGP
printk("workreq->scsi_done\n");
#endif
@@ -618,9 +618,9 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
-static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
- void (*done) (struct scsi_cmnd *))
+static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
unsigned char c;
unsigned int m;
struct atp_unit *dev;
@@ -654,17 +654,6 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
return 0;
}
- if (done) {
- req_p->scsi_done = done;
- } else {
-#ifdef ED_DBGP
- printk( "atp870u_queuecommand: done can't be NULL\n");
-#endif
- req_p->result = 0;
- done(req_p);
- return 0;
- }
-
/*
* Count new command
*/
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index e70f69f791db..ab55681145f8 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -163,17 +163,20 @@ DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
beiscsi_free_session_disp, NULL);
-static struct device_attribute *beiscsi_attrs[] = {
- &dev_attr_beiscsi_log_enable,
- &dev_attr_beiscsi_drvr_ver,
- &dev_attr_beiscsi_adapter_family,
- &dev_attr_beiscsi_fw_ver,
- &dev_attr_beiscsi_active_session_count,
- &dev_attr_beiscsi_free_session_count,
- &dev_attr_beiscsi_phys_port,
+
+static struct attribute *beiscsi_attrs[] = {
+ &dev_attr_beiscsi_log_enable.attr,
+ &dev_attr_beiscsi_drvr_ver.attr,
+ &dev_attr_beiscsi_adapter_family.attr,
+ &dev_attr_beiscsi_fw_ver.attr,
+ &dev_attr_beiscsi_active_session_count.attr,
+ &dev_attr_beiscsi_free_session_count.attr,
+ &dev_attr_beiscsi_phys_port.attr,
NULL,
};
+ATTRIBUTE_GROUPS(beiscsi);
+
static char const *cqe_desc[] = {
"RESERVED_DESC",
"SOL_CMD_COMPLETE",
@@ -391,7 +394,7 @@ static struct scsi_host_template beiscsi_sht = {
.eh_abort_handler = beiscsi_eh_abort,
.eh_device_reset_handler = beiscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_session_reset,
- .shost_attrs = beiscsi_attrs,
+ .shost_groups = beiscsi_groups,
.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
.can_queue = BE2_IO_DEPTH,
.this_id = -1,
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 5ae1e3f78910..c8b947c16069 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -956,36 +956,52 @@ static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
bfad_im_num_of_discovered_ports_show, NULL);
-struct device_attribute *bfad_im_host_attrs[] = {
- &dev_attr_serial_number,
- &dev_attr_model,
- &dev_attr_model_description,
- &dev_attr_node_name,
- &dev_attr_symbolic_name,
- &dev_attr_hardware_version,
- &dev_attr_driver_version,
- &dev_attr_option_rom_version,
- &dev_attr_firmware_version,
- &dev_attr_number_of_ports,
- &dev_attr_driver_name,
- &dev_attr_number_of_discovered_ports,
+static struct attribute *bfad_im_host_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_model.attr,
+ &dev_attr_model_description.attr,
+ &dev_attr_node_name.attr,
+ &dev_attr_symbolic_name.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_driver_version.attr,
+ &dev_attr_option_rom_version.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_number_of_ports.attr,
+ &dev_attr_driver_name.attr,
+ &dev_attr_number_of_discovered_ports.attr,
NULL,
};
-struct device_attribute *bfad_im_vport_attrs[] = {
- &dev_attr_serial_number,
- &dev_attr_model,
- &dev_attr_model_description,
- &dev_attr_node_name,
- &dev_attr_symbolic_name,
- &dev_attr_hardware_version,
- &dev_attr_driver_version,
- &dev_attr_option_rom_version,
- &dev_attr_firmware_version,
- &dev_attr_number_of_ports,
- &dev_attr_driver_name,
- &dev_attr_number_of_discovered_ports,
+static const struct attribute_group bfad_im_host_attr_group = {
+ .attrs = bfad_im_host_attrs
+};
+
+const struct attribute_group *bfad_im_host_groups[] = {
+ &bfad_im_host_attr_group,
+ NULL
+};
+
+struct attribute *bfad_im_vport_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_model.attr,
+ &dev_attr_model_description.attr,
+ &dev_attr_node_name.attr,
+ &dev_attr_symbolic_name.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_driver_version.attr,
+ &dev_attr_option_rom_version.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_number_of_ports.attr,
+ &dev_attr_driver_name.attr,
+ &dev_attr_number_of_discovered_ports.attr,
NULL,
};
+static const struct attribute_group bfad_im_vport_attr_group = {
+ .attrs = bfad_im_vport_attrs
+};
+const struct attribute_group *bfad_im_vport_groups[] = {
+ &bfad_im_vport_attr_group,
+ NULL
+};
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 6b5841b1c06e..759d2bb1ecdd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -96,7 +96,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
}
}
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
void
@@ -124,7 +124,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
}
}
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
void
@@ -226,7 +226,7 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
timeout *= 2;
}
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
bfa_trc(bfad, hal_io->iotag);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"scsi%d: complete abort 0x%p iotag 0x%x\n",
@@ -809,7 +809,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
- .shost_attrs = bfad_im_host_attrs,
+ .shost_groups = bfad_im_host_groups,
.max_sectors = BFAD_MAX_SECTORS,
.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
};
@@ -831,7 +831,7 @@ struct scsi_host_template bfad_im_vport_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
- .shost_attrs = bfad_im_vport_attrs,
+ .shost_groups = bfad_im_vport_groups,
.max_sectors = BFAD_MAX_SECTORS,
};
@@ -1199,9 +1199,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
/*
* Scsi_Host template entry, queue a SCSI command to the BFAD.
*/
-static int
-bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
@@ -1233,8 +1233,6 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
if (sg_cnt < 0)
return SCSI_MLQUEUE_HOST_BUSY;
- cmnd->scsi_done = done;
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
printk(KERN_WARNING
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index f16d4b219e44..829345b514d1 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -174,8 +174,8 @@ extern struct fc_function_template bfad_im_vport_fc_function_template;
extern struct scsi_transport_template *bfad_im_scsi_transport_template;
extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
-extern struct device_attribute *bfad_im_host_attrs[];
-extern struct device_attribute *bfad_im_vport_attrs[];
+extern const struct attribute_group *bfad_im_host_groups[];
+extern const struct attribute_group *bfad_im_vport_groups[];
irqreturn_t bfad_intx(int irq, void *dev_id);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 8863a74e6c57..71fa62bd3083 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2951,11 +2951,13 @@ bnx2fc_tm_timeout_store(struct device *dev,
static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show,
bnx2fc_tm_timeout_store);
-static struct device_attribute *bnx2fc_host_attrs[] = {
- &dev_attr_tm_timeout,
+static struct attribute *bnx2fc_host_attrs[] = {
+ &dev_attr_tm_timeout.attr,
NULL,
};
+ATTRIBUTE_GROUPS(bnx2fc_host);
+
/*
* scsi_host_template structure used while registering with SCSI-ml
*/
@@ -2977,7 +2979,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.max_sectors = 0x3fbf,
.track_queue_depth = 1,
.slave_configure = bnx2fc_slave_configure,
- .shost_attrs = bnx2fc_host_attrs,
+ .shost_groups = bnx2fc_host_groups,
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index f2996a9b2f63..b9114113ee73 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -205,7 +205,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
sc_cmd->allowed);
scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
}
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
@@ -1610,7 +1610,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
}
sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
if (io_req->wait_for_abts_comp) {
@@ -1853,7 +1853,7 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -2019,7 +2019,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
break;
}
sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 663a63d4dae4..df7d04afce05 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -795,7 +795,7 @@ extern struct cnic_ulp_ops bnx2i_cnic_cb;
extern unsigned int sq_size;
extern unsigned int rq_size;
-extern struct device_attribute *bnx2i_dev_attributes[];
+extern const struct attribute_group *bnx2i_dev_groups[];
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1b5f3e143f07..e21b053b4f3e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2266,7 +2266,7 @@ static struct scsi_host_template bnx2i_host_template = {
.cmd_per_lun = 128,
.this_id = -1,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
- .shost_attrs = bnx2i_dev_attributes,
+ .shost_groups = bnx2i_dev_groups,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index bea00073cb7c..d6b0bbb5176b 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -142,8 +142,17 @@ static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
bnx2i_show_ccell_info, bnx2i_set_ccell_info);
-struct device_attribute *bnx2i_dev_attributes[] = {
- &dev_attr_sq_size,
- &dev_attr_num_ccell,
+static struct attribute *bnx2i_dev_attributes[] = {
+ &dev_attr_sq_size.attr,
+ &dev_attr_num_ccell.attr,
+ NULL
+};
+
+static const struct attribute_group bnx2i_dev_attr_group = {
+ .attrs = bnx2i_dev_attributes
+};
+
+const struct attribute_group *bnx2i_dev_groups[] = {
+ &bnx2i_dev_attr_group,
NULL
};
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index dc98f51f466f..d5ac93897023 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -619,7 +619,7 @@ csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
struct fc_els_csp *csp;
struct fc_els_cssp *clsp;
enum fw_retval retval;
- __be32 nport_id;
+ __be32 nport_id = 0;
retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
if (retval != FW_SUCCESS) {
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 3b2eb6ce1fcf..55db02521221 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1460,14 +1460,16 @@ static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
csio_store_dbg_level);
-static struct device_attribute *csio_fcoe_lport_attrs[] = {
- &dev_attr_hw_state,
- &dev_attr_device_reset,
- &dev_attr_disable_port,
- &dev_attr_dbg_level,
+static struct attribute *csio_fcoe_lport_attrs[] = {
+ &dev_attr_hw_state.attr,
+ &dev_attr_device_reset.attr,
+ &dev_attr_disable_port.attr,
+ &dev_attr_dbg_level.attr,
NULL,
};
+ATTRIBUTE_GROUPS(csio_fcoe_lport);
+
static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1479,12 +1481,14 @@ csio_show_num_reg_rnodes(struct device *dev,
static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
-static struct device_attribute *csio_fcoe_vport_attrs[] = {
- &dev_attr_num_reg_rnodes,
- &dev_attr_dbg_level,
+static struct attribute *csio_fcoe_vport_attrs[] = {
+ &dev_attr_num_reg_rnodes.attr,
+ &dev_attr_dbg_level.attr,
NULL,
};
+ATTRIBUTE_GROUPS(csio_fcoe_vport);
+
static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
@@ -1720,7 +1724,7 @@ out:
}
cmnd->result = (((host_status) << 16) | scsi_status);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
/* Wake up waiting threads */
csio_scsi_cmnd(req) = NULL;
@@ -1748,7 +1752,7 @@ csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
}
cmnd->result = (((host_status) << 16) | scsi_status);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
csio_scsi_cmnd(req) = NULL;
CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
} else {
@@ -1876,7 +1880,7 @@ err:
return rv;
err_done:
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
@@ -1979,7 +1983,7 @@ inval_scmnd:
spin_unlock_irq(&hw->lock);
cmnd->result = (DID_ERROR << 16);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return FAILED;
}
@@ -2277,7 +2281,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .shost_attrs = csio_fcoe_lport_attrs,
+ .shost_groups = csio_fcoe_lport_groups,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
@@ -2296,7 +2300,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .shost_attrs = csio_fcoe_vport_attrs,
+ .shost_groups = csio_fcoe_vport_groups,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b2730e859df8..e7be95ee7d64 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -171,7 +171,7 @@ static void cmd_complete(struct afu_cmd *cmd)
dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
__func__, scp, scp->result, cmd->sa.ioasc);
- scp->scsi_done(scp);
+ scsi_done(scp);
} else if (cmd->cmd_tmf) {
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
@@ -205,7 +205,7 @@ static void flush_pending_cmds(struct hwq *hwq)
if (cmd->scp) {
scp = cmd->scp;
scp->result = (DID_IMM_RETRY << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
} else {
cmd->cmd_aborted = true;
@@ -601,7 +601,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
case STATE_FAILTERM:
dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
scp->result = (DID_NO_CONNECT << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
rc = 0;
goto out;
default:
@@ -3103,33 +3103,37 @@ static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);
-static struct device_attribute *cxlflash_host_attrs[] = {
- &dev_attr_port0,
- &dev_attr_port1,
- &dev_attr_port2,
- &dev_attr_port3,
- &dev_attr_lun_mode,
- &dev_attr_ioctl_version,
- &dev_attr_port0_lun_table,
- &dev_attr_port1_lun_table,
- &dev_attr_port2_lun_table,
- &dev_attr_port3_lun_table,
- &dev_attr_irqpoll_weight,
- &dev_attr_num_hwqs,
- &dev_attr_hwq_mode,
+static struct attribute *cxlflash_host_attrs[] = {
+ &dev_attr_port0.attr,
+ &dev_attr_port1.attr,
+ &dev_attr_port2.attr,
+ &dev_attr_port3.attr,
+ &dev_attr_lun_mode.attr,
+ &dev_attr_ioctl_version.attr,
+ &dev_attr_port0_lun_table.attr,
+ &dev_attr_port1_lun_table.attr,
+ &dev_attr_port2_lun_table.attr,
+ &dev_attr_port3_lun_table.attr,
+ &dev_attr_irqpoll_weight.attr,
+ &dev_attr_num_hwqs.attr,
+ &dev_attr_hwq_mode.attr,
NULL
};
+ATTRIBUTE_GROUPS(cxlflash_host);
+
/*
* Device attributes
*/
static DEVICE_ATTR_RO(mode);
-static struct device_attribute *cxlflash_dev_attrs[] = {
- &dev_attr_mode,
+static struct attribute *cxlflash_dev_attrs[] = {
+ &dev_attr_mode.attr,
NULL
};
+ATTRIBUTE_GROUPS(cxlflash_dev);
+
/*
* Host template
*/
@@ -3150,8 +3154,8 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = 1, /* No scatter gather support */
.max_sectors = CXLFLASH_MAX_SECTORS,
- .shost_attrs = cxlflash_host_attrs,
- .sdev_attrs = cxlflash_dev_attrs,
+ .shost_groups = cxlflash_host_groups,
+ .sdev_groups = cxlflash_dev_groups,
};
/*
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 24c7cefb0b78..9b8796c9e634 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -960,8 +960,9 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* and is expected to be held on return.
*
**/
-static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct DeviceCtlBlk *dcb;
struct ScsiReqBlk *srb;
struct AdapterCtlBlk *acb =
@@ -995,8 +996,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
goto complete;
}
- /* set callback and clear result in the command */
- cmd->scsi_done = done;
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);
@@ -3336,7 +3335,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
waiting_process_next(acb);
}
@@ -3367,7 +3366,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
if (force) {
/* For new EH, we normally don't need to give commands back,
* as they all complete or all time out */
- p->scsi_done(p);
+ scsi_done(p);
}
}
if (!list_empty(&dcb->srb_going_list))
@@ -3394,7 +3393,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
if (force) {
/* For new EH, we normally don't need to give commands back,
* as they all complete or all time out */
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
if (!list_empty(&dcb->srb_waiting_list))
@@ -4618,6 +4617,7 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* initialise the adapter and everything we need */
if (adapter_init(acb, io_port_base, io_port_len, irq)) {
dprintkl(KERN_INFO, "adapter init failed\n");
+ acb = NULL;
goto fail;
}
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 7af96d14c9bc..93227c04ef59 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -416,12 +416,11 @@ static int adpt_slave_configure(struct scsi_device * device)
return 0;
}
-static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
+static int adpt_queue_lck(struct scsi_cmnd *cmd)
{
adpt_hba* pHba = NULL;
struct adpt_device* pDev = NULL; /* dpt per device information */
- cmd->scsi_done = done;
/*
* SCSI REQUEST_SENSE commands will be executed automatically by the
* Host Adapter for any errors, so they should not be executed
@@ -431,7 +430,7 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -456,7 +455,7 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
cmd->result = (DID_NO_CONNECT << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
cmd->device->hostdata = pDev;
@@ -2227,7 +2226,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
pHba->name, cmd->cmnd[0]);
cmd->result = (DID_ERROR <<16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
}
@@ -2451,9 +2450,7 @@ static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
cmd->result |= (dev_status);
- if(cmd->scsi_done != NULL){
- cmd->scsi_done(cmd);
- }
+ scsi_done(cmd);
}
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index eab68fd9337a..b2b61bc45f12 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -541,11 +541,9 @@ efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, efct);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
rc = -1;
goto dma_mask_out;
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index 4d73e92909ab..8b004a5818d6 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -382,7 +382,7 @@ efct_lio_sg_map(struct efct_io *io)
struct efct_scsi_tgt_io *ocp = &io->tgt_io;
struct se_cmd *cmd = &ocp->cmd;
- ocp->seg_map_cnt = pci_map_sg(io->efct->pci, cmd->t_data_sg,
+ ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
cmd->t_data_nents, cmd->data_direction);
if (ocp->seg_map_cnt == 0)
return -EFAULT;
@@ -398,7 +398,7 @@ efct_lio_sg_unmap(struct efct_io *io)
if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
return;
- pci_unmap_sg(io->efct->pci, cmd->t_data_sg,
+ dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
ocp->seg_map_cnt, cmd->data_direction);
ocp->seg_map_cnt = 0;
}
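
For context (a condensed sketch, not from the patch): the efct hunks move from the legacy pci_set_dma_mask()/pci_map_sg() wrappers to the generic DMA API, which takes a struct device and expects the same nents on unmap that was passed to map.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

static int foo_set_dma(struct pci_dev *pdev)
{
	/* prefer 64-bit DMA, fall back to 32-bit, give up otherwise */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}

/* returns the number of mapped DMA segments, 0 on failure */
static int foo_map_sgl(struct device *dev, struct scatterlist *sgl, int nents)
{
	return dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
}

/* unmap with the same nents that was passed to dma_map_sg() */
static void foo_unmap_sgl(struct device *dev, struct scatterlist *sgl, int nents)
{
	dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
}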
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
index cf2e41dd354c..afb154992053 100644
--- a/drivers/scsi/elx/efct/efct_scsi.c
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -38,8 +38,6 @@ efct_scsi_io_alloc(struct efct_node *node)
xport = efct->xport;
- spin_lock_irqsave(&node->active_ios_lock, flags);
-
io = efct_io_pool_io_alloc(efct->xport->io_pool);
if (!io) {
efc_log_err(efct, "IO alloc Failed\n");
@@ -65,6 +63,7 @@ efct_scsi_io_alloc(struct efct_node *node)
/* Add to node's active_ios list */
INIT_LIST_HEAD(&io->list_entry);
+ spin_lock_irqsave(&node->active_ios_lock, flags);
list_add(&io->list_entry, &node->active_ios);
spin_unlock_irqrestore(&node->active_ios_lock, flags);
diff --git a/drivers/scsi/elx/libefc/efc.h b/drivers/scsi/elx/libefc/efc.h
index 927016283f41..468ff3cc9c00 100644
--- a/drivers/scsi/elx/libefc/efc.h
+++ b/drivers/scsi/elx/libefc/efc.h
@@ -47,6 +47,6 @@ enum efc_scsi_del_target_reason {
#define nport_sm_trace(nport) \
efc_log_debug(nport->efc, \
- "[%s] %-20s\n", nport->display_name, efc_sm_event_name(evt)) \
+ "[%s] %-20s %-20s\n", nport->display_name, __func__, efc_sm_event_name(evt)) \
#endif /* __EFC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
index 37e6697d86b8..f8665d48904a 100644
--- a/drivers/scsi/elx/libefc/efc_cmds.c
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -249,6 +249,7 @@ efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
{
struct efc_nport *nport = arg;
+ nport->attaching = false;
if (efc_nport_get_mbox_status(nport, mqe, status)) {
efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
return -EIO;
@@ -286,6 +287,8 @@ efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
if (rc) {
efc_log_err(efc, "REG_VPI command failure\n");
efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ } else {
+ nport->attaching = true;
}
return rc;
@@ -302,8 +305,10 @@ efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
/* Issue the UNREG_VPI command to free the assigned VPI context */
if (nport->attached)
efc_nport_free_unreg_vpi(nport);
- else
+ else if (nport->attaching)
nport->free_req_pending = true;
+ else
+ efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL);
return 0;
}
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
index 3270ce40196c..9661eea93aa1 100644
--- a/drivers/scsi/elx/libefc/efc_fabric.c
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -685,7 +685,7 @@ efc_process_gidpt_payload(struct efc_node *node,
}
/* Allocate a buffer for all nodes */
- active_nodes = kzalloc(port_count * sizeof(*active_nodes), GFP_ATOMIC);
+ active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
if (!active_nodes) {
node_printf(node, "efc_malloc failed\n");
return -EIO;
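
A quick note (sketch with an assumed element type): kcalloc() is the array counterpart of kzalloc() and returns NULL if count * size would overflow, which is what the efc_fabric hunk above is after.

#include <linux/slab.h>
#include <linux/types.h>

struct foo_node {			/* hypothetical element type */
	u32 fc_id;
};

static struct foo_node *foo_alloc_nodes(u32 port_count)
{
	/* zeroed array; NULL on allocation failure or multiplication overflow */
	return kcalloc(port_count, sizeof(struct foo_node), GFP_ATOMIC);
}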
diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
index ee291cabf7e0..dde20891c2dd 100644
--- a/drivers/scsi/elx/libefc/efclib.h
+++ b/drivers/scsi/elx/libefc/efclib.h
@@ -142,6 +142,7 @@ struct efc_nport {
bool is_vport;
bool free_req_pending;
bool attached;
+ bool attaching;
bool p2p_winner;
struct efc_domain *domain;
u64 wwpn;
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 647f82898b6e..7a4eadad23d7 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -828,7 +828,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -988,7 +988,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
scsi_set_resid(cmd, 0);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
@@ -1054,7 +1054,7 @@ check_active_queue:
scsi_set_resid(cmd, 0);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
@@ -1535,7 +1535,7 @@ void esas2r_complete_request_cb(struct esas2r_adapter *a,
scsi_set_resid(rq->cmd, 0);
}
- rq->cmd->scsi_done(rq->cmd);
+ scsi_done(rq->cmd);
esas2r_free_request(a, rq);
}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 9a8c037a2f21..57787537285a 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -936,7 +936,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
}
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
@@ -952,7 +952,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
scsi_track_queue_full(dev, lp->num_tagged - 1);
}
-static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int esp_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct scsi_device *dev = cmd->device;
struct esp *esp = shost_priv(dev->host);
@@ -965,8 +965,6 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
ent->cmd = cmd;
- cmd->scsi_done = done;
-
spriv = ESP_CMD_PRIV(cmd);
spriv->num_sg = 0;
@@ -2038,7 +2036,7 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
esp_unmap_sense(esp, ent);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
}
@@ -2061,7 +2059,7 @@ static void esp_reset_cleanup(struct esp *esp)
list_del(&ent->list);
cmd->result = DID_RESET << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
esp_put_ent(esp, ent);
}
@@ -2535,7 +2533,7 @@ static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
list_del(&ent->list);
cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
esp_put_ent(esp, ent);
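Dropping the done argument from the *_queuecommand_lck() functions works because DEF_SCSI_QCMD() still generates the host_lock-taking wrapper, and completion now always goes through scsi_done(). A hedged sketch of the shape these drivers converge on, with hypothetical demo_* names:

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static bool demo_adapter_dead;	/* placeholder for a real adapter state check */

static int demo_queuecommand_lck(struct scsi_cmnd *cmd)
{
	if (demo_adapter_dead) {
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);		/* complete immediately */
		return 0;
	}
	/* ...hand the command to hardware; the ISR later calls scsi_done(cmd)... */
	return 0;
}

static DEF_SCSI_QCMD(demo_queuecommand)	/* generates demo_queuecommand() for the template */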
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 5ae6c207d3ac..6415f88738ad 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -307,7 +307,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
}
/* Do not support for bonding device */
- if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
+ if (netif_is_bond_master(netdev)) {
FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
return -EOPNOTSUPP;
}
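The open-coded IFF_BONDING/IFF_MASTER test becomes netif_is_bond_master(), which wraps the same flag check. Minimal sketch:

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Sketch: refuse bonded master interfaces, as fcoe does above. */
static int demo_check_netdev(struct net_device *netdev)
{
	if (netif_is_bond_master(netdev))
		return -EOPNOTSUPP;	/* bonding not supported */
	return 0;
}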
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index eda2be534aa7..9159b4057c5d 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -206,7 +206,7 @@ static void fdomain_finish_cmd(struct fdomain *fd)
{
outb(0, fd->base + REG_ICTL);
fdomain_make_bus_idle(fd);
- fd->cur_cmd->scsi_done(fd->cur_cmd);
+ scsi_done(fd->cur_cmd);
fd->cur_cmd = NULL;
}
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 69f373b53132..b95d0063dedb 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -322,7 +322,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
-extern struct device_attribute *fnic_attrs[];
+extern const struct attribute_group *fnic_host_groups[];
void fnic_clear_intr_mode(struct fnic *fnic);
int fnic_set_intr_mode(struct fnic *fnic);
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index aea0c3becfd4..bbe2ca4971b2 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -48,9 +48,18 @@ static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
-struct device_attribute *fnic_attrs[] = {
- &dev_attr_fnic_state,
- &dev_attr_drv_version,
- &dev_attr_link_state,
+static struct attribute *fnic_host_attrs[] = {
+ &dev_attr_fnic_state.attr,
+ &dev_attr_drv_version.attr,
+ &dev_attr_link_state.attr,
NULL,
};
+
+static const struct attribute_group fnic_host_attr_group = {
+ .attrs = fnic_host_attrs
+};
+
+const struct attribute_group *fnic_host_groups[] = {
+ &fnic_host_attr_group,
+ NULL
+};
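This is the recurring sysfs conversion in the series: a NULL-terminated array of struct device_attribute pointers becomes an array of struct attribute pointers wrapped in an attribute_group, and the host template then points at a NULL-terminated group list via .shost_groups (see the fnic_main.c hunk that follows). A generic sketch with a hypothetical read-only attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_state_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "online\n");	/* hypothetical value */
}
static DEVICE_ATTR_RO(demo_state);

static struct attribute *demo_host_attrs[] = {
	&dev_attr_demo_state.attr,	/* note the .attr member, not the device_attribute */
	NULL,
};

static const struct attribute_group demo_host_attr_group = {
	.attrs = demo_host_attrs,
};

const struct attribute_group *demo_host_groups[] = {	/* goes into .shost_groups */
	&demo_host_attr_group,
	NULL,
};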
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 786f9d2704b6..44dbaa662d94 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -122,7 +122,7 @@ static struct scsi_host_template fnic_host_template = {
.can_queue = FNIC_DFLT_IO_REQ,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
- .shost_attrs = fnic_attrs,
+ .shost_groups = fnic_host_groups,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index f8afbfb468dc..88c549f257db 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -420,8 +420,9 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
* Routine to send a scsi cdb
* Called with host_lock held and interrupts disabled.
*/
-static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
const int tag = scsi_cmd_to_rq(sc)->tag;
struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
@@ -560,7 +561,6 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
CMD_SP(sc) = (char *)io_req;
CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
- sc->scsi_done = done;
/* create copy wq desc and enqueue it */
wq = &fnic->wq_copy[0];
@@ -1051,8 +1051,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
}
/* Call SCSI completion function to complete the IO */
- if (sc->scsi_done)
- sc->scsi_done(sc);
+ scsi_done(sc);
}
/* fnic_fcpio_itmf_cmpl_handler
@@ -1193,28 +1192,25 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done) {
- FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
- sc->device->host->host_no, id,
- sc,
- jiffies_to_msecs(jiffies - start_time),
- desc,
- (((u64)hdr_status << 40) |
- (u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) |
- CMD_STATE(sc)));
- sc->scsi_done(sc);
- atomic64_dec(&fnic_stats->io_stats.active_ios);
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
- }
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) |
+ CMD_STATE(sc)));
+ scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
}
-
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
CMD_LR_STATUS(sc) = hdr_status;
@@ -1421,23 +1417,22 @@ cleanup_scsi_cmd:
atomic64_inc(&fnic_stats->io_stats.io_completions);
/* Complete the command to SCSI */
- if (sc->scsi_done) {
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
- shost_printk(KERN_ERR, fnic->lport->host,
- "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
- tag, sc);
-
- FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, tag, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
-
- sc->scsi_done(sc);
- }
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
+ tag, sc);
+
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ scsi_done(sc);
+
return true;
}
@@ -1495,17 +1490,15 @@ wq_copy_cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
- if (sc->scsi_done) {
- FNIC_TRACE(fnic_wq_copy_cleanup_handler,
- sc->device->host->host_no, id, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
- sc->scsi_done(sc);
- }
+ scsi_done(sc);
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1931,16 +1924,14 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done) {
/* Call SCSI completion function to complete the IO */
- sc->result = (DID_ABORT << 16);
- sc->scsi_done(sc);
- atomic64_dec(&fnic_stats->io_stats.active_ios);
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
- }
+ sc->result = DID_ABORT << 16;
+ scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
fnic_abort_cmd_end:
FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
@@ -2153,11 +2144,10 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
* Any IO is returned during reset, it needs to call scsi_done
* to return the scsi_cmnd to upper layer.
*/
- if (sc->scsi_done) {
- /* Set result to let upper SCSI layer retry */
- sc->result = DID_RESET << 16;
- sc->scsi_done(sc);
- }
+ /* Set result to let upper SCSI layer retry */
+ sc->result = DID_RESET << 16;
+ scsi_done(sc);
+
return true;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 436d174f2194..2213a91923a5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -35,7 +35,7 @@
#define HISI_SAS_QUEUE_SLOTS 4096
#define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
-#define HISI_SAS_RESET_BIT 0
+#define HISI_SAS_RESETTING_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
#define HISI_SAS_PM_BIT 2
#define HISI_SAS_HW_FAULT_BIT 3
@@ -649,6 +649,7 @@ extern int hisi_sas_probe(struct platform_device *pdev,
extern int hisi_sas_remove(struct platform_device *pdev);
extern int hisi_sas_slave_configure(struct scsi_device *sdev);
+extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 9515c45affa5..f206c433de32 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -724,7 +724,7 @@ static int hisi_sas_init_device(struct domain_device *device)
*/
local_phy = sas_get_local_phy(device);
if (!scsi_is_sas_phy_local(local_phy) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
unsigned long deadline = ata_deadline(jiffies, 20000);
struct sata_device *sata_dev = &device->sata_dev;
struct ata_host *ata_host = sata_dev->ata_host;
@@ -756,6 +756,20 @@ static int hisi_sas_init_device(struct domain_device *device)
return rc;
}
+int hisi_sas_slave_alloc(struct scsi_device *sdev)
+{
+ struct domain_device *ddev;
+ int rc;
+
+ rc = sas_slave_alloc(sdev);
+ if (rc)
+ return rc;
+ ddev = sdev_to_domain_dev(sdev);
+
+ return hisi_sas_init_device(ddev);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
+
static int hisi_sas_dev_found(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
@@ -802,9 +816,6 @@ static int hisi_sas_dev_found(struct domain_device *device)
dev_info(dev, "dev[%d:%x] found\n",
sas_dev->device_id, sas_dev->dev_type);
- rc = hisi_sas_init_device(device);
- if (rc)
- goto err_out;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
@@ -1072,7 +1083,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
sas_dev->device_id, sas_dev->dev_type);
down(&hisi_hba->sem);
- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0, true);
@@ -1135,9 +1146,17 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
+ struct hisi_sas_phy *phy = container_of(sas_phy,
+ struct hisi_sas_phy, sas_phy);
struct sas_ha_struct *sas_ha = sas_phy->ha;
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ DECLARE_COMPLETION_ONSTACK(completion);
int phy_no = sas_phy->id;
+ u8 sts = phy->phy_attached;
+ int ret = 0;
+
+ phy->reset_completion = &completion;
switch (func) {
case PHY_FUNC_HARD_RESET:
@@ -1152,26 +1171,40 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_DISABLE:
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- break;
+ goto out;
case PHY_FUNC_SET_LINK_RATE:
- return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ break;
+
case PHY_FUNC_GET_EVENTS:
if (hisi_hba->hw->get_events) {
hisi_hba->hw->get_events(hisi_hba, phy_no);
- break;
+ goto out;
}
fallthrough;
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
- return 0;
+
+ if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) {
+ dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
+ phy_no, func);
+ if (phy->in_reset)
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ phy->reset_completion = NULL;
+
+ return ret;
}
static void hisi_sas_task_done(struct sas_task *task)
{
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
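hisi_sas_control_phy() above now arms phy->reset_completion before kicking off the phy operation and, if the phy was attached, waits up to two seconds for the phy-up interrupt to complete it. A simplified sketch of that handshake (hypothetical demo_* names, no locking around the shared pointer):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static struct completion *demo_done_slot;	/* armed by the waiter, read by the ISR */

static void demo_irq_phy_up(void)		/* called from the interrupt handler */
{
	if (demo_done_slot)
		complete(demo_done_slot);
}

static int demo_phy_op_and_wait(void (*start_op)(void))
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret = 0;

	demo_done_slot = &done;		/* arm before starting the operation */
	start_op();
	if (!wait_for_completion_timeout(&done, 2 * HZ))
		ret = -ETIMEDOUT;	/* no phy-up within 2 seconds */
	demo_done_slot = NULL;
	return ret;
}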
@@ -1229,7 +1262,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
dev_err(dev, "abort tmf: executing internal task failed: %d\n",
res);
goto ex_err;
@@ -1554,8 +1587,7 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
- if (timer_pending(&hisi_hba->timer))
- del_timer_sync(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
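Several hunks in this file replace del_timer(), and the timer_pending() check in front of it, with del_timer_sync(): it is a no-op for an inactive timer and also waits for a concurrently running callback to finish, so it can be called unconditionally before tearing the object down. Sketch:

#include <linux/timer.h>

struct demo_ctx {
	struct timer_list timer;
};

static void demo_teardown(struct demo_ctx *ctx)
{
	/* no timer_pending() guard needed; also waits out a running callback */
	del_timer_sync(&ctx->timer);
	/* ...now safe to free ctx and anything the callback touches... */
}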
@@ -1576,7 +1608,7 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
hisi_sas_reset_init_all_devices(hisi_hba);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
@@ -1587,7 +1619,7 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
if (!hisi_hba->hw->soft_reset)
return -1;
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
@@ -1611,7 +1643,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return rc;
}
@@ -1773,7 +1805,6 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
- DECLARE_COMPLETION_ONSTACK(phyreset);
int rc, reset_type;
if (!local_phy->enabled) {
@@ -1786,8 +1817,11 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy->lock, flags);
phy->in_reset = 1;
- phy->reset_completion = &phyreset;
+ spin_unlock_irqrestore(&phy->lock, flags);
}
reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
@@ -1801,17 +1835,14 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
- int ret = wait_for_completion_timeout(&phyreset,
- I_T_NEXUS_RESET_PHYUP_TIMEOUT);
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
- phy->reset_completion = NULL;
phy->in_reset = 0;
spin_unlock_irqrestore(&phy->lock, flags);
/* report PHY down if timed out */
- if (!ret)
+ if (rc == -ETIMEDOUT)
hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
/*
@@ -1839,14 +1870,33 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
}
hisi_sas_dereg_device(hisi_hba, device);
- if (dev_is_sata(device)) {
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
+ struct sas_phy *local_phy;
+
rc = hisi_sas_softreset_ata_disk(device);
- if (rc == TMF_RESP_FUNC_FAILED)
- return TMF_RESP_FUNC_FAILED;
+ switch (rc) {
+ case -ECOMM:
+ rc = -ENODEV;
+ break;
+ case TMF_RESP_FUNC_FAILED:
+ case -EMSGSIZE:
+ case -EIO:
+ local_phy = sas_get_local_phy(device);
+ rc = sas_phy_enable(local_phy, 0);
+ if (!rc) {
+ local_phy->enabled = 0;
+ dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
+ SAS_ADDR(device->sas_addr), rc);
+ rc = -ENODEV;
+ }
+ sas_put_local_phy(local_phy);
+ break;
+ default:
+ break;
+ }
}
- rc = hisi_sas_debug_I_T_nexus_reset(device);
-
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
hisi_sas_release_task(hisi_hba, device);
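The hunk above reorders I_T nexus reset handling: the nexus reset runs first, the ATA softreset is attempted only for SATA devices and only if the nexus reset succeeded, and certain softreset failures now disable the local phy (via the newly exported sas_phy_enable()) and downgrade the result to -ENODEV so outstanding tasks get released. A condensed control-flow sketch with stand-in constants and stubs, not the driver's real helpers:

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_OK	0			/* stands in for TMF_RESP_FUNC_COMPLETE */

static int demo_nexus_reset(void)	{ return DEMO_OK; }	/* stub */
static int demo_ata_softreset(void)	{ return DEMO_OK; }	/* stub */
static void demo_disable_local_phy(void) { }			/* stub */

static int demo_it_nexus_reset(bool is_sata)
{
	int rc = demo_nexus_reset();

	if (rc == DEMO_OK && is_sata) {
		rc = demo_ata_softreset();
		if (rc != DEMO_OK) {
			demo_disable_local_phy();	/* keep a broken link down */
			rc = -ENODEV;			/* caller releases tasks */
		}
	}
	return rc;
}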
@@ -2097,7 +2147,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
task, abort_flag, tag, dq);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
res);
goto exit;
@@ -2251,7 +2301,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
} else {
struct hisi_sas_port *port = phy->port;
- if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
+ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
@@ -2769,8 +2819,7 @@ int hisi_sas_remove(struct platform_device *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- if (timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index afe639994f3d..3059d19e4368 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1327,7 +1327,6 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
irqreturn_t res = IRQ_HANDLED;
- unsigned long flags;
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) {
@@ -1380,15 +1379,9 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
-
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
- complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
-
end:
+ if (phy->reset_completion)
+ complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
CHL_INT2_SL_PHY_ENA_MSK);
@@ -1422,7 +1415,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
@@ -1749,11 +1742,13 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
return 0;
}
-static struct device_attribute *host_attrs_v1_hw[] = {
- &dev_attr_phy_event_threshold,
+static struct attribute *host_v1_hw_attrs[] = {
+ &dev_attr_phy_event_threshold.attr,
NULL
};
+ATTRIBUTE_GROUPS(host_v1_hw);
+
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -1771,13 +1766,13 @@ static struct scsi_host_template sht_v1_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
- .slave_alloc = sas_slave_alloc,
+ .slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = host_attrs_v1_hw,
+ .shost_groups = host_v1_hw_groups,
.host_reset = hisi_sas_host_reset,
};
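ATTRIBUTE_GROUPS(host_v1_hw) above is the shorthand for the group boilerplate written out by hand in the fnic hunk earlier: given a host_v1_hw_attrs[] array it emits host_v1_hw_group and the NULL-terminated host_v1_hw_groups[] that .shost_groups expects. Usage sketch with a hypothetical prefix:

#include <linux/device.h>
#include <linux/sysfs.h>

static struct attribute *demo_host_attrs[] = {
	/* &dev_attr_something.attr, ... */
	NULL
};
ATTRIBUTE_GROUPS(demo_host);	/* defines demo_host_group and demo_host_groups[] */

/* demo_host_groups can then be assigned to scsi_host_template.shost_groups. */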
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b0b2361e63fe..64ed3e472e65 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2368,18 +2368,18 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case STAT_IO_COMPLETE:
/* internal abort command complete */
ts->stat = TMF_RESP_FUNC_SUCC;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NO_DEVICE:
ts->stat = TMF_RESP_FUNC_COMPLETE;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NOT_VALID:
/* abort single io, controller don't find
* the io need to abort
*/
ts->stat = TMF_RESP_FUNC_FAILED;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
default:
break;
@@ -2641,7 +2641,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct device *dev = hisi_hba->dev;
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
- unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@@ -2696,14 +2695,9 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
set_link_timer_quirk(hisi_hba);
}
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
- complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
-
end:
+ if (phy->reset_completion)
+ complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
@@ -2824,7 +2818,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -3204,7 +3198,6 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
irqreturn_t res = IRQ_HANDLED;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- unsigned long flags;
int phy_no, offset;
del_timer(&phy->timer);
@@ -3280,12 +3273,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
+ if (phy->reset_completion)
complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
@@ -3542,11 +3531,13 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
}
-static struct device_attribute *host_attrs_v2_hw[] = {
- &dev_attr_phy_event_threshold,
+static struct attribute *host_v2_hw_attrs[] = {
+ &dev_attr_phy_event_threshold.attr,
NULL
};
+ATTRIBUTE_GROUPS(host_v2_hw);
+
static int map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
@@ -3584,13 +3575,13 @@ static struct scsi_host_template sht_v2_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
- .slave_alloc = sas_slave_alloc,
+ .slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = host_attrs_v2_hw,
+ .shost_groups = host_v2_hw_groups,
.host_reset = hisi_sas_host_reset,
.map_queues = map_queues_v2_hw,
.host_tagset = 1,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 27884f3106ab..0ef6c21bf081 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -519,6 +519,8 @@ struct hisi_sas_err_record_v3 {
#define CHNL_INT_STS_INT2_MSK BIT(3)
#define CHNL_WIDTH 4
+#define BAR_NO_V3_HW 5
+
enum {
DSM_FUNC_ERR_HANDLE_MSI = 0,
};
@@ -1481,7 +1483,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
- unsigned long flags;
del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@@ -1563,13 +1564,9 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->phy_attached = 1;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
res = IRQ_HANDLED;
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
- complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
end:
+ if (phy->reset_completion)
+ complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
@@ -1616,7 +1613,7 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -2770,14 +2767,16 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
return 0;
}
-static struct device_attribute *host_attrs_v3_hw[] = {
- &dev_attr_phy_event_threshold,
- &dev_attr_intr_conv_v3_hw,
- &dev_attr_intr_coal_ticks_v3_hw,
- &dev_attr_intr_coal_count_v3_hw,
+static struct attribute *host_v3_hw_attrs[] = {
+ &dev_attr_phy_event_threshold.attr,
+ &dev_attr_intr_conv_v3_hw.attr,
+ &dev_attr_intr_coal_ticks_v3_hw.attr,
+ &dev_attr_intr_coal_count_v3_hw.attr,
NULL
};
+ATTRIBUTE_GROUPS(host_v3_hw);
+
#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
struct hisi_sas_debugfs_reg_lu {
@@ -3156,13 +3155,13 @@ static struct scsi_host_template sht_v3_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
- .slave_alloc = sas_slave_alloc,
+ .slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = host_attrs_v3_hw,
+ .shost_groups = host_v3_hw_groups,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
@@ -3687,7 +3686,6 @@ static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
do_div(timestamp, NSEC_PER_MSEC);
hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
- hisi_hba->debugfs_dump_index++;
debugfs_snapshot_prepare_v3_hw(hisi_hba);
@@ -3703,6 +3701,7 @@ static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
debugfs_create_files_v3_hw(hisi_hba);
debugfs_snapshot_restore_v3_hw(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
}
static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
@@ -4677,15 +4676,15 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct sas_ha_struct *sha;
int rc, phy_nr, port_nr, i;
- rc = pci_enable_device(pdev);
+ rc = pcim_enable_device(pdev);
if (rc)
goto err_out;
pci_set_master(pdev);
- rc = pci_request_regions(pdev, DRV_NAME);
+ rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME);
if (rc)
- goto err_out_disable_device;
+ goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
@@ -4693,20 +4692,20 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
- goto err_out_regions;
+ goto err_out;
}
shost = hisi_sas_shost_alloc_pci(pdev);
if (!shost) {
rc = -ENOMEM;
- goto err_out_regions;
+ goto err_out;
}
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
dev_set_drvdata(dev, sha);
- hisi_hba->regs = pcim_iomap(pdev, 5, 0);
+ hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW];
if (!hisi_hba->regs) {
dev_err(dev, "cannot map register\n");
rc = -ENOMEM;
@@ -4761,7 +4760,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = interrupt_preinit_v3_hw(hisi_hba);
if (rc)
goto err_out_debugfs;
- dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
+
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_free_irq_vectors;
@@ -4800,10 +4799,6 @@ err_out_debugfs:
err_out_ha:
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
-err_out_regions:
- pci_release_regions(pdev);
-err_out_disable_device:
- pci_disable_device(pdev);
err_out:
return rc;
}
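The v3 probe above switches to the device-managed PCI helpers: pcim_enable_device() ties the enable to driver detach, pcim_iomap_regions() requests and maps BAR 5 in one call, and pcim_iomap_table() hands back the mapping, which is why the pci_release_regions()/pci_disable_device() unwind labels disappear. A hedged probe skeleton:

#include <linux/errno.h>
#include <linux/pci.h>

#define DEMO_BAR	5	/* mirrors BAR_NO_V3_HW above */

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);	/* undone automatically on detach */
	if (rc)
		return rc;

	pci_set_master(pdev);

	rc = pcim_iomap_regions(pdev, 1 << DEMO_BAR, "demo");	/* request + map */
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[DEMO_BAR];
	return regs ? 0 : -ENOMEM;	/* no explicit region/enable cleanup needed */
}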
@@ -4833,16 +4828,13 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct Scsi_Host *shost = sha->core.shost;
pm_runtime_get_noresume(dev);
- if (timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
debugfs_exit_v3_hw(hisi_hba);
scsi_host_put(shost);
@@ -4856,7 +4848,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
- set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
rc = disable_host_v3_hw(hisi_hba);
@@ -4902,7 +4894,7 @@ static int _suspend_v3_hw(struct device *device)
return -ENODEV;
}
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
scsi_block_requests(shost);
@@ -4913,7 +4905,7 @@ static int _suspend_v3_hw(struct device *device)
if (rc) {
dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
return rc;
}
@@ -4952,7 +4944,7 @@ static int _resume_v3_hw(struct device *device)
}
phys_init_v3_hw(hisi_hba);
sas_resume_ha(sha);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return 0;
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 24b72ee4246f..cda0135c43db 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -377,7 +377,7 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
- int index;
+ int index, i, j = 0;
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
if (!shost)
@@ -476,12 +476,23 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
shost->shost_gendev.type = &scsi_host_type;
+ scsi_enable_async_suspend(&shost->shost_gendev);
device_initialize(&shost->shost_dev);
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
- shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
+ shost->shost_dev.groups = shost->shost_dev_attr_groups;
+ shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;
+ if (sht->shost_groups) {
+ for (i = 0; sht->shost_groups[i] &&
+ j < ARRAY_SIZE(shost->shost_dev_attr_groups);
+ i++, j++) {
+ shost->shost_dev_attr_groups[j] =
+ sht->shost_groups[i];
+ }
+ }
+ WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
@@ -667,7 +678,7 @@ static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
scsi_dma_unmap(scmd);
scmd->result = 0;
set_host_byte(scmd, status);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return true;
}
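scsi_host_alloc() now assembles the shost_dev attribute groups at runtime: the core scsi_shost_attr_group goes first, then whatever the template supplies via shost_groups, all copied into the fixed-size shost_dev_attr_groups[] array with a WARN if it would overflow. A generic sketch of that merge step (hypothetical helper, bounds passed in by the caller):

#include <linux/bug.h>
#include <linux/sysfs.h>
#include <linux/types.h>

/* dst is assumed zero-initialised (the host is kzalloc'ed), so it stays
 * NULL-terminated as long as the last slot is left untouched. */
static void demo_merge_groups(const struct attribute_group **dst, size_t dst_len,
			      const struct attribute_group *core,
			      const struct attribute_group **extra)
{
	size_t i, j = 0;

	dst[j++] = core;				/* core group always first */
	for (i = 0; extra && extra[i] && j < dst_len - 1; i++, j++)
		dst[j] = extra[i];			/* then the template's groups */
	WARN_ON_ONCE(extra && extra[i]);		/* ran out of slots */
}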
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 3faa87fa296a..cdf3328cc065 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -936,30 +936,34 @@ static DEVICE_ATTR(ctlr_num, S_IRUGO,
static DEVICE_ATTR(legacy_board, S_IRUGO,
host_show_legacy_board, NULL);
-static struct device_attribute *hpsa_sdev_attrs[] = {
- &dev_attr_raid_level,
- &dev_attr_lunid,
- &dev_attr_unique_id,
- &dev_attr_hp_ssd_smart_path_enabled,
- &dev_attr_path_info,
- &dev_attr_sas_address,
+static struct attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level.attr,
+ &dev_attr_lunid.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_hp_ssd_smart_path_enabled.attr,
+ &dev_attr_path_info.attr,
+ &dev_attr_sas_address.attr,
NULL,
};
-static struct device_attribute *hpsa_shost_attrs[] = {
- &dev_attr_rescan,
- &dev_attr_firmware_revision,
- &dev_attr_commands_outstanding,
- &dev_attr_transport_mode,
- &dev_attr_resettable,
- &dev_attr_hp_ssd_smart_path_status,
- &dev_attr_raid_offload_debug,
- &dev_attr_lockup_detected,
- &dev_attr_ctlr_num,
- &dev_attr_legacy_board,
+ATTRIBUTE_GROUPS(hpsa_sdev);
+
+static struct attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan.attr,
+ &dev_attr_firmware_revision.attr,
+ &dev_attr_commands_outstanding.attr,
+ &dev_attr_transport_mode.attr,
+ &dev_attr_resettable.attr,
+ &dev_attr_hp_ssd_smart_path_status.attr,
+ &dev_attr_raid_offload_debug.attr,
+ &dev_attr_lockup_detected.attr,
+ &dev_attr_ctlr_num.attr,
+ &dev_attr_legacy_board.attr,
NULL,
};
+ATTRIBUTE_GROUPS(hpsa_shost);
+
#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
HPSA_MAX_CONCURRENT_PASSTHRUS)
@@ -980,8 +984,8 @@ static struct scsi_host_template hpsa_driver_template = {
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
- .sdev_attrs = hpsa_sdev_attrs,
- .shost_attrs = hpsa_shost_attrs,
+ .sdev_groups = hpsa_sdev_groups,
+ .shost_groups = hpsa_shost_groups,
.max_sectors = 2048,
.no_write_same = 1,
};
@@ -2482,8 +2486,8 @@ static void hpsa_cmd_free_and_done(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd)
{
hpsa_cmd_resolve_and_free(h, c);
- if (cmd && cmd->scsi_done)
- cmd->scsi_done(cmd);
+ if (cmd)
+ scsi_done(cmd);
}
static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
@@ -5671,7 +5675,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
* if it encountered a dma mapping failure.
*/
cmd->result = DID_IMM_RETRY << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
@@ -5691,19 +5695,19 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
if (dev->removed) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
if (unlikely(lockup_detected(h))) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 61cda7b7624f..d04245e379d7 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -769,7 +769,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
skip_resid:
dprintk("scsi_done(%p)\n", scp);
- scp->scsi_done(scp);
+ scsi_done(scp);
free_req(hba, &hba->reqs[tag]);
}
@@ -993,8 +993,7 @@ static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
return 0;
}
-static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
- void (*done)(struct scsi_cmnd *))
+static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
{
struct Scsi_Host *host = scp->device->host;
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
@@ -1002,9 +1001,6 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
int sg_count = 0;
struct hptiop_request *_req;
- BUG_ON(!done);
- scp->scsi_done = done;
-
_req = get_req(hba);
if (_req == NULL) {
dprintk("hptiop_queuecmd : no free req\n");
@@ -1059,7 +1055,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
cmd_done:
dprintk("scsi_done(scp=%p)\n", scp);
- scp->scsi_done(scp);
+ scsi_done(scp);
return 0;
}
@@ -1150,12 +1146,14 @@ static struct device_attribute hptiop_attr_fw_version = {
.show = hptiop_show_fw_version,
};
-static struct device_attribute *hptiop_attrs[] = {
- &hptiop_attr_version,
- &hptiop_attr_fw_version,
+static struct attribute *hptiop_host_attrs[] = {
+ &hptiop_attr_version.attr,
+ &hptiop_attr_fw_version.attr,
NULL
};
+ATTRIBUTE_GROUPS(hptiop_host);
+
static int hptiop_slave_config(struct scsi_device *sdev)
{
if (sdev->type == TYPE_TAPE)
@@ -1172,7 +1170,7 @@ static struct scsi_host_template driver_template = {
.info = hptiop_info,
.emulated = 0,
.proc_name = driver_name,
- .shost_attrs = hptiop_attrs,
+ .shost_groups = hptiop_host_groups,
.slave_configure = hptiop_slave_config,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 01f79991bf4a..d0eab5700dc5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1046,7 +1046,7 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
if (cmnd) {
scsi_dma_unmap(cmnd);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
ibmvfc_free_event(evt);
@@ -1849,7 +1849,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->result = (DID_ERROR << 16);
scsi_dma_unmap(cmnd);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
ibmvfc_free_event(evt);
@@ -1935,7 +1935,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
cmnd->result = rc;
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
@@ -1975,7 +1975,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"Failed to map DMA buffer for command. rc=%d\n", rc);
cmnd->result = DID_ERROR << 16;
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
@@ -3589,18 +3589,20 @@ static struct bin_attribute ibmvfc_trace_attr = {
};
#endif
-static struct device_attribute *ibmvfc_attrs[] = {
- &dev_attr_partition_name,
- &dev_attr_device_name,
- &dev_attr_port_loc_code,
- &dev_attr_drc_name,
- &dev_attr_npiv_version,
- &dev_attr_capabilities,
- &dev_attr_log_level,
- &dev_attr_nr_scsi_channels,
+static struct attribute *ibmvfc_host_attrs[] = {
+ &dev_attr_partition_name.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_port_loc_code.attr,
+ &dev_attr_drc_name.attr,
+ &dev_attr_npiv_version.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_log_level.attr,
+ &dev_attr_nr_scsi_channels.attr,
NULL
};
+ATTRIBUTE_GROUPS(ibmvfc_host);
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IBM POWER Virtual FC Adapter",
@@ -3621,7 +3623,7 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = IBMVFC_MAX_SECTORS,
- .shost_attrs = ibmvfc_attrs,
+ .shost_groups = ibmvfc_host_groups,
.track_queue_depth = 1,
.host_tagset = 1,
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ea8e01f49cba..63f32f843e75 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -454,7 +454,7 @@ static int initialize_event_pool(struct event_pool *pool,
pool->iu_storage =
dma_alloc_coherent(hostdata->dev,
pool->size * sizeof(*pool->iu_storage),
- &pool->iu_token, 0);
+ &pool->iu_token, GFP_KERNEL);
if (!pool->iu_storage) {
kfree(pool->events);
return -ENOMEM;
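Passing a literal 0 as the gfp_t argument of dma_alloc_coherent() compiled but named no allocation context; spelling it GFP_KERNEL is correct here because the event pool is created from process context. Sketch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: coherent DMA buffer allocated from process context. */
static void *demo_alloc_pool(struct device *dev, size_t bytes, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, bytes, handle, GFP_KERNEL);
}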
@@ -1039,9 +1039,9 @@ static inline u16 lun_from_dev(struct scsi_device *dev)
* @cmnd: struct scsi_cmnd to be executed
* @done: Callback function to be called when cmd is completed
*/
-static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
- void (*done) (struct scsi_cmnd *))
+static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct;
struct srp_indirect_buf *indirect;
@@ -2065,18 +2065,20 @@ static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
return 0;
}
-static struct device_attribute *ibmvscsi_attrs[] = {
- &ibmvscsi_host_vhost_loc,
- &ibmvscsi_host_vhost_name,
- &ibmvscsi_host_srp_version,
- &ibmvscsi_host_partition_name,
- &ibmvscsi_host_partition_number,
- &ibmvscsi_host_mad_version,
- &ibmvscsi_host_os_type,
- &ibmvscsi_host_config,
+static struct attribute *ibmvscsi_host_attrs[] = {
+ &ibmvscsi_host_vhost_loc.attr,
+ &ibmvscsi_host_vhost_name.attr,
+ &ibmvscsi_host_srp_version.attr,
+ &ibmvscsi_host_partition_name.attr,
+ &ibmvscsi_host_partition_number.attr,
+ &ibmvscsi_host_mad_version.attr,
+ &ibmvscsi_host_os_type.attr,
+ &ibmvscsi_host_config.attr,
NULL
};
+ATTRIBUTE_GROUPS(ibmvscsi_host);
+
/* ------------------------------------------------------------
* SCSI driver registration
*/
@@ -2096,7 +2098,7 @@ static struct scsi_host_template driver_template = {
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .shost_attrs = ibmvscsi_attrs,
+ .shost_groups = ibmvscsi_host_groups,
};
/**
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 10b6c6daaacd..61f06f6885a5 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3948,41 +3948,16 @@ static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
NULL,
};
-static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
- char *page)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct ibmvscsis_tport *tport = container_of(se_tpg,
- struct ibmvscsis_tport,
- se_tpg);
- return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
-}
-
-static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
+static int ibmvscsis_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct ibmvscsis_tport *tport = container_of(se_tpg,
struct ibmvscsis_tport,
se_tpg);
struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
- unsigned long tmp;
- int rc;
long lrc;
- rc = kstrtoul(page, 0, &tmp);
- if (rc < 0) {
- dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
- return -EINVAL;
- }
-
- if ((tmp != 0) && (tmp != 1)) {
- dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
- return -EINVAL;
- }
-
- if (tmp) {
+ if (enable) {
spin_lock_bh(&vscsi->intr_lock);
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
@@ -3998,17 +3973,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
- dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
- vscsi->state);
-
- return count;
+ return 0;
}
-CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
-
-static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
- &ibmvscsis_tpg_attr_enable,
- NULL,
-};
static const struct target_core_fabric_ops ibmvscsis_ops = {
.module = THIS_MODULE,
@@ -4038,10 +4004,10 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
.fabric_make_wwn = ibmvscsis_make_tport,
.fabric_drop_wwn = ibmvscsis_drop_tport,
.fabric_make_tpg = ibmvscsis_make_tpg,
+ .fabric_enable_tpg = ibmvscsis_enable_tpg,
.fabric_drop_tpg = ibmvscsis_drop_tpg,
.tfc_wwn_attrs = ibmvscsis_wwn_attrs,
- .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs,
};
static void ibmvscsis_dev_release(struct device *dev) {};
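The ibmvscsis change drops the hand-rolled configfs enable show/store pair in favour of the target core's .fabric_enable_tpg callback, so the kstrtoul parsing and 0/1 validation live in common code and the fabric only sees a clean bool. A hedged sketch of the callback shape for a hypothetical fabric:

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static int demo_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
	/* bring the portal group up or down; return 0 or a -errno */
	return 0;
}

/* wired up in the fabric's target_core_fabric_ops as:
 *	.fabric_enable_tpg	= demo_enable_tpg,
 * replacing the old tfc_tpg_base_attrs "enable" attribute. */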
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 943c9102a7eb..8afdb4dba2be 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -769,7 +769,7 @@ static void imm_interrupt(struct work_struct *work)
spin_lock_irqsave(host->host_lock, flags);
dev->cur_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
spin_unlock_irqrestore(host->host_lock, flags);
return;
}
@@ -910,8 +910,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
-static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int imm_queuecommand_lck(struct scsi_cmnd *cmd)
{
imm_struct *dev = imm_dev(cmd->device->host);
@@ -922,7 +921,6 @@ static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
dev->failed = 0;
dev->jstart = jiffies;
dev->cur_cmd = cmd;
- cmd->scsi_done = done;
cmd->result = DID_ERROR << 16; /* default return code */
cmd->SCp.phase = 0; /* bus free */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 9b75e19a9bab..fd6da96bc51a 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2609,14 +2609,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
* will cause the mid layer to call us again later with the command)
*/
-static int i91u_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
struct scsi_ctrl_blk *cmnd;
- cmd->scsi_done = done;
-
cmnd = initio_alloc_scb(host);
if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2788,7 +2785,7 @@ static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
cmnd->result = cblk->tastat | (cblk->hastat << 16);
i91u_unmap_scb(host->pci_dev, cmnd);
- cmnd->scsi_done(cmnd); /* Notify system DONE */
+ scsi_done(cmnd); /* Notify system DONE */
initio_release_scb(host, cblk); /* Release SCB for current channel */
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5d78f7e939a3..104bee9b3a9d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -866,7 +866,7 @@ static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -4236,18 +4236,20 @@ static struct bin_attribute ipr_ioa_async_err_log = {
.write = ipr_next_async_err_log
};
-static struct device_attribute *ipr_ioa_attrs[] = {
- &ipr_fw_version_attr,
- &ipr_log_level_attr,
- &ipr_diagnostics_attr,
- &ipr_ioa_state_attr,
- &ipr_ioa_reset_attr,
- &ipr_update_fw_attr,
- &ipr_ioa_fw_type_attr,
- &ipr_iopoll_weight_attr,
+static struct attribute *ipr_ioa_attrs[] = {
+ &ipr_fw_version_attr.attr,
+ &ipr_log_level_attr.attr,
+ &ipr_diagnostics_attr.attr,
+ &ipr_ioa_state_attr.attr,
+ &ipr_ioa_reset_attr.attr,
+ &ipr_update_fw_attr.attr,
+ &ipr_ioa_fw_type_attr.attr,
+ &ipr_iopoll_weight_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ipr_ioa);
+
#ifdef CONFIG_SCSI_IPR_DUMP
/**
* ipr_read_dump - Dump the adapter
@@ -4732,15 +4734,17 @@ static struct device_attribute ipr_raw_mode_attr = {
.store = ipr_store_raw_mode
};
-static struct device_attribute *ipr_dev_attrs[] = {
- &ipr_adapter_handle_attr,
- &ipr_resource_path_attr,
- &ipr_device_id_attr,
- &ipr_resource_type_attr,
- &ipr_raw_mode_attr,
+static struct attribute *ipr_dev_attrs[] = {
+ &ipr_adapter_handle_attr.attr,
+ &ipr_resource_path_attr.attr,
+ &ipr_device_id_attr.attr,
+ &ipr_resource_type_attr.attr,
+ &ipr_raw_mode_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ipr_dev);
+
/**
* ipr_biosparam - Return the HSC mapping
* @sdev: scsi device struct
@@ -6065,7 +6069,7 @@ static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -6502,7 +6506,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -6531,7 +6535,7 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -6685,7 +6689,7 @@ err_nodev:
spin_lock_irqsave(hrrq->lock, hrrq_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
}
@@ -6762,8 +6766,8 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = IPR_MAX_SGLIST,
.max_sectors = IPR_IOA_MAX_SECTORS,
.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
- .shost_attrs = ipr_ioa_attrs,
- .sdev_attrs = ipr_dev_attrs,
+ .shost_groups = ipr_ioa_groups,
+ .sdev_groups = ipr_dev_groups,
.proc_name = IPR_NAME,
};
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index cdd94fb2aab7..498bf04499ce 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -936,7 +936,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@@ -946,7 +946,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
scsi_cmd->result = DID_ERROR;
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
ha->active = FALSE;
@@ -965,7 +965,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@@ -975,7 +975,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
scsi_cmd->result = DID_ERROR << 16;
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
ha->active = FALSE;
@@ -994,7 +994,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_RESET << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@@ -1035,8 +1035,9 @@ static int ips_eh_reset(struct scsi_cmnd *SC)
/* Linux obtains io_request_lock before calling this function */
/* */
/****************************************************************************/
-static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
+static int ips_queue_lck(struct scsi_cmnd *SC)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
ips_ha_t *ha;
ips_passthru_t *pt;
@@ -1064,8 +1065,6 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
return (0);
}
- SC->scsi_done = done;
-
DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
ips_name,
ha->host_num,
@@ -1099,7 +1098,7 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
ha->ioctl_reset = 1; /* This reset request is from an IOCTL */
__ips_eh_reset(SC);
SC->result = DID_OK << 16;
- SC->scsi_done(SC);
+ scsi_done(SC);
return (0);
}
@@ -2579,7 +2578,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -2587,7 +2586,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_OK << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -2712,7 +2711,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
if (scb->bus)
@@ -2723,7 +2722,7 @@ ips_next(ips_ha_t * ha, int intr)
break;
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd)
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
if (scb->bus)
ha->dcdb_active[scb->bus - 1] &=
@@ -3206,7 +3205,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -3214,7 +3213,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -3231,7 +3230,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
}
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index ffd33e5decae..aade707c5553 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -142,11 +142,13 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c
static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-static struct device_attribute *isci_host_attrs[] = {
- &dev_attr_isci_id,
+static struct attribute *isci_host_attrs[] = {
+ &dev_attr_isci_id.attr,
NULL
};
+ATTRIBUTE_GROUPS(isci_host);
+
static struct scsi_host_template isci_sht = {
.module = THIS_MODULE,
@@ -173,7 +175,7 @@ static struct scsi_host_template isci_sht = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = isci_host_attrs,
+ .shost_groups = isci_host_groups,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 8f4531f22ac2..cae168b8916f 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -182,8 +182,4 @@ void *isci_task_ssp_request_get_response_data_address(
u32 isci_task_ssp_request_get_response_data_length(
struct isci_request *request);
-int isci_queuecommand(
- struct scsi_cmnd *scsi_cmd,
- void (*donefunc)(struct scsi_cmnd *));
-
#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 509eacd7893d..871b11edb586 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1870,7 +1870,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -1880,7 +1880,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
goto out;
}
@@ -2087,7 +2087,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
list_del(&fsp->list);
sc_cmd->SCp.ptr = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
/* release ref from initial allocation in queue command */
fc_fcp_pkt_release(fsp);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5bc91d34df63..284b939fb1ea 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -468,7 +468,7 @@ static void iscsi_free_task(struct iscsi_task *task)
* it will decide how to return sc to scsi-ml.
*/
if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
- sc->scsi_done(sc);
+ scsi_done(sc);
}
}
@@ -1807,7 +1807,7 @@ fault:
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
scsi_set_resid(sc, scsi_bufflen(sc));
- sc->scsi_done(sc);
+ scsi_done(sc);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_queuecommand);
@@ -2950,6 +2950,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->tmf_state = TMF_INITIAL;
timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
mutex_init(&session->eh_mutex);
+ init_waitqueue_head(&session->ehwait);
spin_lock_init(&session->frwd_lock);
spin_lock_init(&session->back_lock);
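Moving init_waitqueue_head(&session->ehwait) into session setup initialises the waitqueue alongside the object that owns it, before any error-handling path could sleep on it. Generic sketch:

#include <linux/slab.h>
#include <linux/wait.h>

struct demo_session {
	wait_queue_head_t ehwait;
	/* ... */
};

static struct demo_session *demo_session_create(void)
{
	struct demo_session *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return NULL;
	/* initialise sync objects where the owner is created, not in a
	 * later, optional setup step */
	init_waitqueue_head(&s->ehwait);
	return s;
}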
@@ -3077,8 +3078,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
- init_waitqueue_head(&session->ehwait);
-
return cls_conn;
login_task_data_alloc_fail:
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 80592f53017a..b640e09af6a4 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -147,6 +147,7 @@ Undo_phys:
return error;
}
+EXPORT_SYMBOL_GPL(sas_register_ha);
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
@@ -176,6 +177,7 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_unregister_ha);
static int sas_get_linkerrors(struct sas_phy *phy)
{
@@ -252,7 +254,7 @@ static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
}
-static int sas_phy_enable(struct sas_phy *phy, int enable)
+int sas_phy_enable(struct sas_phy *phy, int enable)
{
int ret;
enum phy_func cmd;
@@ -284,6 +286,7 @@ static int sas_phy_enable(struct sas_phy *phy, int enable)
}
return ret;
}
+EXPORT_SYMBOL_GPL(sas_phy_enable);
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
@@ -313,6 +316,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
return ret;
}
+EXPORT_SYMBOL_GPL(sas_phy_reset);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates)
@@ -659,5 +663,3 @@ MODULE_LICENSE("GPL v2");
module_init(sas_class_init);
module_exit(sas_class_exit);
-EXPORT_SYMBOL_GPL(sas_register_ha);
-EXPORT_SYMBOL_GPL(sas_unregister_ha);
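The sas_init.c hunk above relocates each EXPORT_SYMBOL_GPL() so it sits directly below the function it exports, the usual kernel convention, instead of being grouped at the end of the file. A minimal sketch, with sas_example_op as a hypothetical helper:

#include <linux/errno.h>
#include <linux/export.h>

int sas_example_op(int arg)
{
	return arg ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(sas_example_op);	/* kept adjacent to the definition */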
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 08ffb8788290..d337fdf1b9ca 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -125,7 +125,7 @@ static void sas_scsi_task_done(struct sas_task *task)
}
sas_end_task(sc, task);
- sc->scsi_done(sc);
+ scsi_done(sc);
}
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
@@ -198,9 +198,10 @@ out_free_task:
else
cmd->result = DID_ERROR << 16;
out_done:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
+EXPORT_SYMBOL_GPL(sas_queuecommand);
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
@@ -511,6 +512,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
return FAILED;
}
+EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
@@ -532,6 +534,7 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
return FAILED;
}
+EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
@@ -790,6 +793,7 @@ int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(sas_ioctl);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
@@ -832,6 +836,7 @@ int sas_target_alloc(struct scsi_target *starget)
starget->hostdata = found_dev;
return 0;
}
+EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
@@ -860,6 +865,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_slave_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
@@ -872,6 +878,7 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
depth = 1;
return scsi_change_queue_depth(sdev, depth);
}
+EXPORT_SYMBOL_GPL(sas_change_queue_depth);
int sas_bios_param(struct scsi_device *scsi_dev,
struct block_device *bdev,
@@ -884,6 +891,7 @@ int sas_bios_param(struct scsi_device *scsi_dev,
return 0;
}
+EXPORT_SYMBOL_GPL(sas_bios_param);
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
@@ -910,6 +918,7 @@ void sas_task_abort(struct sas_task *task)
else
blk_abort_request(scsi_cmd_to_rq(sc));
}
+EXPORT_SYMBOL_GPL(sas_task_abort);
int sas_slave_alloc(struct scsi_device *sdev)
{
@@ -918,6 +927,7 @@ int sas_slave_alloc(struct scsi_device *sdev)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_slave_alloc);
void sas_target_destroy(struct scsi_target *starget)
{
@@ -929,6 +939,7 @@ void sas_target_destroy(struct scsi_target *starget)
starget->hostdata = NULL;
sas_put_device(found_dev);
}
+EXPORT_SYMBOL_GPL(sas_target_destroy);
#define SAS_STRING_ADDR_SIZE 16
@@ -956,15 +967,3 @@ out:
}
EXPORT_SYMBOL_GPL(sas_request_addr);
-EXPORT_SYMBOL_GPL(sas_queuecommand);
-EXPORT_SYMBOL_GPL(sas_target_alloc);
-EXPORT_SYMBOL_GPL(sas_slave_configure);
-EXPORT_SYMBOL_GPL(sas_change_queue_depth);
-EXPORT_SYMBOL_GPL(sas_bios_param);
-EXPORT_SYMBOL_GPL(sas_task_abort);
-EXPORT_SYMBOL_GPL(sas_phy_reset);
-EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
-EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
-EXPORT_SYMBOL_GPL(sas_slave_alloc);
-EXPORT_SYMBOL_GPL(sas_target_destroy);
-EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 337e6ed24821..2f8e6d0a926f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1029,6 +1029,7 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed
* capability
*/
+#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ebe417921dac..dd4c51b6ef4e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -6394,160 +6394,178 @@ LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
LPFC_VMID_PRIO_TAG_ALL_TARGETS,
"Enable Priority Tagging VMID support");
-struct device_attribute *lpfc_hba_attrs[] = {
- &dev_attr_nvme_info,
- &dev_attr_scsi_stat,
- &dev_attr_bg_info,
- &dev_attr_bg_guard_err,
- &dev_attr_bg_apptag_err,
- &dev_attr_bg_reftag_err,
- &dev_attr_info,
- &dev_attr_serialnum,
- &dev_attr_modeldesc,
- &dev_attr_modelname,
- &dev_attr_programtype,
- &dev_attr_portnum,
- &dev_attr_fwrev,
- &dev_attr_hdw,
- &dev_attr_option_rom_version,
- &dev_attr_link_state,
- &dev_attr_num_discovered_ports,
- &dev_attr_menlo_mgmt_mode,
- &dev_attr_lpfc_drvr_version,
- &dev_attr_lpfc_enable_fip,
- &dev_attr_lpfc_temp_sensor,
- &dev_attr_lpfc_log_verbose,
- &dev_attr_lpfc_lun_queue_depth,
- &dev_attr_lpfc_tgt_queue_depth,
- &dev_attr_lpfc_hba_queue_depth,
- &dev_attr_lpfc_peer_port_login,
- &dev_attr_lpfc_nodev_tmo,
- &dev_attr_lpfc_devloss_tmo,
- &dev_attr_lpfc_enable_fc4_type,
- &dev_attr_lpfc_fcp_class,
- &dev_attr_lpfc_use_adisc,
- &dev_attr_lpfc_first_burst_size,
- &dev_attr_lpfc_ack0,
- &dev_attr_lpfc_xri_rebalancing,
- &dev_attr_lpfc_topology,
- &dev_attr_lpfc_scan_down,
- &dev_attr_lpfc_link_speed,
- &dev_attr_lpfc_fcp_io_sched,
- &dev_attr_lpfc_ns_query,
- &dev_attr_lpfc_fcp2_no_tgt_reset,
- &dev_attr_lpfc_cr_delay,
- &dev_attr_lpfc_cr_count,
- &dev_attr_lpfc_multi_ring_support,
- &dev_attr_lpfc_multi_ring_rctl,
- &dev_attr_lpfc_multi_ring_type,
- &dev_attr_lpfc_fdmi_on,
- &dev_attr_lpfc_enable_SmartSAN,
- &dev_attr_lpfc_max_luns,
- &dev_attr_lpfc_enable_npiv,
- &dev_attr_lpfc_fcf_failover_policy,
- &dev_attr_lpfc_enable_rrq,
- &dev_attr_lpfc_fcp_wait_abts_rsp,
- &dev_attr_nport_evt_cnt,
- &dev_attr_board_mode,
- &dev_attr_max_vpi,
- &dev_attr_used_vpi,
- &dev_attr_max_rpi,
- &dev_attr_used_rpi,
- &dev_attr_max_xri,
- &dev_attr_used_xri,
- &dev_attr_npiv_info,
- &dev_attr_issue_reset,
- &dev_attr_lpfc_poll,
- &dev_attr_lpfc_poll_tmo,
- &dev_attr_lpfc_task_mgmt_tmo,
- &dev_attr_lpfc_use_msi,
- &dev_attr_lpfc_nvme_oas,
- &dev_attr_lpfc_nvme_embed_cmd,
- &dev_attr_lpfc_fcp_imax,
- &dev_attr_lpfc_force_rscn,
- &dev_attr_lpfc_cq_poll_threshold,
- &dev_attr_lpfc_cq_max_proc_limit,
- &dev_attr_lpfc_fcp_cpu_map,
- &dev_attr_lpfc_fcp_mq_threshold,
- &dev_attr_lpfc_hdw_queue,
- &dev_attr_lpfc_irq_chann,
- &dev_attr_lpfc_suppress_rsp,
- &dev_attr_lpfc_nvmet_mrq,
- &dev_attr_lpfc_nvmet_mrq_post,
- &dev_attr_lpfc_nvme_enable_fb,
- &dev_attr_lpfc_nvmet_fb_size,
- &dev_attr_lpfc_enable_bg,
- &dev_attr_lpfc_soft_wwnn,
- &dev_attr_lpfc_soft_wwpn,
- &dev_attr_lpfc_soft_wwn_enable,
- &dev_attr_lpfc_enable_hba_reset,
- &dev_attr_lpfc_enable_hba_heartbeat,
- &dev_attr_lpfc_EnableXLane,
- &dev_attr_lpfc_XLanePriority,
- &dev_attr_lpfc_xlane_lun,
- &dev_attr_lpfc_xlane_tgt,
- &dev_attr_lpfc_xlane_vpt,
- &dev_attr_lpfc_xlane_lun_state,
- &dev_attr_lpfc_xlane_lun_status,
- &dev_attr_lpfc_xlane_priority,
- &dev_attr_lpfc_sg_seg_cnt,
- &dev_attr_lpfc_max_scsicmpl_time,
- &dev_attr_lpfc_stat_data_ctrl,
- &dev_attr_lpfc_aer_support,
- &dev_attr_lpfc_aer_state_cleanup,
- &dev_attr_lpfc_sriov_nr_virtfn,
- &dev_attr_lpfc_req_fw_upgrade,
- &dev_attr_lpfc_suppress_link_up,
- &dev_attr_iocb_hw,
- &dev_attr_pls,
- &dev_attr_pt,
- &dev_attr_txq_hw,
- &dev_attr_txcmplq_hw,
- &dev_attr_lpfc_sriov_hw_max_virtfn,
- &dev_attr_protocol,
- &dev_attr_lpfc_xlane_supported,
- &dev_attr_lpfc_enable_mds_diags,
- &dev_attr_lpfc_ras_fwlog_buffsize,
- &dev_attr_lpfc_ras_fwlog_level,
- &dev_attr_lpfc_ras_fwlog_func,
- &dev_attr_lpfc_enable_bbcr,
- &dev_attr_lpfc_enable_dpp,
- &dev_attr_lpfc_enable_mi,
- &dev_attr_cmf_info,
- &dev_attr_lpfc_max_vmid,
- &dev_attr_lpfc_vmid_inactivity_timeout,
- &dev_attr_lpfc_vmid_app_header,
- &dev_attr_lpfc_vmid_priority_tagging,
+static struct attribute *lpfc_hba_attrs[] = {
+ &dev_attr_nvme_info.attr,
+ &dev_attr_scsi_stat.attr,
+ &dev_attr_bg_info.attr,
+ &dev_attr_bg_guard_err.attr,
+ &dev_attr_bg_apptag_err.attr,
+ &dev_attr_bg_reftag_err.attr,
+ &dev_attr_info.attr,
+ &dev_attr_serialnum.attr,
+ &dev_attr_modeldesc.attr,
+ &dev_attr_modelname.attr,
+ &dev_attr_programtype.attr,
+ &dev_attr_portnum.attr,
+ &dev_attr_fwrev.attr,
+ &dev_attr_hdw.attr,
+ &dev_attr_option_rom_version.attr,
+ &dev_attr_link_state.attr,
+ &dev_attr_num_discovered_ports.attr,
+ &dev_attr_menlo_mgmt_mode.attr,
+ &dev_attr_lpfc_drvr_version.attr,
+ &dev_attr_lpfc_enable_fip.attr,
+ &dev_attr_lpfc_temp_sensor.attr,
+ &dev_attr_lpfc_log_verbose.attr,
+ &dev_attr_lpfc_lun_queue_depth.attr,
+ &dev_attr_lpfc_tgt_queue_depth.attr,
+ &dev_attr_lpfc_hba_queue_depth.attr,
+ &dev_attr_lpfc_peer_port_login.attr,
+ &dev_attr_lpfc_nodev_tmo.attr,
+ &dev_attr_lpfc_devloss_tmo.attr,
+ &dev_attr_lpfc_enable_fc4_type.attr,
+ &dev_attr_lpfc_fcp_class.attr,
+ &dev_attr_lpfc_use_adisc.attr,
+ &dev_attr_lpfc_first_burst_size.attr,
+ &dev_attr_lpfc_ack0.attr,
+ &dev_attr_lpfc_xri_rebalancing.attr,
+ &dev_attr_lpfc_topology.attr,
+ &dev_attr_lpfc_scan_down.attr,
+ &dev_attr_lpfc_link_speed.attr,
+ &dev_attr_lpfc_fcp_io_sched.attr,
+ &dev_attr_lpfc_ns_query.attr,
+ &dev_attr_lpfc_fcp2_no_tgt_reset.attr,
+ &dev_attr_lpfc_cr_delay.attr,
+ &dev_attr_lpfc_cr_count.attr,
+ &dev_attr_lpfc_multi_ring_support.attr,
+ &dev_attr_lpfc_multi_ring_rctl.attr,
+ &dev_attr_lpfc_multi_ring_type.attr,
+ &dev_attr_lpfc_fdmi_on.attr,
+ &dev_attr_lpfc_enable_SmartSAN.attr,
+ &dev_attr_lpfc_max_luns.attr,
+ &dev_attr_lpfc_enable_npiv.attr,
+ &dev_attr_lpfc_fcf_failover_policy.attr,
+ &dev_attr_lpfc_enable_rrq.attr,
+ &dev_attr_lpfc_fcp_wait_abts_rsp.attr,
+ &dev_attr_nport_evt_cnt.attr,
+ &dev_attr_board_mode.attr,
+ &dev_attr_max_vpi.attr,
+ &dev_attr_used_vpi.attr,
+ &dev_attr_max_rpi.attr,
+ &dev_attr_used_rpi.attr,
+ &dev_attr_max_xri.attr,
+ &dev_attr_used_xri.attr,
+ &dev_attr_npiv_info.attr,
+ &dev_attr_issue_reset.attr,
+ &dev_attr_lpfc_poll.attr,
+ &dev_attr_lpfc_poll_tmo.attr,
+ &dev_attr_lpfc_task_mgmt_tmo.attr,
+ &dev_attr_lpfc_use_msi.attr,
+ &dev_attr_lpfc_nvme_oas.attr,
+ &dev_attr_lpfc_nvme_embed_cmd.attr,
+ &dev_attr_lpfc_fcp_imax.attr,
+ &dev_attr_lpfc_force_rscn.attr,
+ &dev_attr_lpfc_cq_poll_threshold.attr,
+ &dev_attr_lpfc_cq_max_proc_limit.attr,
+ &dev_attr_lpfc_fcp_cpu_map.attr,
+ &dev_attr_lpfc_fcp_mq_threshold.attr,
+ &dev_attr_lpfc_hdw_queue.attr,
+ &dev_attr_lpfc_irq_chann.attr,
+ &dev_attr_lpfc_suppress_rsp.attr,
+ &dev_attr_lpfc_nvmet_mrq.attr,
+ &dev_attr_lpfc_nvmet_mrq_post.attr,
+ &dev_attr_lpfc_nvme_enable_fb.attr,
+ &dev_attr_lpfc_nvmet_fb_size.attr,
+ &dev_attr_lpfc_enable_bg.attr,
+ &dev_attr_lpfc_soft_wwnn.attr,
+ &dev_attr_lpfc_soft_wwpn.attr,
+ &dev_attr_lpfc_soft_wwn_enable.attr,
+ &dev_attr_lpfc_enable_hba_reset.attr,
+ &dev_attr_lpfc_enable_hba_heartbeat.attr,
+ &dev_attr_lpfc_EnableXLane.attr,
+ &dev_attr_lpfc_XLanePriority.attr,
+ &dev_attr_lpfc_xlane_lun.attr,
+ &dev_attr_lpfc_xlane_tgt.attr,
+ &dev_attr_lpfc_xlane_vpt.attr,
+ &dev_attr_lpfc_xlane_lun_state.attr,
+ &dev_attr_lpfc_xlane_lun_status.attr,
+ &dev_attr_lpfc_xlane_priority.attr,
+ &dev_attr_lpfc_sg_seg_cnt.attr,
+ &dev_attr_lpfc_max_scsicmpl_time.attr,
+ &dev_attr_lpfc_stat_data_ctrl.attr,
+ &dev_attr_lpfc_aer_support.attr,
+ &dev_attr_lpfc_aer_state_cleanup.attr,
+ &dev_attr_lpfc_sriov_nr_virtfn.attr,
+ &dev_attr_lpfc_req_fw_upgrade.attr,
+ &dev_attr_lpfc_suppress_link_up.attr,
+ &dev_attr_iocb_hw.attr,
+ &dev_attr_pls.attr,
+ &dev_attr_pt.attr,
+ &dev_attr_txq_hw.attr,
+ &dev_attr_txcmplq_hw.attr,
+ &dev_attr_lpfc_sriov_hw_max_virtfn.attr,
+ &dev_attr_protocol.attr,
+ &dev_attr_lpfc_xlane_supported.attr,
+ &dev_attr_lpfc_enable_mds_diags.attr,
+ &dev_attr_lpfc_ras_fwlog_buffsize.attr,
+ &dev_attr_lpfc_ras_fwlog_level.attr,
+ &dev_attr_lpfc_ras_fwlog_func.attr,
+ &dev_attr_lpfc_enable_bbcr.attr,
+ &dev_attr_lpfc_enable_dpp.attr,
+ &dev_attr_lpfc_enable_mi.attr,
+ &dev_attr_cmf_info.attr,
+ &dev_attr_lpfc_max_vmid.attr,
+ &dev_attr_lpfc_vmid_inactivity_timeout.attr,
+ &dev_attr_lpfc_vmid_app_header.attr,
+ &dev_attr_lpfc_vmid_priority_tagging.attr,
NULL,
};
-struct device_attribute *lpfc_vport_attrs[] = {
- &dev_attr_info,
- &dev_attr_link_state,
- &dev_attr_num_discovered_ports,
- &dev_attr_lpfc_drvr_version,
- &dev_attr_lpfc_log_verbose,
- &dev_attr_lpfc_lun_queue_depth,
- &dev_attr_lpfc_tgt_queue_depth,
- &dev_attr_lpfc_nodev_tmo,
- &dev_attr_lpfc_devloss_tmo,
- &dev_attr_lpfc_hba_queue_depth,
- &dev_attr_lpfc_peer_port_login,
- &dev_attr_lpfc_restrict_login,
- &dev_attr_lpfc_fcp_class,
- &dev_attr_lpfc_use_adisc,
- &dev_attr_lpfc_first_burst_size,
- &dev_attr_lpfc_max_luns,
- &dev_attr_nport_evt_cnt,
- &dev_attr_npiv_info,
- &dev_attr_lpfc_enable_da_id,
- &dev_attr_lpfc_max_scsicmpl_time,
- &dev_attr_lpfc_stat_data_ctrl,
- &dev_attr_lpfc_static_vport,
- &dev_attr_cmf_info,
+static const struct attribute_group lpfc_hba_attr_group = {
+ .attrs = lpfc_hba_attrs
+};
+
+const struct attribute_group *lpfc_hba_groups[] = {
+ &lpfc_hba_attr_group,
+ NULL
+};
+
+static struct attribute *lpfc_vport_attrs[] = {
+ &dev_attr_info.attr,
+ &dev_attr_link_state.attr,
+ &dev_attr_num_discovered_ports.attr,
+ &dev_attr_lpfc_drvr_version.attr,
+ &dev_attr_lpfc_log_verbose.attr,
+ &dev_attr_lpfc_lun_queue_depth.attr,
+ &dev_attr_lpfc_tgt_queue_depth.attr,
+ &dev_attr_lpfc_nodev_tmo.attr,
+ &dev_attr_lpfc_devloss_tmo.attr,
+ &dev_attr_lpfc_hba_queue_depth.attr,
+ &dev_attr_lpfc_peer_port_login.attr,
+ &dev_attr_lpfc_restrict_login.attr,
+ &dev_attr_lpfc_fcp_class.attr,
+ &dev_attr_lpfc_use_adisc.attr,
+ &dev_attr_lpfc_first_burst_size.attr,
+ &dev_attr_lpfc_max_luns.attr,
+ &dev_attr_nport_evt_cnt.attr,
+ &dev_attr_npiv_info.attr,
+ &dev_attr_lpfc_enable_da_id.attr,
+ &dev_attr_lpfc_max_scsicmpl_time.attr,
+ &dev_attr_lpfc_stat_data_ctrl.attr,
+ &dev_attr_lpfc_static_vport.attr,
+ &dev_attr_cmf_info.attr,
NULL,
};
+static const struct attribute_group lpfc_vport_attr_group = {
+ .attrs = lpfc_vport_attrs
+};
+
+const struct attribute_group *lpfc_vport_groups[] = {
+ &lpfc_vport_attr_group,
+ NULL
+};
+
/**
* sysfs_ctlreg_write - Write method for writing to ctlreg
* @filp: open sysfs file
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index c512f4199142..89e36bf14d8f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -119,6 +119,8 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
+void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb);
int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
@@ -205,6 +207,7 @@ void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
void lpfc_update_vport_wwn(struct lpfc_vport *vport);
int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_sli4_refresh_params(struct lpfc_hba *phba);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
@@ -428,8 +431,8 @@ void lpfc_get_cfgparam(struct lpfc_hba *);
void lpfc_get_vport_cfgparam(struct lpfc_vport *);
int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
void lpfc_free_sysfs_attr(struct lpfc_vport *);
-extern struct device_attribute *lpfc_hba_attrs[];
-extern struct device_attribute *lpfc_vport_attrs[];
+extern const struct attribute_group *lpfc_hba_groups[];
+extern const struct attribute_group *lpfc_vport_groups[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_nvme;
extern struct fc_function_template lpfc_transport_functions;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 871b665bd72e..37a4b79010bf 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -85,6 +85,13 @@ enum lpfc_fc4_xpt_flags {
NLP_XPT_HAS_HH = 0x10
};
+enum lpfc_nlp_save_flags {
+ /* devloss occurred during recovery */
+ NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
+ /* wait for outstanding LOGO to cmpl */
+ NLP_WAIT_FOR_LOGO = 0x2,
+};
+
struct lpfc_nodelist {
struct list_head nlp_listp;
struct serv_parm fc_sparam; /* buffer for service params */
@@ -144,8 +151,9 @@ struct lpfc_nodelist {
unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
- u32 upcall_flags;
-#define NLP_WAIT_FOR_LOGO 0x2
+
+ /* flags to keep ndlp alive until special conditions are met */
+ enum lpfc_nlp_save_flags save_flags;
enum lpfc_fc4_xpt_flags fc4_xpt_flags;
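The lpfc_disc.h hunk above replaces the ad hoc upcall_flags word with the enum lpfc_nlp_save_flags bit set; the later hunks test and update those bits under ndlp->lock. A stripped-down sketch of that pattern, using hypothetical my_* names rather than the lpfc structures:

#include <linux/spinlock.h>

enum my_save_flags {
	MY_IN_RECOV_POST_DEV_LOSS = 0x1,	/* devloss occurred during recovery */
	MY_WAIT_FOR_LOGO	  = 0x2,	/* wait for outstanding LOGO to cmpl */
};

struct my_node {
	spinlock_t lock;
	enum my_save_flags save_flags;
};

static void my_mark_wait_for_logo(struct my_node *ndlp)
{
	unsigned long flags;

	spin_lock_irqsave(&ndlp->lock, flags);
	ndlp->save_flags |= MY_WAIT_FOR_LOGO;	/* flags change only under the lock */
	spin_unlock_irqrestore(&ndlp->lock, flags);
}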
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 052c0e5b1119..b940e0268f96 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1059,9 +1059,10 @@ stop_rr_fcf_flogi:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
"0150 FLOGI failure Status:x%x/x%x "
- "xri x%x TMO:x%x\n",
+ "xri x%x TMO:x%x refcnt %d\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout);
+ cmdiocb->sli4_xritag, irsp->ulpTimeout,
+ kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
@@ -1122,12 +1123,12 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully, I/O tag:x%x, "
- "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n",
+ "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
vport->port_state, vport->fc_flag,
- sp->cmn.priority_tagging);
+ sp->cmn.priority_tagging, kref_read(&ndlp->kref));
if (sp->cmn.priority_tagging)
vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
@@ -1205,8 +1206,6 @@ flogifail:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
- lpfc_nlp_put(ndlp);
if (!lpfc_error_lost_link(irsp)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -2330,6 +2329,13 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+ /*
+ * For P2P topology, retain the node so that PLOGI can be
+ * attempted on it again.
+ */
+ if (vport->fc_flag & FC_PT2PT)
+ goto out;
+
/* As long as this node is not registered with the SCSI
* or NVMe transport and no other PRLIs are outstanding,
* it is no longer an active node. Otherwise devloss
@@ -2899,9 +2905,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &(rspiocb->iocb);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) {
+ if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
wake_up_waiter = 1;
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
}
spin_unlock_irq(&ndlp->lock);
@@ -4571,6 +4577,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 1;
delay = 100;
break;
+ case IOERR_SLI_ABORTED:
+ /* Retry ELS PLOGI command?
+ * Possibly the rport just wasn't ready.
+ */
+ if (cmd == ELS_CMD_PLOGI) {
+ /* No retry if state change */
+ if (ndlp &&
+ ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
+ goto out_retry;
+ retry = 1;
+ maxretry = 2;
+ }
+ break;
}
break;
@@ -5296,6 +5315,7 @@ out:
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
(vport && vport->port_type == LPFC_NPIV_PORT) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
spin_lock_irq(&ndlp->lock);
@@ -5599,11 +5619,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
}
/* The NPIV instance is rejecting this unsolicited ELS. Make sure the
- * node's assigned RPI needs to be released as this node will get
- * freed.
+ * node's assigned RPI gets released provided this node is not already
+ * registered with the transport.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport->port_type == LPFC_NPIV_PORT) {
+ vport->port_type == LPFC_NPIV_PORT &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_RELEASE_RPI;
spin_unlock_irq(&ndlp->lock);
@@ -6216,6 +6237,7 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
* from backend
*/
lpfc_nlp_unreg_node(vport, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
continue;
}
@@ -10713,6 +10735,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4]);
goto fdisc_failed;
}
+
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
@@ -11385,6 +11410,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ struct lpfc_nodelist *ndlp = NULL;
unsigned long iflag = 0;
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
@@ -11392,7 +11418,20 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
lpfc_nlp_put(sglq_entry->ndlp);
+ ndlp = sglq_entry->ndlp;
sglq_entry->ndlp = NULL;
+
+ /* If the xri on the abts_els_sgl list is for the Fport
+ * node and the vport is unloading, the xri aborted wcqe
+ * likely isn't coming back. Just release the sgl.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ ndlp->nlp_DID == Fabric_DID) {
+ list_del(&sglq_entry->list);
+ sglq_entry->state = SGL_FREED;
+ list_add_tail(&sglq_entry->list,
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ }
}
}
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7195ca0275f9..9fe6e5b386ce 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -209,7 +209,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+ /* If a PLOGI is in progress and the NLP_NPR_2B_DISC flag is
+ * set, don't clear the flag.
+ */
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
/*
* The backend does not expect any more calls associated with this
@@ -341,6 +346,37 @@ static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
}
/**
+ * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to remote node object.
+ *
+ * If the NLP_IN_RECOV_POST_DEV_LOSS flag was set because the node was still
+ * recovering when dev_loss_tmo processing ran, this function reverses the
+ * lpfc_nlp_put kref decrement taken in lpfc_dev_loss_tmo_handler.
+ **/
+void
+lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
+ ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_get(ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
+ "8438 Devloss timeout reversed on DID x%x "
+ "refcnt %d ndlp %p flag x%x "
+ "port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
+ ndlp->nlp_flag, vport->port_state);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ }
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+}
+
+/**
* lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
* @ndlp: Pointer to remote node object.
*
@@ -358,6 +394,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
uint8_t *name;
int warn_on = 0;
int fcf_inuse = 0;
+ bool recovering = false;
+ struct fc_vport *fc_vport = NULL;
unsigned long iflags;
vport = ndlp->vport;
@@ -394,6 +432,64 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
/* Fabric nodes are done. */
if (ndlp->nlp_type & NLP_FABRIC) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* In massive vport configuration settings it's possible that
+ * dev_loss_tmo fired during node recovery, so check whether the
+ * fabric node still has discovery state outstanding.
+ */
+ switch (ndlp->nlp_DID) {
+ case Fabric_DID:
+ fc_vport = vport->fc_vport;
+ if (fc_vport &&
+ fc_vport->vport_state == FC_VPORT_INITIALIZING)
+ recovering = true;
+ break;
+ case Fabric_Cntl_DID:
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ recovering = true;
+ break;
+ case FDMI_DID:
+ fallthrough;
+ case NameServer_DID:
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
+ recovering = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ /* Set the NLP_IN_RECOV_POST_DEV_LOSS flag so the driver knows the
+ * following lpfc_nlp_put must be reversed once the fabric node
+ * has recovered.
+ */
+ if (recovering) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
+ "8436 Devloss timeout marked on "
+ "DID x%x refcnt %d ndlp %p "
+ "flag x%x port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp, ndlp->nlp_flag,
+ vport->port_state);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ /* Fabric node fully recovered before this dev_loss_tmo
+ * queue work is processed. Thus, ignore the
+ * dev_loss_tmo event.
+ */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
+ "8437 Devloss timeout ignored on "
+ "DID x%x refcnt %d ndlp %p "
+ "flag x%x port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp, ndlp->nlp_flag,
+ vport->port_state);
+ return fcf_inuse;
+ }
+
lpfc_nlp_put(ndlp);
return fcf_inuse;
}
@@ -423,6 +519,14 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
+ /* If devloss has fired but we are in the process of rediscovering
+ * the ndlp, don't issue an NLP_EVT_DEVICE_RM event.
+ */
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
+ return fcf_inuse;
+ }
+
if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
@@ -966,8 +1070,20 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct lpfc_nodelist *ndlp, *next_ndlp;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* It's possible the FLOGI to the fabric node never
+ * successfully completed and never registered with the
+ * transport. In this case there is no way to clean up
+ * the node.
+ */
+ if (ndlp->nlp_DID == Fabric_DID) {
+ if (ndlp->nlp_prev_state ==
+ NLP_STE_UNUSED_NODE &&
+ !ndlp->fc4_xpt_flags)
+ lpfc_nlp_put(ndlp);
+ }
continue;
+ }
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
((vport->port_type == LPFC_NPIV_PORT) &&
@@ -4351,6 +4467,8 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto out;
}
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
@@ -4360,6 +4478,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_state);
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -4449,8 +4568,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 %s rport x%px DID x%x, role x%x\n",
- __func__, rport, rport->port_id, rport->roles);
+ "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
+ __func__, rport, rport->port_id, rport->roles,
+ kref_read(&ndlp->kref));
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
@@ -4475,8 +4595,9 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3184 rport unregister x%06x, rport x%px "
- "xptflg x%x\n",
- ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags);
+ "xptflg x%x refcnt %d\n",
+ ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
fc_remote_port_delete(rport);
lpfc_nlp_put(ndlp);
@@ -4525,9 +4646,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
void
lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
-
unsigned long iflags;
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
spin_lock_irqsave(&ndlp->lock, iflags);
if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
/* Already registered with backend, trigger rescan */
@@ -4679,8 +4801,11 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Reg/Unreg for FCP and NVME Transport interface */
if ((old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- /* For nodes marked for ADISC, Handle unreg in ADISC cmpl */
- if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+ /* For nodes marked for ADISC, handle the unreg in the ADISC cmpl
+ * if the link is up. If the link is down, do unreg_node here.
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
+ !lpfc_is_link_up(vport->phba))
lpfc_nlp_unreg_node(vport, ndlp);
}
@@ -5233,6 +5358,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 7359505e6041..6ec42991d2ab 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -673,6 +673,10 @@ struct lpfc_register {
#define lpfc_sliport_status_rdy_SHIFT 23
#define lpfc_sliport_status_rdy_MASK 0x1
#define lpfc_sliport_status_rdy_WORD word0
+#define lpfc_sliport_status_pldv_SHIFT 0
+#define lpfc_sliport_status_pldv_MASK 0x1
+#define lpfc_sliport_status_pldv_WORD word0
+#define CFG_PLD 0x3C
#define MAX_IF_TYPE_2_RESETS 6
#define LPFC_CTL_PORT_CTL_OFFSET 0x408
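The lpfc_hw4.h hunk above adds the PLDV status bit as a SHIFT/MASK/WORD triple, the form lpfc register fields take so they can be extracted by its bf_get() helper. The sketch below only illustrates that convention with a hypothetical my_bf_get(); it is not lpfc's actual macro:

#include <linux/types.h>

#define my_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct my_register {
	u32 word0;
};

#define my_status_pldv_SHIFT	0
#define my_status_pldv_MASK	0x1
#define my_status_pldv_WORD	word0

static bool my_pldv_set(struct my_register *reg)
{
	return my_bf_get(my_status_pldv, reg);	/* bit 0 of word0 */
}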
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 195169badb37..ba17a8f740a9 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -68,6 +68,7 @@
static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
+static bool lpfc_pldv_detect;
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
@@ -662,6 +663,50 @@ lpfc_config_port_post(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_refresh_params - update driver copy of params.
+ * @phba: Pointer to HBA context object.
+ *
+ * This is called to refresh driver copy of dynamic fields from the
+ * common_get_sli4_parameters descriptor.
+ **/
+int
+lpfc_sli4_refresh_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mqe *mqe;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ int length, rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ mqe = &mboxq->u.mqe;
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+ }
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+ phba->sli4_hba.pc_sli4_params.mi_ver =
+ bf_get(cfg_mi_ver, mbx_sli4_parameters);
+ phba->sli4_hba.pc_sli4_params.cmf =
+ bf_get(cfg_cmf, mbx_sli4_parameters);
+ phba->sli4_hba.pc_sli4_params.pls =
+ bf_get(cfg_pvl, mbx_sli4_parameters);
+
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
* lpfc_hba_init_link - Initialize the FC link
* @phba: pointer to lpfc hba data structure.
* @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
@@ -1606,6 +1651,11 @@ void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
+ if (phba->link_state == LPFC_HBA_ERROR &&
+ phba->hba_flag & HBA_PCI_ERR) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
phba->link_state = LPFC_HBA_ERROR;
spin_unlock_irq(&phba->hbalock);
@@ -1945,7 +1995,6 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
- lpfc_sli4_offline_eratt(phba);
return;
}
@@ -3643,6 +3692,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
+ int offline = 0;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3651,6 +3701,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
+ offline = pci_channel_offline(phba->pcidev);
+
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@@ -3673,7 +3725,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
- lpfc_unreg_rpi(vports[i], ndlp);
+ if (offline) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_UNREG_INP |
+ NLP_RPI_REGISTERED);
+ spin_unlock_irq(&ndlp->lock);
+ } else {
+ lpfc_unreg_rpi(vports[i], ndlp);
+ }
/*
* Whenever an SLI4 port goes offline, free the
* RPI. Get a new RPI when the adapter port
@@ -3694,12 +3753,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_disc_state_machine(vports[i], ndlp,
NULL, NLP_EVT_DEVICE_RECOVERY);
- /* Don't remove the node unless the
+ /* Don't remove the node unless the node
* has been unregistered with the
- * transport. If so, let dev_loss
- * take care of the node.
+ * transport, and we're not in recovery
+ * before dev_loss_tmo triggered.
+ * Otherwise, let dev_loss take care of
+ * the node.
*/
- if (!(ndlp->fc4_xpt_flags &
+ if (!(ndlp->save_flags &
+ NLP_IN_RECOV_POST_DEV_LOSS) &&
+ !(ndlp->fc4_xpt_flags &
(NVME_XPT_REGD | SCSI_XPT_REGD)))
lpfc_disc_state_machine
(vports[i], ndlp,
@@ -4559,7 +4622,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Template for all vports this physical port creates */
memcpy(&phba->vport_template, &lpfc_template,
sizeof(*template));
- phba->vport_template.shost_attrs = lpfc_vport_attrs;
+ phba->vport_template.shost_groups = lpfc_vport_groups;
phba->vport_template.eh_bus_reset_handler = NULL;
phba->vport_template.eh_host_reset_handler = NULL;
phba->vport_template.vendor_id = 0;
@@ -5862,7 +5925,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
uint32_t io_cnt;
uint32_t head, tail;
uint32_t busy, max_read;
- uint64_t total, rcv, lat, mbpi;
+ uint64_t total, rcv, lat, mbpi, extra;
int timer_interval = LPFC_CMF_INTERVAL;
uint32_t ms;
struct lpfc_cgn_stat *cgs;
@@ -5929,7 +5992,19 @@ lpfc_cmf_timer(struct hrtimer *timer)
phba->hba_flag & HBA_SETUP) {
mbpi = phba->cmf_last_sync_bw;
phba->cmf_last_sync_bw = 0;
- lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
+ extra = 0;
+
+ /* Calculate any extra bytes needed to account for timer
+ * accuracy. If ms is less than LPFC_CMF_INTERVAL, add an extra
+ * 3% slop factor; if ms equals LPFC_CMF_INTERVAL, add an extra
+ * 2%. The goal is to equalize total for a time that is
+ * > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1.
+ */
+ if (ms == LPFC_CMF_INTERVAL)
+ extra = div_u64(total, 50);
+ else if (ms < LPFC_CMF_INTERVAL)
+ extra = div_u64(total, 33);
+ lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
} else {
/* For Monitor mode or link down we want mbpi
* to be the full link speed
@@ -6428,6 +6503,12 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"3194 Unable to retrieve supported "
"speeds, rc = 0x%x\n", rc);
}
+ rc = lpfc_sli4_refresh_params(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3174 Unable to update pls support, "
+ "rc x%x\n", rc);
+ }
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
@@ -6538,7 +6619,7 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
/* Cannot find existing Fabric ndlp, so allocate a new one */
ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
- return 0;
+ return NULL;
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
@@ -7358,7 +7439,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
out_disable_device:
pci_disable_device(pdev);
out_error:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1401 Failed to enable pci device\n");
return -ENODEV;
}
@@ -8401,7 +8482,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_stop_port = lpfc_stop_port_s4;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1431 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
@@ -9333,7 +9414,15 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
phba->work_status[0],
phba->work_status[1]);
port_error = -ENODEV;
+ break;
}
+
+ if (lpfc_pldv_detect &&
+ bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)
+ pci_write_config_byte(phba->pcidev,
+ LPFC_SLI_INTF, CFG_PLD);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -11541,6 +11630,9 @@ wait:
goto out;
}
+ if (bf_get(lpfc_sliport_status_pldv, &reg_data))
+ lpfc_pldv_detect = true;
+
if (!port_reset) {
/*
* Reset the port now
@@ -11623,7 +11715,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
/* There is no SLI3 failback for SLI4 devices. */
if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_VALID) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2894 SLI_INTF reg contents invalid "
"sli_intf reg 0x%x\n",
phba->sli4_hba.sli_intf.word0);
@@ -13368,8 +13460,6 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
atomic_set(&phba->cgn_sync_alarm_cnt, 0);
atomic_set(&phba->cgn_sync_warn_cnt, 0);
- atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
- atomic64_set(&phba->cgn_acqe_stat.warn, 0);
atomic_set(&phba->cgn_driver_evt_cnt, 0);
atomic_set(&phba->cgn_latency_evt_cnt, 0);
atomic64_set(&phba->cgn_latency_evt, 0);
@@ -14080,6 +14170,10 @@ lpfc_pci_resume_one_s3(struct device *dev_d)
return error;
}
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -15033,14 +15127,17 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ phba->hba_flag |= HBA_PCI_ERR;
/* Fatal error, prepare for slot reset */
lpfc_sli4_prep_dev_for_reset(phba);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
+ phba->hba_flag |= HBA_PCI_ERR;
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
+ phba->hba_flag |= HBA_PCI_ERR;
/* Unknown state, prepare and request slot reset */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
@@ -15084,6 +15181,7 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
pci_restore_state(pdev);
+ phba->hba_flag &= ~HBA_PCI_ERR;
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -15106,6 +15204,7 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
} else
phba->intr_mode = intr_mode;
+ lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
/* Log the current active interrupt mode */
lpfc_log_intr_mode(phba, phba->intr_mode);
@@ -15307,6 +15406,10 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+ if (phba->link_state == LPFC_HBA_ERROR &&
+ phba->hba_flag & HBA_IOQ_FLUSH)
+ return PCI_ERS_RESULT_NEED_RESET;
+
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
rc = lpfc_io_error_detected_s3(pdev, state);
@@ -15523,6 +15626,8 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
+ lpfc_pldv_detect = false;
+
error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"lpfc/sli4:online",
lpfc_cpu_online, lpfc_cpu_offline);
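The lpfc_cmf_timer() change above sizes its slop with integer division: div_u64(total, 50) is 2% of total and div_u64(total, 33) is roughly 3%. A minimal sketch of that calculation (my_cmf_extra_bytes is a hypothetical wrapper, not an lpfc function):

#include <linux/math64.h>
#include <linux/types.h>

static u64 my_cmf_extra_bytes(u64 total, u32 ms, u32 interval_ms)
{
	if (ms == interval_ms)
		return div_u64(total, 50);	/* +2% when the timer hit exactly */
	if (ms < interval_ms)
		return div_u64(total, 33);	/* ~+3% when the interval ran short */
	return 0;				/* no correction for a long interval */
}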
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 479b3eed6208..9601edd838e1 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -209,8 +209,9 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport x%px\n",
- remoteport);
+ "6146 remoteport delete of remoteport x%px, ndlp x%px "
+ "DID x%x xflags x%x\n",
+ remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
spin_lock_irq(&ndlp->lock);
/* The register rebind might have occurred before the delete
@@ -936,6 +937,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
+ int offline = 0;
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
@@ -1097,11 +1099,12 @@ out_err:
nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
nCmd->status = NVME_SC_INTERNAL;
+ offline = pci_channel_offline(vport->phba->pcidev);
}
}
/* pick up SLI4 exhange busy condition */
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
@@ -1296,7 +1299,6 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
dma_addr_t physaddr = 0;
- uint32_t num_bde = 0;
uint32_t dma_len = 0;
uint32_t dma_offset = 0;
int nseg, i, j;
@@ -1350,7 +1352,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
}
sgl->word2 = 0;
- if ((num_bde + 1) == nseg) {
+ if (nseg == 1) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
@@ -1419,8 +1421,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
j++;
}
- if (phba->cfg_enable_pbde) {
- /* Use PBDE support for first SGL only, offset == 0 */
+
+ /* PBDE support for first data SGE only */
+ if (nseg == 1 && phba->cfg_enable_pbde) {
/* Words 13-15 */
bde = (struct ulp_bde64 *)
&wqe->words[13];
@@ -1431,11 +1434,11 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
- /* Word 11 */
+ /* Word 11 - set PBDE bit */
bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
} else {
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
+ /* Word 11 - PBDE bit disabled by default template */
}
} else {
@@ -2166,6 +2169,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
+ if (!vport || !vport->localport ||
+ !qp || !qp->io_wq)
+ return;
+
pring = qp->io_wq->pring;
if (!pring)
continue;
@@ -2173,6 +2180,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_scsi += qp->abts_scsi_io_bufs;
abts_nvme += qp->abts_nvme_io_bufs;
}
+ if (!vport || !vport->localport ||
+ vport->phba->hba_flag & HBA_PCI_ERR)
+ return;
+
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6176 Lport x%px Localport x%px wait "
"timed out. Pending %d [%d:%d]. "
@@ -2212,6 +2223,8 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
return;
localport = vport->localport;
+ if (!localport)
+ return;
lport = (struct lpfc_nvme_lport *)localport->private;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
@@ -2528,7 +2541,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
@@ -2557,6 +2571,42 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
/**
+ * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
+ **/
+void
+lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_ncmd)
+{
+ struct nvmefc_fcp_req *nvme_cmd = NULL;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6533 %s nvme_cmd %p tag x%x abort complete and "
+ "xri released\n", __func__,
+ lpfc_ncmd->nvmeCmd,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands must not complete before the abort
+ * exchange command fully completes. Once that happens, the
+ * buffer is returned via the put list.
+ */
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->transferred_length = 0;
+ nvme_cmd->rcv_rsplen = 0;
+ nvme_cmd->status = NVME_SC_INTERNAL;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+}
+
+/**
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6e3dd0b9bcfa..731802527b81 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -2708,7 +2708,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
struct ulp_bde64 *bde;
dma_addr_t physaddr;
int i, cnt, nsegs;
- int do_pbde;
+ bool use_pbde = false;
int xc = 1;
if (!lpfc_is_link_up(phba)) {
@@ -2816,9 +2816,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!xc)
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
- /* Word 11 - set sup, irsp, irsplen later */
- do_pbde = 0;
-
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
@@ -2896,12 +2893,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!xc)
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
- /* Word 11 - set pbde later */
- if (phba->cfg_enable_pbde) {
- do_pbde = 1;
+ /* Word 11 - check for pbde */
+ if (nsegs == 1 && phba->cfg_enable_pbde) {
+ use_pbde = true;
+ /* Word 11 - PBDE bit already preset by template */
} else {
+ /* Overwrite default template setting */
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
- do_pbde = 0;
}
/* Word 12 */
@@ -2972,7 +2970,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
((rsp->rsplen >> 2) - 1));
memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
}
- do_pbde = 0;
/* Word 12 */
wqe->fcp_trsp.rsvd_12_15[0] = 0;
@@ -3007,23 +3004,24 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(cnt);
- if (i == 0) {
- bde = (struct ulp_bde64 *)&wqe->words[13];
- if (do_pbde) {
- /* Words 13-15 (PBDE) */
- bde->addrLow = sgl->addr_lo;
- bde->addrHigh = sgl->addr_hi;
- bde->tus.f.bdeSize =
- le32_to_cpu(sgl->sge_len);
- bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bde->tus.w = cpu_to_le32(bde->tus.w);
- } else {
- memset(bde, 0, sizeof(struct ulp_bde64));
- }
- }
sgl++;
ctxp->offset += cnt;
}
+
+ bde = (struct ulp_bde64 *)&wqe->words[13];
+ if (use_pbde) {
+ /* step the sgl pointer back to the first data sge */
+ sgl--;
+
+ /* Words 13-15 (PBDE) */
+ bde->addrLow = sgl->addr_lo;
+ bde->addrHigh = sgl->addr_hi;
+ bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ } else {
+ memset(bde, 0, sizeof(struct ulp_bde64));
+ }
ctxp->state = LPFC_NVME_STE_DATA;
ctxp->entry_cnt++;
return nvmewqe;
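The NVMe and NVMET hunks above, like the lpfc_scsi.c hunk that follows, only use the PBDE optimization when the I/O maps to a single data SGE. A self-contained sketch of that gating with hypothetical my_* types (the real code fills WQE words 13-15 and also handles endianness):

#include <linux/string.h>
#include <linux/types.h>

struct my_sge  { u32 addr_lo, addr_hi, len; };
struct my_pbde { u32 addr_lo, addr_hi, size; };	/* stands in for WQE words 13-15 */

static void my_maybe_setup_pbde(struct my_pbde *pbde,
				const struct my_sge *first_sgl,
				int nseg, bool pbde_enabled)
{
	if (nseg == 1 && pbde_enabled) {
		pbde->addr_lo = first_sgl->addr_lo;
		pbde->addr_hi = first_sgl->addr_hi;
		pbde->size    = first_sgl->len;
	} else {
		memset(pbde, 0, sizeof(*pbde));	/* PBDE disabled: leave zeroed */
	}
}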
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index befdf864c43b..6ccf573acdec 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -493,8 +493,8 @@ void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx)
{
- uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ u16 xri = 0;
+ u16 rxid = 0;
struct lpfc_io_buf *psb, *next_psb;
struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
@@ -504,15 +504,22 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
int rrq_empty = 0;
struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
struct scsi_cmnd *cmd;
+ int offline = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
-
+ offline = pci_channel_offline(phba->pcidev);
+ if (!offline) {
+ xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ }
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
+ if (offline)
+ xri = psb->cur_iocbq.sli4_xritag;
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
@@ -521,8 +528,15 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
- return;
+ if (!offline) {
+ lpfc_sli4_nvme_xri_aborted(phba, axri,
+ psb);
+ return;
+ }
+ lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&qp->abts_io_buf_list_lock);
+ continue;
}
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
@@ -534,13 +548,13 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (ndlp) {
+ if (ndlp && !offline) {
lpfc_set_rrq_active(phba, ndlp,
psb->cur_iocbq.sli4_lxritag, rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
- if (phba->cfg_fcp_wait_abts_rsp) {
+ if (phba->cfg_fcp_wait_abts_rsp || offline) {
spin_lock_irqsave(&psb->buf_lock, iflag);
cmd = psb->pCmd;
psb->pCmd = NULL;
@@ -550,7 +564,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
* scsi_done upcall.
*/
if (cmd)
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/*
* We expect there is an abort thread waiting
@@ -567,25 +581,30 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
- return;
+ if (!offline)
+ return;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&qp->abts_io_buf_list_lock);
+ continue;
}
}
spin_unlock(&qp->abts_io_buf_list_lock);
- for (i = 1; i <= phba->sli.last_iotag; i++) {
- iocbq = phba->sli.iocbq_lookup[i];
-
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
- continue;
- if (iocbq->sli4_xritag != xri)
- continue;
- psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->flags &= ~LPFC_SBUF_XBUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (!list_empty(&pring->txq))
- lpfc_worker_wake_up(phba);
- return;
+ if (!offline) {
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ continue;
+ if (iocbq->sli4_xritag != xri)
+ continue;
+ psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
+ psb->flags &= ~LPFC_SBUF_XBUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (!list_empty(&pring->txq))
+ lpfc_worker_wake_up(phba);
+ return;
+ }
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
@@ -875,7 +894,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
bpl += 2;
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
* because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -2570,7 +2589,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
bpl += 2;
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
* because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -3215,7 +3234,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
struct lpfc_vport *vport = phba->pport;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
dma_addr_t physaddr;
- uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
int nseg, i, j;
@@ -3231,7 +3249,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
* because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -3277,7 +3295,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
j = 2;
for (i = 0; i < nseg; i++) {
sgl->word2 = 0;
- if ((num_bde + 1) == nseg) {
+ if (nseg == 1) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
@@ -3346,13 +3364,15 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
j++;
}
- /*
- * Setup the first Payload BDE. For FCoE we just key off
- * Performance Hints, for FC we use lpfc_enable_pbde.
- * We populate words 13-15 of IOCB/WQE.
+
+ /* PBDE support for first data SGE only.
+ * For FCoE, we key off Performance Hints.
+ * For FC, we key off lpfc_enable_pbde.
*/
- if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
- phba->cfg_enable_pbde) {
+ if (nseg == 1 &&
+ ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
+ phba->cfg_enable_pbde)) {
+ /* Words 13-15 */
bde = (struct ulp_bde64 *)
&wqe->words[13];
bde->addrLow = first_data_sgl->addr_lo;
@@ -3362,12 +3382,15 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
+ /* Word 11 - set PBDE bit */
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
} else {
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
+ /* Word 11 - PBDE bit disabled by default template */
}
} else {
sgl += 1;
- /* clear the last flag in the fcp_rsp map entry */
+ /* set the last flag in the fcp_rsp map entry */
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
@@ -3380,10 +3403,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
}
}
- /* Word 11 */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
-
/*
* Finish initializing those IOCB fields that are dependent on the
* scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
@@ -3469,7 +3488,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
*/
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
* because this a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -3941,7 +3960,8 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
int cpu;
/* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
- if (phba->cmf_active_mode == LPFC_CFG_MANAGED) {
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
+ phba->cmf_max_bytes_per_interval) {
total = 0;
for_each_present_cpu(cpu) {
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
@@ -4481,7 +4501,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
goto out;
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/*
* If there is an abort thread waiting for command completion
@@ -4750,7 +4770,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
#endif
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/*
* If there is an abort thread waiting for command completion
@@ -5095,7 +5115,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1418 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
@@ -5822,7 +5842,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
shost);
out_fail_command:
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
@@ -6455,28 +6475,28 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
/* Issue LOGO, if no LOGO is outstanding */
spin_lock_irqsave(&pnode->lock, flags);
- if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) &&
+ if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
!pnode->logo_waitq) {
pnode->logo_waitq = &waitq;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
pnode->nlp_flag |= NLP_ISSUE_LOGO;
- pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
+ pnode->save_flags |= NLP_WAIT_FOR_LOGO;
spin_unlock_irqrestore(&pnode->lock, flags);
lpfc_unreg_rpi(vport, pnode);
wait_event_timeout(waitq,
- (!(pnode->upcall_flags &
+ (!(pnode->save_flags &
NLP_WAIT_FOR_LOGO)),
msecs_to_jiffies(dev_loss_tmo *
1000));
- if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
+ if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
lpfc_printf_vlog(vport, KERN_ERR, logit,
"0725 SCSI layer TGTRST "
"failed & LOGO TMO (%d, %llu) "
"return x%x\n",
tgt_id, lun_id, status);
spin_lock_irqsave(&pnode->lock, flags);
- pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
} else {
spin_lock_irqsave(&pnode->lock, flags);
}
@@ -6628,6 +6648,13 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
if (rc)
goto error;
+ /* Wait for successful restart of adapter */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = lpfc_sli_chipset_init(phba);
+ if (rc)
+ goto error;
+ }
+
rc = lpfc_online(phba);
if (rc)
goto error;
@@ -7182,7 +7209,7 @@ struct scsi_host_template lpfc_template_nvme = {
.this_id = -1,
.sg_tablesize = 1,
.cmd_per_lun = 1,
- .shost_attrs = lpfc_hba_attrs,
+ .shost_groups = lpfc_hba_groups,
.max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.track_queue_depth = 0,
@@ -7208,7 +7235,7 @@ struct scsi_host_template lpfc_template = {
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_hba_attrs,
+ .shost_groups = lpfc_hba_groups,
.max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
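[The two lpfc template hunks above switch from .shost_attrs to .shost_groups; the definition of lpfc_hba_groups itself is outside this excerpt. A minimal sketch of the sysfs attribute-group pattern such a conversion assumes, using hypothetical "example" identifiers rather than lpfc's real ones, and matching the ATTRIBUTE_GROUPS() conversions done for other drivers later in this series:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <scsi/scsi_host.h>

/* Hypothetical attribute; the real lpfc attributes are defined elsewhere. */
static ssize_t example_info_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "example\n");
}
static DEVICE_ATTR_RO(example_info);

static struct attribute *example_hba_attrs[] = {
	&dev_attr_example_info.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_hba);		/* emits example_hba_groups[] */

static struct scsi_host_template example_template = {
	.name		= "example",
	.shost_groups	= example_hba_groups,	/* was: .shost_attrs */
};

The groups are then created by the midlayer when the Scsi_Host device is added, which is the point of the shost_attrs to shost_groups conversion.]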
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 026a1196a54d..5dedb3de271d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1404,7 +1404,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
}
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
- (sglq->state != SGL_XRI_ABORTED)) {
+ (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
+ sglq->state != SGL_XRI_ABORTED) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
@@ -4583,10 +4584,12 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
lpfc_sli_cancel_iocbs(phba, &txq,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
- /* Flush the txcmpq */
+ /* Flush the txcmplq */
lpfc_sli_cancel_iocbs(phba, &txcmplq,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ lpfc_sli4_io_xri_aborted(phba, NULL, 0);
}
} else {
pring = &psli->sli3_ring[LPFC_FCP_RING];
@@ -7761,8 +7764,6 @@ lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Zero out Congestion Signal ACQE counter */
phba->cgn_acqe_cnt = 0;
- atomic64_set(&phba->cgn_acqe_stat.warn, 0);
- atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
&pmb->u.mqe.un.set_feature);
@@ -7890,36 +7891,19 @@ static int
lpfc_cmf_setup(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mboxq;
- struct lpfc_mqe *mqe;
struct lpfc_dmabuf *mp;
struct lpfc_pc_sli4_params *sli4_params;
- struct lpfc_sli4_parameters *mbx_sli4_parameters;
- int length;
int rc, cmf, mi_ver;
+ rc = lpfc_sli4_refresh_params(phba);
+ if (unlikely(rc))
+ return rc;
+
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
- mqe = &mboxq->u.mqe;
- /* Read the port's SLI4 Config Parameters */
- length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
- lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
- length, LPFC_SLI4_MBX_EMBED);
-
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- mempool_free(mboxq, phba->mbox_mem_pool);
- return rc;
- }
-
- /* Gather info on CMF and MI support */
sli4_params = &phba->sli4_hba.pc_sli4_params;
- mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
- sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
- sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters);
/* Are we forcing MI off via module parameter? */
if (!phba->cfg_enable_mi)
@@ -8014,6 +7998,10 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
/* initialize congestion buffer info */
lpfc_init_congestion_buf(phba);
lpfc_init_congestion_stat(phba);
+
+ /* Zero out Congestion Signal counters */
+ atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+ atomic64_set(&phba->cgn_acqe_stat.warn, 0);
}
rc = lpfc_sli4_cgn_params_read(phba);
@@ -8153,6 +8141,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
struct lpfc_rqb *rqbp;
+ u32 flg;
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
@@ -8166,7 +8155,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
else {
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+ flg = phba->sli.sli_flag;
spin_unlock_irq(&phba->hbalock);
+ /* Allow a little time after setting SLI_ACTIVE for any polled
+ * MBX commands to complete via BSG.
+ */
+ for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
+ msleep(20);
+ spin_lock_irq(&phba->hbalock);
+ flg = phba->sli.sli_flag;
+ spin_unlock_irq(&phba->hbalock);
+ }
}
lpfc_sli4_dip(phba);
@@ -9750,7 +9749,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
"(%d):2541 Mailbox command x%x "
"(x%x/x%x) failure: "
"mqe_sta: x%x mcqe_sta: x%x/x%x "
- "Data: x%x x%x\n,",
+ "Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
@@ -9784,7 +9783,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
"(%d):2597 Sync Mailbox command "
"x%x (x%x/x%x) failure: "
"mqe_sta: x%x mcqe_sta: x%x/x%x "
- "Data: x%x x%x\n,",
+ "Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
@@ -10010,7 +10009,7 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1420 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
@@ -11178,7 +11177,7 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1419 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
@@ -12404,17 +12403,17 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
- abtsiocbp->iocb_flag |= LPFC_IO_FCP;
- abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
- }
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ abtsiocbp->iocb_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
if (cmdiocb->iocb_flag & LPFC_IO_FOF)
abtsiocbp->iocb_flag |= LPFC_IO_FOF;
- if (phba->link_state >= LPFC_LINK_UP)
- iabt->ulpCommand = CMD_ABORT_XRI_CN;
- else
+ if (phba->link_state < LPFC_LINK_UP ||
+ (phba->sli_rev == LPFC_SLI_REV4 &&
+ phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+ else
+ iabt->ulpCommand = CMD_ABORT_XRI_CN;
if (cmpl)
abtsiocbp->iocb_cmpl = cmpl;
@@ -12488,15 +12487,54 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
+ * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
+ * @iocbq: Pointer to iocb object.
+ * @vport: Pointer to driver virtual port object.
+ *
+ * This function acts as an iocb filter for functions which abort FCP iocbs.
+ *
+ * Return values
+ * -ENODEV, if a null iocb or vport ptr is encountered
+ * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
+ * already marked as driver-aborted, or is itself an abort iocb
+ * 0, passes criteria for aborting the FCP I/O iocb
+ **/
+static int
+lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
+ struct lpfc_vport *vport)
+{
+ IOCB_t *icmd = NULL;
+
+ /* Reject a NULL iocb or a vport mismatch */
+ if (!iocbq || iocbq->vport != vport)
+ return -ENODEV;
+
+ /* The iocb must be an FCP I/O that is already on the TX cmpl queue,
+ * is not yet marked as driver-aborted, and is not itself an ABORT iocb
+ */
+ icmd = &iocbq->iocb;
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
+ (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
+ (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
* @iocbq: Pointer to driver iocb object.
* @vport: Pointer to driver virtual port object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
*
- * This function acts as an iocb filter for functions which abort or count
- * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * This function acts as an iocb filter for validating FCP iocbs against a
+ * given LUN, SCSI target, or SCSI host.
+ *
+ * It will return
* 0 if the filtering criteria is met for the given iocb and will return
* 1 if the filtering criteria is not met.
* If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
@@ -12515,22 +12553,8 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_io_buf *lpfc_cmd;
- IOCB_t *icmd = NULL;
int rc = 1;
- if (!iocbq || iocbq->vport != vport)
- return rc;
-
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
- iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- return rc;
-
- icmd = &iocbq->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN)
- return rc;
-
lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
if (lpfc_cmd->pCmd == NULL)
@@ -12585,17 +12609,33 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
+ IOCB_t *icmd = NULL;
int sum, i;
+ unsigned long iflags;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
- ctx_cmd) == 0)
+ if (!iocbq || iocbq->vport != vport)
+ continue;
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
+ continue;
+
+ /* Include counting outstanding aborts */
+ icmd = &iocbq->iocb;
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
+ sum++;
+ continue;
+ }
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ ctx_cmd) == 0)
sum++;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return sum;
}
@@ -12662,7 +12702,11 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*
* This function sends an abort command for every SCSI command
* associated with the given virtual port pending on the ring
- * filtered by lpfc_sli_validate_fcp_iocb function.
+ * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
+ * lpfc_sli_validate_fcp_iocb. Validation must run in that order,
+ * lpfc_sli_validate_fcp_iocb_for_abort first, before any abort iocb
+ * is submitted.
+ *
* When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
* FCP iocbs associated with lun specified by tgt_id and lun_id
* parameters
@@ -12694,6 +12738,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
+ if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
+ continue;
+
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
abort_cmd) != 0)
continue;
@@ -12726,7 +12773,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
*
* This function sends an abort command for every SCSI command
* associated with the given virtual port pending on the ring
- * filtered by lpfc_sli_validate_fcp_iocb function.
+ * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
+ * lpfc_sli_validate_fcp_iocb. Validation must run in that order,
+ * lpfc_sli_validate_fcp_iocb_for_abort first, before any abort iocb
+ * is submitted.
+ *
* When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
* FCP iocbs associated with lun specified by tgt_id and lun_id
* parameters
@@ -12764,6 +12815,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
+ if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
+ continue;
+
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
cmd) != 0)
continue;
@@ -21107,6 +21161,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
fail_msg,
piocbq->iotag, piocbq->sli4_xritag);
list_add_tail(&piocbq->list, &completions);
+ fail_msg = NULL;
}
spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
@@ -21966,8 +22021,26 @@ lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[hwqid];
lpfc_ncmd = NULL;
+ if (!qp) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5556 NULL qp for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5557 NULL multixri for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
pvt_pool = &multixri_pool->pvt_pool;
+ if (!pvt_pool) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
multixri_pool->io_req_count++;
/* If pvt_pool is empty, move some XRIs from public to private pool */
@@ -22043,6 +22116,12 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[hwqid];
lpfc_cmd = NULL;
+ if (!qp) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5555 NULL qp for hwqid x%x\n", hwqid);
+ return lpfc_cmd;
+ }
if (phba->cfg_xri_rebalancing)
lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 99c5d1e4da5e..5962cf508842 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1116,6 +1116,8 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
+void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_ncmd);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index a7aba7833425..5a4d3b24fbce 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.0.0.1"
+#define LPFC_DRIVER_VERSION "14.0.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index ec9840d322e5..3976a18f6333 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -66,8 +66,7 @@ static irqreturn_t do_mac53c94_interrupt(int, void *);
static void cmd_done(struct fsc_state *, int result);
static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *);
-
-static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int mac53c94_queue_lck(struct scsi_cmnd *cmd)
{
struct fsc_state *state;
@@ -83,7 +82,6 @@ static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cm
}
#endif
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
state = (struct fsc_state *) cmd->device->host->hostdata;
@@ -348,7 +346,7 @@ static void cmd_done(struct fsc_state *state, int result)
cmd = state->current_req;
if (cmd) {
cmd->result = result;
- (*cmd->scsi_done)(cmd);
+ scsi_done(cmd);
state->current_req = NULL;
}
state->phase = idle;
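[The mac53c94 hunks above show the series-wide pattern: queuecommand_lck() loses its done() argument, the cmd->scsi_done assignment disappears, and completion is reported through the midlayer's scsi_done(). A minimal sketch of that shape, with hypothetical "example" names and a stubbed submit helper, assuming the post-conversion DEF_SCSI_QCMD() wrapper:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Stub standing in for a real hardware submit path. */
static bool example_hw_submit(struct scsi_cmnd *cmd)
{
	return false;	/* pretend the hardware path is unavailable */
}

static int example_queue_lck(struct scsi_cmnd *cmd)
{
	/* No done() parameter and no cmd->scsi_done store any more. */
	if (!example_hw_submit(cmd)) {
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);		/* complete directly via the midlayer */
	}
	return 0;
}

/* Generates example_queue(), which takes the host lock around the _lck body
 * and is what the host template's .queuecommand would point at.
 */
static DEF_SCSI_QCMD(example_queue)]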
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 56910e94dbf2..0d31d7a5e335 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -370,8 +370,7 @@ mega_runpendq(adapter_t *adapter)
*
* The command queuing entry point for the mid-layer.
*/
-static int
-megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+static int megaraid_queue_lck(struct scsi_cmnd *scmd)
{
adapter_t *adapter;
scb_t *scb;
@@ -380,9 +379,6 @@ megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
adapter = (adapter_t *)scmd->device->host->hostdata;
- scmd->scsi_done = done;
-
-
/*
* Allocate and build a SCB request
* busy flag will be set if mega_build_cmd() command could not
@@ -586,7 +582,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
/* have just LUN 0 for each target on virtual channels */
if (cmd->device->lun) {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -605,7 +601,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
if(ldrv_num > max_ldrv_num ) {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -617,7 +613,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
* devices
*/
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
}
@@ -637,7 +633,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
*/
if( !adapter->has_cluster ) {
cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -655,7 +651,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#else
cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
#endif
@@ -670,7 +666,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
kunmap_atomic(buf - sg->offset);
cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -866,7 +862,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
if( ! adapter->has_cluster ) {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -889,7 +885,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
default:
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
}
@@ -1654,7 +1650,7 @@ mega_rundoneq (adapter_t *adapter)
struct scsi_pointer* spos = (struct scsi_pointer *)pos;
cmd = list_entry(spos, struct scsi_cmnd, SCp);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
INIT_LIST_HEAD(&adapter->completed_list);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index d20c2e4ee793..14f930d27ca1 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -305,20 +305,23 @@ static struct pci_driver megaraid_pci_driver = {
static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_app_hndl);
// Host template initializer for megaraid mbox sysfs device attributes
-static struct device_attribute *megaraid_shost_attrs[] = {
- &dev_attr_megaraid_mbox_app_hndl,
+static struct attribute *megaraid_shost_attrs[] = {
+ &dev_attr_megaraid_mbox_app_hndl.attr,
NULL,
};
+ATTRIBUTE_GROUPS(megaraid_shost);
static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_ld);
// Host template initializer for megaraid mbox sysfs device attributes
-static struct device_attribute *megaraid_sdev_attrs[] = {
- &dev_attr_megaraid_mbox_ld,
+static struct attribute *megaraid_sdev_attrs[] = {
+ &dev_attr_megaraid_mbox_ld.attr,
NULL,
};
+ATTRIBUTE_GROUPS(megaraid_sdev);
+
/*
* Scsi host template for megaraid unified driver
*/
@@ -331,8 +334,8 @@ static struct scsi_host_template megaraid_template_g = {
.eh_host_reset_handler = megaraid_reset_handler,
.change_queue_depth = scsi_change_queue_depth,
.no_write_same = 1,
- .sdev_attrs = megaraid_sdev_attrs,
- .shost_attrs = megaraid_shost_attrs,
+ .sdev_groups = megaraid_sdev_groups,
+ .shost_groups = megaraid_shost_groups,
};
@@ -1432,15 +1435,14 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
*
* Queue entry point for mailbox based controllers.
*/
-static int
-megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+static int megaraid_queue_command_lck(struct scsi_cmnd *scp)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
adapter_t *adapter;
scb_t *scb;
int if_busy;
adapter = SCP2ADAPTER(scp);
- scp->scsi_done = done;
scp->result = 0;
/*
@@ -2358,7 +2360,7 @@ megaraid_mbox_dpc(unsigned long devp)
megaraid_dealloc_scb(adapter, scb);
// send the scsi packet back to kernel
- scp->scsi_done(scp);
+ scsi_done(scp);
}
return;
@@ -2416,7 +2418,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
scb->sno, scb->dev_channel, scb->dev_target));
scp->result = (DID_ABORT << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
megaraid_dealloc_scb(adapter, scb);
@@ -2446,7 +2448,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
scb->dev_channel, scb->dev_target));
scp->result = (DID_ABORT << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
megaraid_dealloc_scb(adapter, scb);
@@ -2566,7 +2568,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
}
scb->scp->result = (DID_RESET << 16);
- scb->scp->scsi_done(scb->scp);
+ scsi_done(scb->scp);
megaraid_dealloc_scb(adapter, scb);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 7af2c23652b0..2c9d1b796475 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.717.02.00-rc1"
-#define MEGASAS_RELDATE "May 19, 2021"
+#define MEGASAS_VERSION "07.719.03.00-rc1"
+#define MEGASAS_RELDATE "Sep 29, 2021"
#define MEGASAS_MSIX_NAME_LEN 32
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 39d8754e63ac..aeb95f409826 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1794,7 +1794,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (instance->unload == 1) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -1809,7 +1809,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return SCSI_MLQUEUE_HOST_BUSY;
} else {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
}
@@ -1818,7 +1818,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!mr_device_priv_data ||
(atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -1826,7 +1826,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
}
@@ -1857,7 +1857,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return instance->instancet->build_and_issue_cmd(instance, scmd);
out_done:
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -2783,7 +2783,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
reset_index, reset_cmd,
reset_cmd->scmd->cmnd[0]);
- reset_cmd->scmd->scsi_done(reset_cmd->scmd);
+ scsi_done(reset_cmd->scmd);
megasas_return_cmd(instance, reset_cmd);
} else if (reset_cmd->sync_cmd) {
dev_notice(&instance->pdev->dev, "%p synch cmds"
@@ -3481,19 +3481,21 @@ static DEVICE_ATTR_RW(enable_sdev_max_qd);
static DEVICE_ATTR_RO(dump_system_regs);
static DEVICE_ATTR_RO(raid_map_id);
-static struct device_attribute *megaraid_host_attrs[] = {
- &dev_attr_fw_crash_buffer_size,
- &dev_attr_fw_crash_buffer,
- &dev_attr_fw_crash_state,
- &dev_attr_page_size,
- &dev_attr_ldio_outstanding,
- &dev_attr_fw_cmds_outstanding,
- &dev_attr_enable_sdev_max_qd,
- &dev_attr_dump_system_regs,
- &dev_attr_raid_map_id,
+static struct attribute *megaraid_host_attrs[] = {
+ &dev_attr_fw_crash_buffer_size.attr,
+ &dev_attr_fw_crash_buffer.attr,
+ &dev_attr_fw_crash_state.attr,
+ &dev_attr_page_size.attr,
+ &dev_attr_ldio_outstanding.attr,
+ &dev_attr_fw_cmds_outstanding.attr,
+ &dev_attr_enable_sdev_max_qd.attr,
+ &dev_attr_dump_system_regs.attr,
+ &dev_attr_raid_map_id.attr,
NULL,
};
+ATTRIBUTE_GROUPS(megaraid_host);
+
/*
* Scsi host template for megaraid_sas driver
*/
@@ -3510,7 +3512,7 @@ static struct scsi_host_template megasas_template = {
.eh_abort_handler = megasas_task_abort,
.eh_host_reset_handler = megasas_reset_bus_host,
.eh_timed_out = megasas_reset_timer,
- .shost_attrs = megaraid_host_attrs,
+ .shost_groups = megaraid_host_groups,
.bios_param = megasas_bios_param,
.map_queues = megasas_map_queues,
.mq_poll = megasas_blk_mq_poll,
@@ -3640,7 +3642,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
- cmd->scmd->scsi_done(cmd->scmd);
+ scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
@@ -3686,7 +3688,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
- cmd->scmd->scsi_done(cmd->scmd);
+ scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 26d0cf9353dd..fc90a0a687b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3493,11 +3493,46 @@ megasas_complete_r1_command(struct megasas_instance *instance,
megasas_return_cmd_fusion(instance, cmd);
scsi_dma_unmap(scmd_local);
megasas_sdev_busy_dec(instance, scmd_local);
- scmd_local->scsi_done(scmd_local);
+ scsi_done(scmd_local);
}
}
/**
+ * access_irq_context: Access to reply processing
+ * @irq_context: IRQ context
+ *
+ * Synchronize access to reply processing.
+ *
+ * Return: true if reply processing was claimed, false if it is already in use.
+ */
+static inline
+bool access_irq_context(struct megasas_irq_context *irq_context)
+{
+ if (!irq_context)
+ return true;
+
+ if (atomic_add_unless(&irq_context->in_used, 1, 1))
+ return true;
+
+ return false;
+}
+
+/**
+ * release_irq_context: Release reply processing
+ * @irq_context: IRQ context
+ *
+ * Release access to reply processing.
+ *
+ * Return: Nothing.
+ */
+static inline
+void release_irq_context(struct megasas_irq_context *irq_context)
+{
+ if (irq_context)
+ atomic_dec(&irq_context->in_used);
+}
+
+/**
* complete_cmd_fusion - Completes command
* @instance: Adapter soft state
* @MSIxIndex: MSI number
@@ -3530,6 +3565,9 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return IRQ_HANDLED;
+ if (!access_irq_context(irq_context))
+ return 0;
+
desc = fusion->reply_frames_desc[MSIxIndex] +
fusion->last_reply_idx[MSIxIndex];
@@ -3540,11 +3578,10 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
reply_descript_type = reply_desc->ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
- if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ release_irq_context(irq_context);
return IRQ_NONE;
-
- if (irq_context && !atomic_add_unless(&irq_context->in_used, 1, 1))
- return 0;
+ }
num_completed = 0;
@@ -3597,7 +3634,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
megasas_sdev_busy_dec(instance, scmd_local);
- scmd_local->scsi_done(scmd_local);
+ scsi_done(scmd_local);
} else /* Optimal VD - R1 FP command completion. */
megasas_complete_r1_command(instance, cmd_fusion);
break;
@@ -3660,7 +3697,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
irq_context->irq_line_enable = true;
irq_poll_sched(&irq_context->irqpoll);
}
- atomic_dec(&irq_context->in_used);
+ release_irq_context(irq_context);
return num_completed;
}
}
@@ -3679,8 +3716,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
megasas_check_and_restore_queue_depth(instance);
}
- if (irq_context)
- atomic_dec(&irq_context->in_used);
+ release_irq_context(irq_context);
return num_completed;
}
@@ -4977,7 +5013,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
atomic_dec(&instance->ldio_outstanding);
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
- scmd_local->scsi_done(scmd_local);
+ scsi_done(scmd_local);
}
}
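[The access_irq_context()/release_irq_context() helpers added above wrap a try-lock built on atomic_add_unless(), so the hard-irq handler and the irq_poll path never drain the same reply queue concurrently. A stripped-down sketch of that guard with hypothetical names:

#include <linux/atomic.h>
#include <linux/types.h>

struct example_irq_ctx {
	atomic_t in_used;	/* 0 = free, 1 = someone is draining replies */
};

/* Try to claim the reply queue; the bound of 1 makes this a try-lock. */
static bool example_claim(struct example_irq_ctx *ctx)
{
	if (!ctx)
		return true;	/* callers without a context are not serialized */
	return atomic_add_unless(&ctx->in_used, 1, 1);
}

static void example_release(struct example_irq_ctx *ctx)
{
	if (ctx)
		atomic_dec(&ctx->in_used);
}

atomic_add_unless() only increments when the current value differs from the limit, so in_used never exceeds 1 and the losing caller simply backs off instead of spinning, which is what complete_cmd_fusion() now does by releasing the context before returning on an unused reply descriptor.]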
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 78b72bcf58fe..ca133e0a140a 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -342,15 +342,6 @@ static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
}
-/*
- * Complete a SCSI command
- */
-static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
-{
- (*cmd->scsi_done)(cmd);
-}
-
-
/* Called with meshinterrupt disabled, initialize the chipset
* and eventually do the initial bus reset. The lock must not be
* held since we can schedule.
@@ -613,7 +604,7 @@ static void mesh_done(struct mesh_state *ms, int start_next)
#endif
}
cmd->SCp.this_residual -= ms->data_ptr;
- mesh_completed(ms, cmd);
+ scsi_done(cmd);
}
if (start_next) {
out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
@@ -996,7 +987,7 @@ static void handle_reset(struct mesh_state *ms)
if ((cmd = tp->current_req) != NULL) {
set_host_byte(cmd, DID_RESET);
tp->current_req = NULL;
- mesh_completed(ms, cmd);
+ scsi_done(cmd);
}
ms->tgts[tgt].sdtr_state = do_sdtr;
ms->tgts[tgt].sync_params = ASYNC_PARAMS;
@@ -1005,7 +996,7 @@ static void handle_reset(struct mesh_state *ms)
while ((cmd = ms->request_q) != NULL) {
ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
set_host_byte(cmd, DID_RESET);
- mesh_completed(ms, cmd);
+ scsi_done(cmd);
}
ms->phase = idle;
ms->msgphase = msg_none;
@@ -1630,11 +1621,10 @@ static void cmd_complete(struct mesh_state *ms)
* Called by midlayer with host locked to queue a new
* request
*/
-static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int mesh_queue_lck(struct scsi_cmnd *cmd)
{
struct mesh_state *ms;
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
ms = (struct mesh_state *) cmd->device->host->hostdata;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 4a8316c6bd41..aa5d877df6f8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -3018,11 +3018,10 @@ static const struct {
static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
{
- int i = 0, bytes_wrote = 0;
+ int i = 0, bytes_written = 0;
char personality[16];
char protocol[50] = {0};
char capabilities[100] = {0};
- bool is_string_nonempty = false;
struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
switch (mrioc->facts.personality) {
@@ -3046,39 +3045,26 @@ mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
if (mrioc->facts.protocol_flags &
mpi3mr_protocols[i].protocol) {
- if (is_string_nonempty &&
- (bytes_wrote < sizeof(protocol)))
- bytes_wrote += snprintf(protocol + bytes_wrote,
- (sizeof(protocol) - bytes_wrote), ",");
-
- if (bytes_wrote < sizeof(protocol))
- bytes_wrote += snprintf(protocol + bytes_wrote,
- (sizeof(protocol) - bytes_wrote), "%s",
+ bytes_written += scnprintf(protocol + bytes_written,
+ sizeof(protocol) - bytes_written, "%s%s",
+ bytes_written ? "," : "",
mpi3mr_protocols[i].name);
- is_string_nonempty = true;
}
}
- bytes_wrote = 0;
- is_string_nonempty = false;
+ bytes_written = 0;
for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
if (mrioc->facts.protocol_flags &
mpi3mr_capabilities[i].capability) {
- if (is_string_nonempty &&
- (bytes_wrote < sizeof(capabilities)))
- bytes_wrote += snprintf(capabilities + bytes_wrote,
- (sizeof(capabilities) - bytes_wrote), ",");
-
- if (bytes_wrote < sizeof(capabilities))
- bytes_wrote += snprintf(capabilities + bytes_wrote,
- (sizeof(capabilities) - bytes_wrote), "%s",
+ bytes_written += scnprintf(capabilities + bytes_written,
+ sizeof(capabilities) - bytes_written, "%s%s",
+ bytes_written ? "," : "",
mpi3mr_capabilities[i].name);
- is_string_nonempty = true;
}
}
ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
- protocol, capabilities);
+ protocol, capabilities);
}
/**
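[The mpi3mr rewrite above drops the bytes_wrote/is_string_nonempty bookkeeping because scnprintf() returns the number of characters actually stored, never more than the space left, so the running offset can be fed straight back in. A minimal sketch of that comma-joining pattern with made-up names and data:

#include <linux/kernel.h>	/* scnprintf(), ARRAY_SIZE() */

static void example_join_names(char *buf, size_t size)
{
	static const char * const names[] = { "NVMe", "SCSI Initiator", "SAS" };
	int written = 0;
	unsigned int i;

	if (!size)
		return;
	buf[0] = '\0';
	for (i = 0; i < ARRAY_SIZE(names); i++) {
		/* "%s%s" prepends a comma only once the buffer is non-empty */
		written += scnprintf(buf + written, size - written, "%s%s",
				     written ? "," : "", names[i]);
	}
}

Unlike snprintf(), scnprintf() cannot push the offset past the buffer, so the old overflow checks before each append become unnecessary.]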
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 3cae8803383b..fe10f257b5a4 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -409,7 +409,7 @@ static bool mpi3mr_flush_scmd(struct request *rq,
scsi_dma_unmap(scmd);
scmd->result = DID_RESET << 16;
scsi_print_command(scmd);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
mrioc->flush_io_count++;
}
@@ -2312,7 +2312,7 @@ out_success:
}
mpi3mr_clear_scmd_priv(mrioc, scmd);
scsi_dma_unmap(scmd);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
out:
if (sense_buf)
mpi3mr_repost_sense_buf(mrioc,
@@ -3322,7 +3322,7 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
__func__);
scsi_print_command(scmd);
scmd->result = DID_OK << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return true;
}
@@ -3334,7 +3334,7 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
scmd->result = SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
0x1A, 0);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return true;
}
if (param_len != scsi_bufflen(scmd)) {
@@ -3345,7 +3345,7 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
scmd->result = SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
0x1A, 0);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return true;
}
buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
@@ -3354,7 +3354,7 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
scmd->result = SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
0x55, 0x03);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return true;
}
scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
@@ -3368,7 +3368,7 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
scmd->result = SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
0x26, 0);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
kfree(buf);
return true;
}
@@ -3438,14 +3438,14 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
sdev_priv_data = scmd->device->hostdata;
if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
goto out;
}
if (mrioc->stop_drv_processing &&
!(mpi3mr_allow_scmd_to_fw(scmd))) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
goto out;
}
@@ -3459,19 +3459,19 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
dev_handle = stgt_priv_data->dev_handle;
if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
goto out;
}
if (stgt_priv_data->dev_removed) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
goto out;
}
if (atomic_read(&stgt_priv_data->block_io)) {
if (mrioc->stop_drv_processing) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
goto out;
}
retval = SCSI_MLQUEUE_DEVICE_BUSY;
@@ -3486,7 +3486,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
if (host_tag == MPI3MR_HOSTTAG_INVALID) {
scmd->result = DID_ERROR << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
goto out;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index f87c0911f66a..db6a759de1e9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1939,8 +1939,8 @@ mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set);
/* ctl shared API */
-extern struct device_attribute *mpt3sas_host_attrs[];
-extern struct device_attribute *mpt3sas_dev_attrs[];
+extern const struct attribute_group *mpt3sas_host_groups[];
+extern const struct attribute_group *mpt3sas_dev_groups[];
void mpt3sas_ctl_init(ushort hbas_to_enumerate);
void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 1b79f01f03a4..05b6c6a073c3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3842,37 +3842,46 @@ enable_sdev_max_qd_store(struct device *cdev,
}
static DEVICE_ATTR_RW(enable_sdev_max_qd);
-struct device_attribute *mpt3sas_host_attrs[] = {
- &dev_attr_version_fw,
- &dev_attr_version_bios,
- &dev_attr_version_mpi,
- &dev_attr_version_product,
- &dev_attr_version_nvdata_persistent,
- &dev_attr_version_nvdata_default,
- &dev_attr_board_name,
- &dev_attr_board_assembly,
- &dev_attr_board_tracer,
- &dev_attr_io_delay,
- &dev_attr_device_delay,
- &dev_attr_logging_level,
- &dev_attr_fwfault_debug,
- &dev_attr_fw_queue_depth,
- &dev_attr_host_sas_address,
- &dev_attr_ioc_reset_count,
- &dev_attr_host_trace_buffer_size,
- &dev_attr_host_trace_buffer,
- &dev_attr_host_trace_buffer_enable,
- &dev_attr_reply_queue_count,
- &dev_attr_diag_trigger_master,
- &dev_attr_diag_trigger_event,
- &dev_attr_diag_trigger_scsi,
- &dev_attr_diag_trigger_mpi,
- &dev_attr_drv_support_bitmap,
- &dev_attr_BRM_status,
- &dev_attr_enable_sdev_max_qd,
+static struct attribute *mpt3sas_host_attrs[] = {
+ &dev_attr_version_fw.attr,
+ &dev_attr_version_bios.attr,
+ &dev_attr_version_mpi.attr,
+ &dev_attr_version_product.attr,
+ &dev_attr_version_nvdata_persistent.attr,
+ &dev_attr_version_nvdata_default.attr,
+ &dev_attr_board_name.attr,
+ &dev_attr_board_assembly.attr,
+ &dev_attr_board_tracer.attr,
+ &dev_attr_io_delay.attr,
+ &dev_attr_device_delay.attr,
+ &dev_attr_logging_level.attr,
+ &dev_attr_fwfault_debug.attr,
+ &dev_attr_fw_queue_depth.attr,
+ &dev_attr_host_sas_address.attr,
+ &dev_attr_ioc_reset_count.attr,
+ &dev_attr_host_trace_buffer_size.attr,
+ &dev_attr_host_trace_buffer.attr,
+ &dev_attr_host_trace_buffer_enable.attr,
+ &dev_attr_reply_queue_count.attr,
+ &dev_attr_diag_trigger_master.attr,
+ &dev_attr_diag_trigger_event.attr,
+ &dev_attr_diag_trigger_scsi.attr,
+ &dev_attr_diag_trigger_mpi.attr,
+ &dev_attr_drv_support_bitmap.attr,
+ &dev_attr_BRM_status.attr,
+ &dev_attr_enable_sdev_max_qd.attr,
NULL,
};
+static const struct attribute_group mpt3sas_host_attr_group = {
+ .attrs = mpt3sas_host_attrs
+};
+
+const struct attribute_group *mpt3sas_host_groups[] = {
+ &mpt3sas_host_attr_group,
+ NULL
+};
+
/* device attributes */
/**
@@ -3976,14 +3985,23 @@ sas_ncq_prio_enable_store(struct device *dev,
}
static DEVICE_ATTR_RW(sas_ncq_prio_enable);
-struct device_attribute *mpt3sas_dev_attrs[] = {
- &dev_attr_sas_address,
- &dev_attr_sas_device_handle,
- &dev_attr_sas_ncq_prio_supported,
- &dev_attr_sas_ncq_prio_enable,
+static struct attribute *mpt3sas_dev_attrs[] = {
+ &dev_attr_sas_address.attr,
+ &dev_attr_sas_device_handle.attr,
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL,
};
+static const struct attribute_group mpt3sas_dev_attr_group = {
+ .attrs = mpt3sas_dev_attrs
+};
+
+const struct attribute_group *mpt3sas_dev_groups[] = {
+ &mpt3sas_dev_attr_group,
+ NULL
+};
+
/* file operations table for mpt3ctl device */
static const struct file_operations ctl_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index ad1b6c2b37a7..cee7170beae8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3314,7 +3314,7 @@ scsih_abort(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -3390,7 +3390,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -3470,7 +3470,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
starget_printk(KERN_INFO, starget,
"target been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -5030,7 +5030,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
scmd->result = DID_NO_CONNECT << 16;
else
scmd->result = DID_RESET << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
@@ -5142,13 +5142,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -5158,7 +5158,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
handle = sas_target_priv_data->handle;
if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -5169,7 +5169,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
} else if (sas_target_priv_data->deleted) {
/* device has been deleted */
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
} else if (sas_target_priv_data->tm_busy ||
sas_device_priv_data->block) {
@@ -5912,7 +5912,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scsi_dma_unmap(scmd);
mpt3sas_base_free_smid(ioc, smid);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -11878,8 +11878,8 @@ static struct scsi_host_template mpt2sas_driver_template = {
.sg_tablesize = MPT2SAS_SG_DEPTH,
.max_sectors = 32767,
.cmd_per_lun = 7,
- .shost_attrs = mpt3sas_host_attrs,
- .sdev_attrs = mpt3sas_dev_attrs,
+ .shost_groups = mpt3sas_host_groups,
+ .sdev_groups = mpt3sas_dev_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
};
@@ -11917,8 +11917,8 @@ static struct scsi_host_template mpt3sas_driver_template = {
.max_sectors = 32767,
.max_segment_size = 0xffffffff,
.cmd_per_lun = 7,
- .shost_attrs = mpt3sas_host_attrs,
- .sdev_attrs = mpt3sas_dev_attrs,
+ .shost_groups = mpt3sas_host_groups,
+ .sdev_groups = mpt3sas_dev_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
.map_queues = scsih_map_queues,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index f18dd9703595..dcae2d4464f9 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -25,7 +25,7 @@ static const struct mvs_chip_info mvs_chips[] = {
[chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
};
-static struct device_attribute *mvst_host_attrs[];
+static const struct attribute_group *mvst_host_groups[];
#define SOC_SAS_NUM 2
@@ -52,7 +52,7 @@ static struct scsi_host_template mvs_sht = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = mvst_host_attrs,
+ .shost_groups = mvst_host_groups,
.track_queue_depth = 1,
};
@@ -773,12 +773,14 @@ static void __exit mvs_exit(void)
sas_release_transport(mvs_stt);
}
-static struct device_attribute *mvst_host_attrs[] = {
- &dev_attr_driver_version,
- &dev_attr_interrupt_coalescing,
+static struct attribute *mvst_host_attrs[] = {
+ &dev_attr_driver_version.attr,
+ &dev_attr_interrupt_coalescing.attr,
NULL,
};
+ATTRIBUTE_GROUPS(mvst_host);
+
module_init(mvs_init);
module_exit(mvs_exit);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 4d251bf630a3..904de62c974c 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1328,7 +1328,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
scmd->sc_data_direction);
- cmd->scmd->scsi_done(scmd);
+ scsi_done(scmd);
mvumi_return_cmd(mhba, cmd);
}
@@ -2104,7 +2104,7 @@ static int mvumi_queue_command(struct Scsi_Host *shost,
out_return_cmd:
mvumi_return_cmd(mhba, cmd);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
spin_unlock_irqrestore(shost->host_lock, irq_flags);
return 0;
}
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index a4a88323e020..2a4506a5083e 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1282,7 +1282,7 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
if (nsge > 1) {
dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
scmd->result = (DID_ERROR << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -1436,13 +1436,13 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
sdev->id, ldev_info ? ldev_info->state : 0xff);
scmd->result = (DID_BAD_TARGET << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
switch (scmd->cmnd[0]) {
case TEST_UNIT_READY:
scmd->result = (DID_OK << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case INQUIRY:
if (scmd->cmnd[1] & 1) {
@@ -1452,11 +1452,11 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
myrb_inquiry(cb, scmd);
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case SYNCHRONIZE_CACHE:
scmd->result = (DID_OK << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case MODE_SENSE:
if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
@@ -1467,25 +1467,25 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
myrb_mode_sense(cb, scmd, ldev_info);
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case READ_CAPACITY:
if ((scmd->cmnd[1] & 1) ||
(scmd->cmnd[8] & 1)) {
/* Illegal request, invalid field in CDB */
scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
lba = get_unaligned_be32(&scmd->cmnd[2]);
if (lba) {
/* Illegal request, invalid field in CDB */
scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
myrb_read_capacity(cb, scmd, ldev_info);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case REQUEST_SENSE:
myrb_request_sense(cb, scmd);
@@ -1499,13 +1499,13 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
/* Assume good status */
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case READ_6:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
fallthrough;
@@ -1519,7 +1519,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
fallthrough;
@@ -1533,7 +1533,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
fallthrough;
@@ -1546,7 +1546,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
default:
/* Illegal request, invalid opcode */
scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -1610,7 +1610,7 @@ static int myrb_queuecommand(struct Scsi_Host *shost,
if (sdev->channel > myrb_logical_channel(shost)) {
scmd->result = (DID_BAD_TARGET << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
if (sdev->channel == myrb_logical_channel(shost))
@@ -2182,22 +2182,26 @@ static ssize_t flush_cache_store(struct device *dev,
}
static DEVICE_ATTR_WO(flush_cache);
-static struct device_attribute *myrb_sdev_attrs[] = {
- &dev_attr_rebuild,
- &dev_attr_consistency_check,
- &dev_attr_raid_state,
- &dev_attr_raid_level,
+static struct attribute *myrb_sdev_attrs[] = {
+ &dev_attr_rebuild.attr,
+ &dev_attr_consistency_check.attr,
+ &dev_attr_raid_state.attr,
+ &dev_attr_raid_level.attr,
NULL,
};
-static struct device_attribute *myrb_shost_attrs[] = {
- &dev_attr_ctlr_num,
- &dev_attr_model,
- &dev_attr_firmware,
- &dev_attr_flush_cache,
+ATTRIBUTE_GROUPS(myrb_sdev);
+
+static struct attribute *myrb_shost_attrs[] = {
+ &dev_attr_ctlr_num.attr,
+ &dev_attr_model.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_flush_cache.attr,
NULL,
};
+ATTRIBUTE_GROUPS(myrb_shost);
+
static struct scsi_host_template myrb_template = {
.module = THIS_MODULE,
.name = "DAC960",
@@ -2209,8 +2213,8 @@ static struct scsi_host_template myrb_template = {
.slave_destroy = myrb_slave_destroy,
.bios_param = myrb_biosparam,
.cmd_size = sizeof(struct myrb_cmdblk),
- .shost_attrs = myrb_shost_attrs,
- .sdev_attrs = myrb_sdev_attrs,
+ .shost_groups = myrb_shost_groups,
+ .sdev_groups = myrb_sdev_groups,
.this_id = -1,
};
@@ -2361,7 +2365,7 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
scmd->result = (DID_ERROR << 16);
break;
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 07f274afd7e5..6ea323e9a2e3 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1286,14 +1286,16 @@ static ssize_t consistency_check_store(struct device *dev,
}
static DEVICE_ATTR_RW(consistency_check);
-static struct device_attribute *myrs_sdev_attrs[] = {
- &dev_attr_consistency_check,
- &dev_attr_rebuild,
- &dev_attr_raid_state,
- &dev_attr_raid_level,
+static struct attribute *myrs_sdev_attrs[] = {
+ &dev_attr_consistency_check.attr,
+ &dev_attr_rebuild.attr,
+ &dev_attr_raid_state.attr,
+ &dev_attr_raid_level.attr,
NULL,
};
+ATTRIBUTE_GROUPS(myrs_sdev);
+
static ssize_t serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1510,20 +1512,22 @@ static ssize_t disable_enclosure_messages_store(struct device *dev,
}
static DEVICE_ATTR_RW(disable_enclosure_messages);
-static struct device_attribute *myrs_shost_attrs[] = {
- &dev_attr_serial,
- &dev_attr_ctlr_num,
- &dev_attr_processor,
- &dev_attr_model,
- &dev_attr_ctlr_type,
- &dev_attr_cache_size,
- &dev_attr_firmware,
- &dev_attr_discovery,
- &dev_attr_flush_cache,
- &dev_attr_disable_enclosure_messages,
+static struct attribute *myrs_shost_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_ctlr_num.attr,
+ &dev_attr_processor.attr,
+ &dev_attr_model.attr,
+ &dev_attr_ctlr_type.attr,
+ &dev_attr_cache_size.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_discovery.attr,
+ &dev_attr_flush_cache.attr,
+ &dev_attr_disable_enclosure_messages.attr,
NULL,
};
+ATTRIBUTE_GROUPS(myrs_shost);
+
/*
* SCSI midlayer interface
*/
@@ -1595,14 +1599,14 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
if (!scmd->device->hostdata) {
scmd->result = (DID_NO_CONNECT << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
switch (scmd->cmnd[0]) {
case REPORT_LUNS:
scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case MODE_SENSE:
if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
@@ -1616,7 +1620,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
myrs_mode_sense(cs, scmd, ldev_info);
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
break;
@@ -1756,7 +1760,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
if (WARN_ON(!hw_sgl)) {
scsi_dma_unmap(scmd);
scmd->result = (DID_ERROR << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
@@ -1923,8 +1927,8 @@ static struct scsi_host_template myrs_template = {
.slave_configure = myrs_slave_configure,
.slave_destroy = myrs_slave_destroy,
.cmd_size = sizeof(struct myrs_cmdblk),
- .shost_attrs = myrs_shost_attrs,
- .sdev_attrs = myrs_sdev_attrs,
+ .shost_groups = myrs_shost_groups,
+ .sdev_groups = myrs_sdev_groups,
.this_id = -1,
};
@@ -2083,7 +2087,7 @@ static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
scmd->result = (DID_BAD_TARGET << 16);
else
scmd->result = (DID_OK << 16) | status;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 2b8c6fa5e775..fc8abe05fa8f 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4003,7 +4003,7 @@ static inline void ncr_flush_done_cmds(struct scsi_cmnd *lcmd)
while (lcmd) {
cmd = lcmd;
lcmd = (struct scsi_cmnd *) cmd->host_scribble;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
@@ -7852,8 +7852,9 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device)
return 0;
}
-static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
unsigned long flags;
int sts;
@@ -7862,7 +7863,6 @@ static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(stru
printk("ncr53c8xx_queue_command\n");
#endif
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->__data_mapped = 0;
cmd->__data_mapping = 0;
@@ -8039,11 +8039,13 @@ static struct device_attribute ncr53c8xx_revision_attr = {
.show = show_ncr53c8xx_revision,
};
-static struct device_attribute *ncr53c8xx_host_attrs[] = {
- &ncr53c8xx_revision_attr,
+static struct attribute *ncr53c8xx_host_attrs[] = {
+ &ncr53c8xx_revision_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(ncr53c8xx_host);
+
/*==========================================================
**
** Boot command line.
@@ -8085,8 +8087,8 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
if (!tpnt->name)
tpnt->name = SCSI_NCR_DRIVER_NAME;
- if (!tpnt->shost_attrs)
- tpnt->shost_attrs = ncr53c8xx_host_attrs;
+ if (!tpnt->shost_groups)
+ tpnt->shost_groups = ncr53c8xx_host_groups;
tpnt->queuecommand = ncr53c8xx_queue_command;
tpnt->slave_configure = ncr53c8xx_slave_configure;
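
ncr53c8xx (and nsp32, nsp_cs and sym53c500 below) highlight the companion change: the locked queuecommand variant loses its completion-callback parameter, and drivers that still want a local done pointer simply initialise it to scsi_done. Assuming the updated DEF_SCSI_QCMD() from this series, which invokes the _lck variant with only the command, a converted driver reduces to something like the following (hypothetical foo_* names, deliberately hardware-free):

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static int foo_queuecommand_lck(struct scsi_cmnd *cmd)
    {
            /*
             * Hypothetical minimal body: there is no hardware behind it, so
             * every command is finished immediately through the midlayer
             * helper that used to be reachable only via cmd->scsi_done.
             */
            cmd->result = DID_NO_CONNECT << 16;
            scsi_done(cmd);
            return 0;
    }

    /* DEF_SCSI_QCMD() still supplies the host-lock wrapper used as ->queuecommand. */
    static DEF_SCSI_QCMD(foo_queuecommand)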
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index bc9d29e5fdba..bd3ee3bf08ee 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -904,9 +904,9 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
return TRUE;
}
-static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target;
nsp32_lunt *cur_lunt;
@@ -945,7 +945,6 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt,
show_command(SCpnt);
- SCpnt->scsi_done = done;
data->CurrentSC = SCpnt;
SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
@@ -1546,7 +1545,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
/*
* call scsi_done
*/
- (*SCpnt->scsi_done)(SCpnt);
+ scsi_done(SCpnt);
/*
* reset parameters
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 7c0f931e55e8..8b9e889bc306 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -178,11 +178,10 @@ static void nsp_scsi_done(struct scsi_cmnd *SCpnt)
data->CurrentSC = NULL;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
}
-static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt)
{
#ifdef NSP_DEBUG
/*unsigned int host_id = SCpnt->device->host->this_id;*/
@@ -197,8 +196,6 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);
- SCpnt->scsi_done = done;
-
if (data->CurrentSC != NULL) {
nsp_msg(KERN_DEBUG, "CurrentSC!=NULL this can't be happen");
SCpnt->result = DID_BAD_TARGET << 16;
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index a366ff1a3959..fc93d2a57e1e 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -492,7 +492,7 @@ out:
idle_out:
curSC->SCp.phase = idle;
- curSC->scsi_done(curSC);
+ scsi_done(curSC);
goto out;
}
@@ -537,8 +537,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
return (info_msg);
}
-static int
-SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
{
int i;
int port_base = SCpnt->device->host->io_port;
@@ -556,7 +555,6 @@ SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
VDEB(printk("\n"));
data->current_SC = SCpnt;
- data->current_SC->scsi_done = done;
data->current_SC->SCp.phase = command_ph;
data->current_SC->SCp.Status = 0;
data->current_SC->SCp.Message = 0;
@@ -652,11 +650,13 @@ static struct device_attribute SYM53C500_pio_attr = {
.store = SYM53C500_store_pio,
};
-static struct device_attribute *SYM53C500_shost_attrs[] = {
- &SYM53C500_pio_attr,
+static struct attribute *SYM53C500_shost_attrs[] = {
+ &SYM53C500_pio_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(SYM53C500_shost);
+
/*
* scsi_host_template initializer
*/
@@ -671,7 +671,7 @@ static struct scsi_host_template sym53c500_driver_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = 32,
- .shost_attrs = SYM53C500_shost_attrs
+ .shost_groups = SYM53C500_shost_groups
};
static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index ec05c42e8ee6..397eb9f6a1dd 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -409,6 +409,7 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
char *str = buf;
int start = 0;
u32 ib_offset = pm8001_ha->ib_offset;
+ u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define IB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
memoryMap.region[ib_offset].virt_ptr + \
@@ -419,7 +420,7 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
start = start + 4;
}
pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
- if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ if (((pm8001_ha->evtlog_ib_offset) % queue_size) == 0)
pm8001_ha->evtlog_ib_offset = 0;
return str - buf;
@@ -445,6 +446,7 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
char *str = buf;
int start = 0;
u32 ob_offset = pm8001_ha->ob_offset;
+ u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define OB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
memoryMap.region[ob_offset].virt_ptr + \
@@ -455,7 +457,7 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
start = start + 4;
}
pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
- if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ if (((pm8001_ha->evtlog_ob_offset) % queue_size) == 0)
pm8001_ha->evtlog_ob_offset = 0;
return str - buf;
@@ -1000,34 +1002,42 @@ static ssize_t ctl_iop1_count_show(struct device *cdev,
}
static DEVICE_ATTR_RO(ctl_iop1_count);
-struct device_attribute *pm8001_host_attrs[] = {
- &dev_attr_interface_rev,
- &dev_attr_controller_fatal_error,
- &dev_attr_fw_version,
- &dev_attr_update_fw,
- &dev_attr_aap_log,
- &dev_attr_iop_log,
- &dev_attr_fatal_log,
- &dev_attr_non_fatal_log,
- &dev_attr_non_fatal_count,
- &dev_attr_gsm_log,
- &dev_attr_max_out_io,
- &dev_attr_max_devices,
- &dev_attr_max_sg_list,
- &dev_attr_sas_spec_support,
- &dev_attr_logging_level,
- &dev_attr_event_log_size,
- &dev_attr_host_sas_address,
- &dev_attr_bios_version,
- &dev_attr_ib_log,
- &dev_attr_ob_log,
- &dev_attr_ila_version,
- &dev_attr_inc_fw_ver,
- &dev_attr_ctl_mpi_state,
- &dev_attr_ctl_hmi_error,
- &dev_attr_ctl_raae_count,
- &dev_attr_ctl_iop0_count,
- &dev_attr_ctl_iop1_count,
+static struct attribute *pm8001_host_attrs[] = {
+ &dev_attr_interface_rev.attr,
+ &dev_attr_controller_fatal_error.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_update_fw.attr,
+ &dev_attr_aap_log.attr,
+ &dev_attr_iop_log.attr,
+ &dev_attr_fatal_log.attr,
+ &dev_attr_non_fatal_log.attr,
+ &dev_attr_non_fatal_count.attr,
+ &dev_attr_gsm_log.attr,
+ &dev_attr_max_out_io.attr,
+ &dev_attr_max_devices.attr,
+ &dev_attr_max_sg_list.attr,
+ &dev_attr_sas_spec_support.attr,
+ &dev_attr_logging_level.attr,
+ &dev_attr_event_log_size.attr,
+ &dev_attr_host_sas_address.attr,
+ &dev_attr_bios_version.attr,
+ &dev_attr_ib_log.attr,
+ &dev_attr_ob_log.attr,
+ &dev_attr_ila_version.attr,
+ &dev_attr_inc_fw_ver.attr,
+ &dev_attr_ctl_mpi_state.attr,
+ &dev_attr_ctl_hmi_error.attr,
+ &dev_attr_ctl_raae_count.attr,
+ &dev_attr_ctl_iop0_count.attr,
+ &dev_attr_ctl_iop1_count.attr,
NULL,
};
+static const struct attribute_group pm8001_host_attr_group = {
+ .attrs = pm8001_host_attrs
+};
+
+const struct attribute_group *pm8001_host_groups[] = {
+ &pm8001_host_attr_group,
+ NULL
+};
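
pm8001 cannot use the ATTRIBUTE_GROUPS() shorthand because the group array is consumed in a different file (it is declared extern in pm8001_sas.h and plugged into the template in pm8001_init.c), so the group and the group list are spelled out by hand; qedf and qedi below follow the same split. A sketch of that open-coded form with hypothetical foo_* names:

    /* foo_attr.c */
    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t fw_version_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "1.0\n");        /* placeholder value */
    }
    static DEVICE_ATTR_RO(fw_version);

    static struct attribute *foo_host_attrs[] = {
            &dev_attr_fw_version.attr,
            NULL,
    };

    static const struct attribute_group foo_host_attr_group = {
            .attrs = foo_host_attrs,
    };

    /* Exported by name so another compilation unit can set .shost_groups to it. */
    const struct attribute_group *foo_host_groups[] = {
            &foo_host_attr_group,
            NULL,
    };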
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 63690508313b..124cb69740c6 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3169,7 +3169,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
* fw_control_context->usrAddr
*/
complete(pm8001_ha->nvmd_completion);
- pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
+ pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n");
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
pm8001_tag_free(pm8001_ha, tag);
@@ -3358,6 +3358,8 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
pm8001_dbg(pm8001_ha, MSG,
@@ -3434,6 +3436,8 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
unsigned long flags;
pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
port_id, phy_id);
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
port->port_attached = 1;
@@ -4460,6 +4464,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
+ struct pm8001_port *port = dev->port->lldd_port;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
@@ -4476,8 +4481,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
if (pm8001_dev->dev_type == SAS_SATA_DEV)
stp_sspsmp_sata = 0x00; /* stp*/
else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
- pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
if (parent_dev && dev_is_expander(parent_dev->dev_type))
@@ -4488,7 +4492,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
payload.phyid_portid =
- cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
+ cpu_to_le32(((port->port_id) & 0x0F) |
((phy_id & 0x0F) << 4));
payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
((linkrate & 0x0F) * 0x1000000) |
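
Besides the queue-size fix in pm8001_ctl.c, the SPC phy-up handlers above now cache phy->port and the controller-assigned port_id, so device registration reads the port id from the LLDD port instead of sas_device->port->id, and the two explicit expander-type comparisons collapse into the libsas dev_is_expander() helper. That helper is roughly the following (paraphrased from memory of include/scsi/libsas.h, not copied from this patch):

    static inline bool dev_is_expander(enum sas_device_type type)
    {
            return type == SAS_EDGE_EXPANDER_DEVICE ||
                   type == SAS_FANOUT_EXPANDER_DEVICE;
    }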
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 47db7e0beae6..bed8cc125544 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -107,7 +107,7 @@ static struct scsi_host_template pm8001_sht = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = pm8001_host_attrs,
+ .shost_groups = pm8001_host_groups,
.track_queue_depth = 1,
};
@@ -128,6 +128,7 @@ static struct sas_domain_function_template pm8001_transport_ops = {
.lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
.lldd_lu_reset = pm8001_lu_reset,
.lldd_query_task = pm8001_query_task,
+ .lldd_port_formed = pm8001_port_formed,
};
/**
@@ -1198,6 +1199,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out;
/* Memory region for ccb_info*/
+ pm8001_ha->ccb_count = ccb_count;
pm8001_ha->ccb_info =
kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL);
if (!pm8001_ha->ccb_info) {
@@ -1259,6 +1261,16 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
tasklet_kill(&pm8001_ha->tasklet[j]);
#endif
scsi_host_put(pm8001_ha->shost);
+
+ for (i = 0; i < pm8001_ha->ccb_count; i++) {
+ dma_free_coherent(&pm8001_ha->pdev->dev,
+ sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
+ pm8001_ha->ccb_info[i].buf_prd,
+ pm8001_ha->ccb_info[i].ccb_dma_handle);
+ }
+ kfree(pm8001_ha->ccb_info);
+ kfree(pm8001_ha->devices);
+
pm8001_free(pm8001_ha);
kfree(sha->sas_phy);
kfree(sha->sas_port);
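
The new teardown code in pm8001_pci_remove() is why ccb_count is now stored in the HBA structure: each CCB owns a dma_alloc_coherent() PRD buffer, and without the count the remove path could not walk the array to release them. A simplified sketch of the alloc/free pairing this depends on (the foo_* names and prd_bytes are stand-ins, and the allocation-failure unwind is elided):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/pci.h>
    #include <linux/slab.h>

    struct foo_ccb {                        /* stand-in for struct pm8001_ccb_info */
            void *buf_prd;
            dma_addr_t ccb_dma_handle;
    };

    struct foo_hba {                        /* stand-in for struct pm8001_hba_info */
            struct pci_dev *pdev;
            struct foo_ccb *ccb_info;
            u32 ccb_count;
    };

    static int foo_alloc_ccbs(struct foo_hba *hba, u32 count, size_t prd_bytes)
    {
            u32 i;

            hba->ccb_info = kcalloc(count, sizeof(*hba->ccb_info), GFP_KERNEL);
            if (!hba->ccb_info)
                    return -ENOMEM;
            hba->ccb_count = count;         /* remembered so teardown can walk the array */

            for (i = 0; i < count; i++) {
                    hba->ccb_info[i].buf_prd =
                            dma_alloc_coherent(&hba->pdev->dev, prd_bytes,
                                               &hba->ccb_info[i].ccb_dma_handle,
                                               GFP_KERNEL);
                    if (!hba->ccb_info[i].buf_prd)
                            return -ENOMEM; /* real code would unwind partial allocations */
            }
            return 0;
    }

    static void foo_free_ccbs(struct foo_hba *hba, size_t prd_bytes)
    {
            u32 i;

            for (i = 0; i < hba->ccb_count; i++)
                    dma_free_coherent(&hba->pdev->dev, prd_bytes,
                                      hba->ccb_info[i].buf_prd,
                                      hba->ccb_info[i].ccb_dma_handle);
            kfree(hba->ccb_info);
    }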
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 32e60f0c3b14..83e73009db5c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1355,3 +1355,18 @@ int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
tmf_task.tmf = TMF_CLEAR_TASK_SET;
return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
+
+void pm8001_port_formed(struct asd_sas_phy *sas_phy)
+{
+ struct sas_ha_struct *sas_ha = sas_phy->ha;
+ struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
+ struct pm8001_phy *phy = sas_phy->lldd_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct pm8001_port *port = phy->port;
+
+ if (!sas_port) {
+ pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
+ return;
+ }
+ sas_port->lldd_port = port;
+}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 62d08b535a4b..83eec16d021d 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -230,6 +230,7 @@ struct pm8001_port {
u8 port_attached;
u16 wide_port_phymap;
u8 port_state;
+ u8 port_id;
struct list_head list;
};
@@ -457,6 +458,7 @@ struct outbound_queue_table {
__le32 producer_index;
u32 consumer_idx;
spinlock_t oq_lock;
+ unsigned long lock_flags;
};
struct pm8001_hba_memspace {
void __iomem *memvirtaddr;
@@ -516,6 +518,7 @@ struct pm8001_hba_info {
u32 iomb_size; /* SPC and SPCV IOMB size */
struct pm8001_device *devices;
struct pm8001_ccb_info *ccb_info;
+ u32 ccb_count;
#ifdef PM8001_USE_MSIX
int number_of_intr;/*will be used in remove()*/
char intr_drvname[PM8001_MAX_MSIX_VEC]
@@ -651,6 +654,7 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
int pm8001_I_T_nexus_reset(struct domain_device *dev);
int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
int pm8001_query_task(struct sas_task *task);
+void pm8001_port_formed(struct asd_sas_phy *sas_phy);
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
struct sas_task *task_to_close,
@@ -729,7 +733,7 @@ ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha);
void pm8001_free_dev(struct pm8001_device *pm8001_dev);
/* ctl shared API */
-extern struct device_attribute *pm8001_host_attrs[];
+extern const struct attribute_group *pm8001_host_groups[];
static inline void
pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
@@ -738,9 +742,7 @@ pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
{
pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
smp_mb(); /*in order to force CPU ordering*/
- spin_unlock(&pm8001_ha->lock);
task->task_done(task);
- spin_lock(&pm8001_ha->lock);
}
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 6ffe17b849ae..b9f6d83ff380 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2379,7 +2379,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
/*See the comments for mpi_ssp_completion */
static void
-mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
@@ -2616,7 +2617,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2632,7 +2637,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2656,7 +2665,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2727,7 +2740,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2747,7 +2764,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2785,12 +2806,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
}
}
/*See the comments for mpi_ssp_completion */
-static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct task_status_struct *ts;
@@ -2890,7 +2916,11 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -3002,7 +3032,11 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
}
}
@@ -3299,6 +3333,8 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
port->wide_port_phymap |= (1U << phy_id);
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
@@ -3380,6 +3416,8 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
"port id %d, phy id %d link_rate %d portstate 0x%x\n",
port_id, phy_id, link_rate, portstate);
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
port->port_attached = 1;
@@ -3902,7 +3940,8 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
-static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
__le32 pHeader = *(__le32 *)piomb;
u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
@@ -3944,11 +3983,11 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case OPC_OUB_SATA_COMP:
pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
- mpi_sata_completion(pm8001_ha, piomb);
+ mpi_sata_completion(pm8001_ha, circularQ, piomb);
break;
case OPC_OUB_SATA_EVENT:
pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
- mpi_sata_event(pm8001_ha, piomb);
+ mpi_sata_event(pm8001_ha, circularQ, piomb);
break;
case OPC_OUB_SSP_EVENT:
pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
@@ -4117,7 +4156,6 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
void *pMsg1 = NULL;
u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
- unsigned long flags;
u32 regval;
if (vec == (pm8001_ha->max_q_num - 1)) {
@@ -4134,7 +4172,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
}
}
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
- spin_lock_irqsave(&circularQ->oq_lock, flags);
+ spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags);
do {
/* spurious interrupt during setup if kexec-ing and
* driver doing a doorbell access w/ the pre-kexec oq
@@ -4145,7 +4183,8 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
if (MPI_IO_STATUS_SUCCESS == ret) {
/* process the outbound message */
- process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+ process_one_iomb(pm8001_ha, circularQ,
+ (void *)(pMsg1 - 4));
/* free the message from the outbound circular buffer */
pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
circularQ, bc);
@@ -4160,7 +4199,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
break;
}
} while (1);
- spin_unlock_irqrestore(&circularQ->oq_lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags);
return ret;
}
@@ -4808,6 +4847,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
+ struct pm8001_port *port = dev->port->lldd_port;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
@@ -4825,8 +4865,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
if (pm8001_dev->dev_type == SAS_SATA_DEV)
stp_sspsmp_sata = 0x00; /* stp*/
else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
- pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
if (parent_dev && dev_is_expander(parent_dev->dev_type))
@@ -4840,7 +4879,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
payload.phyid_portid =
- cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+ cpu_to_le32(((port->port_id) & 0xFF) |
((phy_id & 0xFF) << 8));
payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
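
The pm80xx SATA completion and event paths above drop the outbound-queue lock around pm8001_ccb_task_free_done(), which in turn no longer toggles pm8001_ha->lock (see the pm8001_sas.h hunk). To let a nested helper release and retake the lock, process_oq() now keeps the irqsave flags inside the queue table rather than on its own stack. The general shape of that pattern, with hypothetical foo_* names:

    #include <linux/spinlock.h>
    #include <scsi/libsas.h>

    struct foo_oq {
            spinlock_t lock;
            unsigned long lock_flags;       /* irqsave state shared with nested helpers */
    };

    static void foo_complete_task(struct foo_oq *oq, struct sas_task *task)
    {
            /*
             * task->task_done() calls back into libsas and the upper layers,
             * so the queue lock is dropped around the upcall and retaken
             * before control returns to the queue-processing loop.
             */
            spin_unlock_irqrestore(&oq->lock, oq->lock_flags);
            task->task_done(task);
            spin_lock_irqsave(&oq->lock, oq->lock_flags);
    }

    static void foo_process_oq(struct foo_oq *oq)
    {
            spin_lock_irqsave(&oq->lock, oq->lock_flags);
            /* ... consume outbound IOMBs, calling foo_complete_task() as needed ... */
            spin_unlock_irqrestore(&oq->lock, oq->lock_flags);
    }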
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index bffd9a9349e7..88046a793767 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -837,7 +837,7 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
scsi_dma_unmap(scsi_cmd);
pmcraid_return_cmd(cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
/**
@@ -2017,7 +2017,7 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
le32_to_cpu(resp) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
scsi_cmd->result);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
} else if (cmd->cmd_done == pmcraid_internal_done ||
cmd->cmd_done == pmcraid_erp_done) {
cmd->cmd_done(cmd);
@@ -2814,7 +2814,7 @@ static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
if (rc == 0) {
scsi_dma_unmap(scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
return rc;
@@ -3313,10 +3313,7 @@ static int pmcraid_copy_sglist(
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
*/
-static int pmcraid_queuecommand_lck(
- struct scsi_cmnd *scsi_cmd,
- void (*done) (struct scsi_cmnd *)
-)
+static int pmcraid_queuecommand_lck(struct scsi_cmnd *scsi_cmd)
{
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
@@ -3328,7 +3325,6 @@ static int pmcraid_queuecommand_lck(
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
- scsi_cmd->scsi_done = done;
res = scsi_cmd->device->hostdata;
scsi_cmd->result = (DID_OK << 16);
@@ -3338,7 +3334,7 @@ static int pmcraid_queuecommand_lck(
if (pinstance->ioa_state == IOA_STATE_DEAD) {
pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
scsi_cmd->result = (DID_NO_CONNECT << 16);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
return 0;
}
@@ -3351,7 +3347,7 @@ static int pmcraid_queuecommand_lck(
*/
if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
return 0;
}
@@ -4097,13 +4093,14 @@ static struct device_attribute pmcraid_adapter_id_attr = {
.show = pmcraid_show_adapter_id,
};
-static struct device_attribute *pmcraid_host_attrs[] = {
- &pmcraid_log_level_attr,
- &pmcraid_driver_version_attr,
- &pmcraid_adapter_id_attr,
+static struct attribute *pmcraid_host_attrs[] = {
+ &pmcraid_log_level_attr.attr,
+ &pmcraid_driver_version_attr.attr,
+ &pmcraid_adapter_id_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(pmcraid_host);
/* host template structure for pmcraid driver */
static struct scsi_host_template pmcraid_host_template = {
@@ -4126,7 +4123,7 @@ static struct scsi_host_template pmcraid_host_template = {
.max_sectors = PMCRAID_IOA_MAX_SECTORS,
.no_write_same = 1,
.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
- .shost_attrs = pmcraid_host_attrs,
+ .shost_groups = pmcraid_host_groups,
.proc_name = PMCRAID_DRIVER_NAME,
};
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 977315fdc254..003043de23a5 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -665,7 +665,7 @@ static void ppa_interrupt(struct work_struct *work)
dev->cur_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
@@ -786,8 +786,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
-static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done) (struct scsi_cmnd *))
+static int ppa_queuecommand_lck(struct scsi_cmnd *cmd)
{
ppa_struct *dev = ppa_dev(cmd->device->host);
@@ -798,7 +797,6 @@ static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
dev->failed = 0;
dev->jstart = jiffies;
dev->cur_cmd = cmd;
- cmd->scsi_done = done;
cmd->result = DID_ERROR << 16; /* default return code */
cmd->SCp.phase = 0; /* bus free */
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 0f4b99d92f12..2b80cab70333 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -200,8 +200,7 @@ static int ps3rom_write_request(struct ps3_storage_device *dev,
return 0;
}
-static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct ps3rom_private *priv = shost_priv(cmd->device->host);
struct ps3_storage_device *dev = priv->dev;
@@ -209,7 +208,6 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
int res;
priv->curr_cmd = cmd;
- cmd->scsi_done = done;
opcode = cmd->cmnd[0];
/*
@@ -237,7 +235,7 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0);
cmd->result = res;
priv->curr_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
return 0;
@@ -321,7 +319,7 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data)
done:
priv->curr_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 631a15969d21..ca987451b17e 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -498,7 +498,7 @@ extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
u8 cmd_type);
-extern struct device_attribute *qedf_host_attrs[];
+extern const struct attribute_group *qedf_host_groups[];
extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index 461c0c9180c4..fdc66d294813 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -60,12 +60,21 @@ static ssize_t fka_period_show(struct device *dev,
static DEVICE_ATTR_RO(fcoe_mac);
static DEVICE_ATTR_RO(fka_period);
-struct device_attribute *qedf_host_attrs[] = {
- &dev_attr_fcoe_mac,
- &dev_attr_fka_period,
+static struct attribute *qedf_host_attrs[] = {
+ &dev_attr_fcoe_mac.attr,
+ &dev_attr_fka_period.attr,
NULL,
};
+static const struct attribute_group qedf_host_attr_group = {
+ .attrs = qedf_host_attrs
+};
+
+const struct attribute_group *qedf_host_groups[] = {
+ &qedf_host_attr_group,
+ NULL
+};
+
extern const struct qed_fcoe_ops *qed_ops;
void qedf_capture_grc_dump(struct qedf_ctx *qedf)
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index b649f835d436..99a56ca1fb16 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -947,7 +947,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"Number of SG elements %d exceeds what hardware limitation of %d.\n",
num_sgs, QEDF_MAX_BDS_PER_CMD);
sc_cmd->result = DID_ERROR;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -957,7 +957,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"Returning DNC as unloading or stop io, flags 0x%lx.\n",
qedf->flags);
sc_cmd->result = DID_NO_CONNECT << 16;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -966,7 +966,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
sc_cmd);
sc_cmd->result = DID_NO_CONNECT << 16;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -976,7 +976,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
rval, rport->port_id);
sc_cmd->result = rval;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -1313,7 +1313,7 @@ out:
io_req->sc_cmd = NULL;
sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
}
@@ -1386,13 +1386,6 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
goto bad_scsi_ptr;
}
- if (!sc_cmd->scsi_done) {
- QEDF_ERR(&qedf->dbg_ctx,
- "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
- sc_cmd);
- goto bad_scsi_ptr;
- }
-
qedf_unmap_sg_list(qedf, io_req);
sc_cmd->result = result << 16;
@@ -1417,7 +1410,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
io_req->sc_cmd = NULL;
sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
return;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 0da32fd3302e..1bf7a22d4948 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -986,7 +986,7 @@ static struct scsi_host_template qedf_host_template = {
.cmd_per_lun = 32,
.max_sectors = 0xffff,
.queuecommand = qedf_queuecommand,
- .shost_attrs = qedf_host_attrs,
+ .shost_groups = qedf_host_groups,
.eh_abort_handler = qedf_eh_abort,
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 9f8e8ef405a1..72942772b198 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -22,7 +22,7 @@ extern struct iscsi_transport qedi_iscsi_transport;
extern const struct qed_iscsi_ops *qedi_ops;
extern const struct qedi_debugfs_ops qedi_debugfs_ops[];
extern const struct file_operations qedi_dbg_fops[];
-extern struct device_attribute *qedi_shost_attrs[];
+extern const struct attribute_group *qedi_shost_groups[];
int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index c5260429c637..88aa7d8b11c9 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -58,7 +58,7 @@ struct scsi_host_template qedi_host_template = {
.max_sectors = 0xffff,
.dma_boundary = QEDI_HW_DMA_BOUNDARY,
.cmd_per_lun = 128,
- .shost_attrs = qedi_shost_attrs,
+ .shost_groups = qedi_shost_groups,
};
static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
index be174d30eb7c..b00a7e08ef53 100644
--- a/drivers/scsi/qedi/qedi_sysfs.c
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -42,8 +42,17 @@ static ssize_t speed_show(struct device *dev,
static DEVICE_ATTR_RO(port_state);
static DEVICE_ATTR_RO(speed);
-struct device_attribute *qedi_shost_attrs[] = {
- &dev_attr_port_state,
- &dev_attr_speed,
+static struct attribute *qedi_shost_attrs[] = {
+ &dev_attr_port_state.attr,
+ &dev_attr_speed.attr,
+ NULL
+};
+
+static const struct attribute_group qedi_shost_attr_group = {
+ .attrs = qedi_shost_attrs
+};
+
+const struct attribute_group *qedi_shost_groups[] = {
+ &qedi_shost_attr_group,
NULL
};
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index d0b4e063bfe1..1dc56f4c89d8 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -689,15 +689,13 @@ qla1280_info(struct Scsi_Host *host)
* handling). Unfortunately, it sometimes calls the scheduler in interrupt
* context which is a big NO! NO!.
**************************************************************************/
-static int
-qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
+static int qla1280_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
struct srb *sp = (struct srb *)CMD_SP(cmd);
int status;
- cmd->scsi_done = fn;
sp->cmd = cmd;
sp->flags = 0;
sp->wait = NULL;
@@ -755,7 +753,7 @@ _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
sp->wait = NULL;
if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
status = SUCCESS;
- (*cmd->scsi_done)(cmd);
+ scsi_done(cmd);
}
return status;
}
@@ -1277,7 +1275,7 @@ qla1280_done(struct scsi_qla_host *ha)
ha->actthreads--;
if (sp->wait == NULL)
- (*(cmd)->scsi_done)(cmd);
+ scsi_done(cmd);
else
complete(sp->wait);
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d09776b77af2..30f9545d2285 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1868,6 +1868,18 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
+static const struct {
+ u16 rate;
+ char *str;
+} port_speed_str[] = {
+ { PORT_SPEED_4GB, "4" },
+ { PORT_SPEED_8GB, "8" },
+ { PORT_SPEED_16GB, "16" },
+ { PORT_SPEED_32GB, "32" },
+ { PORT_SPEED_64GB, "64" },
+ { PORT_SPEED_10GB, "10" },
+};
+
static ssize_t
qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1875,7 +1887,8 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
ssize_t rval;
- char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
+ u16 i;
+ char *speed = "Unknown";
rval = qla2x00_get_data_rate(vha);
if (rval != QLA_SUCCESS) {
@@ -1884,7 +1897,14 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
+ for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) {
+ if (port_speed_str[i].rate != ha->link_data_rate)
+ continue;
+ speed = port_speed_str[i].str;
+ break;
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
}
static ssize_t
@@ -2461,72 +2481,77 @@ static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
static DEVICE_ATTR_RO(edif_doorbell);
-
-struct device_attribute *qla2x00_host_attrs[] = {
- &dev_attr_driver_version,
- &dev_attr_fw_version,
- &dev_attr_serial_num,
- &dev_attr_isp_name,
- &dev_attr_isp_id,
- &dev_attr_model_name,
- &dev_attr_model_desc,
- &dev_attr_pci_info,
- &dev_attr_link_state,
- &dev_attr_zio,
- &dev_attr_zio_timer,
- &dev_attr_beacon,
- &dev_attr_beacon_config,
- &dev_attr_optrom_bios_version,
- &dev_attr_optrom_efi_version,
- &dev_attr_optrom_fcode_version,
- &dev_attr_optrom_fw_version,
- &dev_attr_84xx_fw_version,
- &dev_attr_total_isp_aborts,
- &dev_attr_serdes_version,
- &dev_attr_mpi_version,
- &dev_attr_phy_version,
- &dev_attr_flash_block_size,
- &dev_attr_vlan_id,
- &dev_attr_vn_port_mac_address,
- &dev_attr_fabric_param,
- &dev_attr_fw_state,
- &dev_attr_optrom_gold_fw_version,
- &dev_attr_thermal_temp,
- &dev_attr_diag_requests,
- &dev_attr_diag_megabytes,
- &dev_attr_fw_dump_size,
- &dev_attr_allow_cna_fw_dump,
- &dev_attr_pep_version,
- &dev_attr_min_supported_speed,
- &dev_attr_max_supported_speed,
- &dev_attr_zio_threshold,
- &dev_attr_dif_bundle_statistics,
- &dev_attr_port_speed,
- &dev_attr_port_no,
- &dev_attr_fw_attr,
- &dev_attr_dport_diagnostics,
- &dev_attr_edif_doorbell,
- &dev_attr_mpi_pause,
- NULL, /* reserve for qlini_mode */
- NULL, /* reserve for ql2xiniexchg */
- NULL, /* reserve for ql2xexchoffld */
+static struct attribute *qla2x00_host_attrs[] = {
+ &dev_attr_driver_version.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_serial_num.attr,
+ &dev_attr_isp_name.attr,
+ &dev_attr_isp_id.attr,
+ &dev_attr_model_name.attr,
+ &dev_attr_model_desc.attr,
+ &dev_attr_pci_info.attr,
+ &dev_attr_link_state.attr,
+ &dev_attr_zio.attr,
+ &dev_attr_zio_timer.attr,
+ &dev_attr_beacon.attr,
+ &dev_attr_beacon_config.attr,
+ &dev_attr_optrom_bios_version.attr,
+ &dev_attr_optrom_efi_version.attr,
+ &dev_attr_optrom_fcode_version.attr,
+ &dev_attr_optrom_fw_version.attr,
+ &dev_attr_84xx_fw_version.attr,
+ &dev_attr_total_isp_aborts.attr,
+ &dev_attr_serdes_version.attr,
+ &dev_attr_mpi_version.attr,
+ &dev_attr_phy_version.attr,
+ &dev_attr_flash_block_size.attr,
+ &dev_attr_vlan_id.attr,
+ &dev_attr_vn_port_mac_address.attr,
+ &dev_attr_fabric_param.attr,
+ &dev_attr_fw_state.attr,
+ &dev_attr_optrom_gold_fw_version.attr,
+ &dev_attr_thermal_temp.attr,
+ &dev_attr_diag_requests.attr,
+ &dev_attr_diag_megabytes.attr,
+ &dev_attr_fw_dump_size.attr,
+ &dev_attr_allow_cna_fw_dump.attr,
+ &dev_attr_pep_version.attr,
+ &dev_attr_min_supported_speed.attr,
+ &dev_attr_max_supported_speed.attr,
+ &dev_attr_zio_threshold.attr,
+ &dev_attr_dif_bundle_statistics.attr,
+ &dev_attr_port_speed.attr,
+ &dev_attr_port_no.attr,
+ &dev_attr_fw_attr.attr,
+ &dev_attr_dport_diagnostics.attr,
+ &dev_attr_edif_doorbell.attr,
+ &dev_attr_mpi_pause.attr,
+ &dev_attr_qlini_mode.attr,
+ &dev_attr_ql2xiniexchg.attr,
+ &dev_attr_ql2xexchoffld.attr,
NULL,
};
-void qla_insert_tgt_attrs(void)
+static umode_t qla_host_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
- struct device_attribute **attr;
+ if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL &&
+ (attr == &dev_attr_qlini_mode.attr ||
+ attr == &dev_attr_ql2xiniexchg.attr ||
+ attr == &dev_attr_ql2xexchoffld.attr))
+ return 0;
+ return attr->mode;
+}
- /* advance to empty slot */
- for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
- continue;
+static const struct attribute_group qla2x00_host_attr_group = {
+ .is_visible = qla_host_attr_is_visible,
+ .attrs = qla2x00_host_attrs
+};
- *attr = &dev_attr_qlini_mode;
- attr++;
- *attr = &dev_attr_ql2xiniexchg;
- attr++;
- *attr = &dev_attr_ql2xexchoffld;
-}
+const struct attribute_group *qla2x00_host_groups[] = {
+ &qla2x00_host_attr_group,
+ NULL
+};
/* Host attributes. */
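
For qla2xxx the dual-mode-only attributes are no longer spliced into reserved NULL slots at module init (qla_insert_tgt_attrs() is deleted); they are always listed, and an .is_visible() callback on the group hides them when the driver is not in dual mode. A minimal sketch of that mechanism, with a made-up dual_only attribute and mode flag standing in for qlini_mode and ql2x_ini_mode:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static bool foo_dual_mode;      /* illustrative mode flag, not the real ql2x_ini_mode */

    static ssize_t dual_only_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%d\n", foo_dual_mode);
    }
    static DEVICE_ATTR_RO(dual_only);

    static struct attribute *foo_host_attrs[] = {
            &dev_attr_dual_only.attr,
            NULL,
    };

    static umode_t foo_host_attr_is_visible(struct kobject *kobj,
                                            struct attribute *attr, int i)
    {
            /* Returning 0 hides the file; otherwise keep its declared mode. */
            if (!foo_dual_mode && attr == &dev_attr_dual_only.attr)
                    return 0;
            return attr->mode;
    }

    static const struct attribute_group foo_host_attr_group = {
            .is_visible     = foo_host_attr_is_visible,
            .attrs          = foo_host_attrs,
    };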
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 655cf5de604b..9da8034ccad4 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2877,6 +2877,9 @@ qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_j
case QL_VND_MANAGE_HOST_PORT:
return qla2x00_manage_host_port(bsg_job);
+ case QL_VND_MBX_PASSTHRU:
+ return qla2x00_mailbox_passthru(bsg_job);
+
default:
return -ENOSYS;
}
@@ -3013,3 +3016,48 @@ done:
sp->free(sp);
return 0;
}
+
+int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ int ret = -EINVAL;
+ int ptsize = sizeof(struct qla_mbx_passthru);
+ struct qla_mbx_passthru *req_data = NULL;
+ uint32_t req_data_len;
+
+ req_data_len = bsg_job->request_payload.payload_len;
+ if (req_data_len != ptsize) {
+ ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
+ return -EIO;
+ }
+ req_data = kzalloc(ptsize, GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0xf0a4,
+ "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, ptsize);
+ ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
+
+ /* Copy the req_data in request buffer */
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, req_data, ptsize);
+
+ bsg_reply->reply_payload_rcv_len = ptsize;
+ if (ret == QLA_SUCCESS)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ else
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
+
+ bsg_job->reply_len = sizeof(*bsg_job->reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ kfree(req_data);
+
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index dd793cf8bc1e..0f8a4c7e52a2 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -36,6 +36,7 @@
#define QL_VND_GET_HOST_STATS 0x24
#define QL_VND_GET_TGT_STATS 0x25
#define QL_VND_MANAGE_HOST_PORT 0x26
+#define QL_VND_MBX_PASSTHRU 0x2B
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -187,6 +188,12 @@ struct qla_port_param {
uint16_t speed;
} __attribute__ ((packed));
+struct qla_mbx_passthru {
+ uint16_t reserved1[2];
+ uint16_t mbx_in[32];
+ uint16_t mbx_out[32];
+ uint32_t reserved2[16];
+} __packed;
/* FRU VPD */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index be2eb75ee1a3..8924eeb9367d 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3750,6 +3750,7 @@ struct qla_qpair {
struct qla_fw_resources fwres ____cacheline_aligned;
u32 cmd_cnt;
u32 cmd_completion_cnt;
+ u32 prev_completion_cnt;
};
/* Place holder for FW buffer parameters */
@@ -4607,6 +4608,7 @@ struct qla_hw_data {
struct qla_chip_state_84xx *cs84xx;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
+ struct work_struct heartbeat_work;
struct qlfc_fw fw_buf;
/* FCP_CMND priority support */
@@ -4708,7 +4710,6 @@ struct qla_hw_data {
struct qla_hw_data_stat stat;
pci_error_state_t pci_error_state;
- u64 prev_cmd_cnt;
struct dma_pool *purex_dma_pool;
struct btree_head32 host_map;
@@ -4854,7 +4855,6 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 32
#define ISP_ABORT_TO_ROM 33
#define VPORT_DELETE 34
-#define HEARTBEAT_CHK 38
#define PROCESS_PUREX_IOCB 63
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1c3f055d41b8..3c4fa8bac88d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -662,9 +662,13 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
struct rsp_que **rsp, u8 *buf, u32 buf_len);
+int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
+ uint16_t *mbx_out);
+
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
@@ -738,8 +742,7 @@ uint qla25xx_fdmi_port_speed_currently(struct qla_hw_data *);
* Global Function Prototypes in qla_attr.c source file.
*/
struct device_attribute;
-extern struct device_attribute *qla2x00_host_attrs[];
-extern struct device_attribute *qla2x00_host_attrs_dm[];
+extern const struct attribute_group *qla2x00_host_groups[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -753,7 +756,6 @@ extern int qla2x00_echo_test(scsi_qla_host_t *,
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
struct qla_fcp_prio_cfg *, uint8_t);
-void qla_insert_tgt_attrs(void);
/*
* Global Function Prototypes in qla_dfs.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ebc8fdb0b43d..28b574e20ef3 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1537,7 +1537,8 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
}
if (IS_QLA2031(ha)) {
if ((ha->pdev->subsystem_vendor == 0x103C) &&
- (ha->pdev->subsystem_device == 0x8002)) {
+ ((ha->pdev->subsystem_device == 0x8002) ||
+ (ha->pdev->subsystem_device == 0x8086))) {
speeds = FDMI_PORT_SPEED_16GB;
} else {
speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5fc7697f0af4..908a72ebf7c2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5335,15 +5335,14 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
"LOOP READY.\n");
ha->flags.fw_init_done = 1;
+ /*
+ * use link up to wake up app to get ready for
+ * authentication.
+ */
if (ha->flags.edif_enabled &&
- !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
- N2N_TOPO(vha->hw)) {
- /*
- * use port online to wake up app to get ready
- * for authentication
- */
- qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0);
- }
+ !(vha->e_dbell.db_flags & EDB_ACTIVE))
+ qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
+ ha->link_data_rate);
/*
* Process any ATIO queue entries that came in
@@ -7026,12 +7025,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
+ ha->base_qpair->prev_completion_cnt = 0;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
ha->queue_pair_map[i]->cmd_cnt =
ha->queue_pair_map[i]->cmd_completion_cnt = 0;
+ ha->base_qpair->prev_completion_cnt = 0;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7811c4952035..73a353153d33 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3236,7 +3236,7 @@ qla24xx_abort_command(srb_t *sp)
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
+ struct req_que *req;
struct qla_qpair *qpair = sp->qpair;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
@@ -7011,3 +7011,36 @@ void qla_no_op_mb(struct scsi_qla_host *vha)
"Failed %s %x\n", __func__, rval);
}
}
+
+int qla_mailbox_passthru(scsi_qla_host_t *vha,
+ uint16_t *mbx_in, uint16_t *mbx_out)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = -EINVAL;
+
+ memset(&mc, 0, sizeof(mc));
+ /* Receiving all 32 register's contents */
+ memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
+
+ mcp->out_mb = 0xFFFFFFFF;
+ mcp->in_mb = 0xFFFFFFFF;
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ mcp->bufp = NULL;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
+ __func__);
+ /* passing all 32 register's contents */
+ memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 253055cf9daf..138ffdb5c92c 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -230,6 +230,8 @@ static void qla_nvme_abort_work(struct work_struct *work)
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
int rval, abts_done_called = 1;
+ bool io_wait_for_abort_done;
+ uint32_t handle;
ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
"%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
@@ -246,12 +248,20 @@ static void qla_nvme_abort_work(struct work_struct *work)
goto out;
}
+ /*
+ * sp may not be valid after abort_command if return code is either
+ * SUCCESS or ERR_FROM_FW codes, so cache the value here.
+ */
+ io_wait_for_abort_done = ql2xabts_wait_nvme &&
+ QLA_ABTS_WAIT_ENABLED(sp);
+ handle = sp->handle;
+
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
- sp, sp->handle, fcport, rval);
+ sp, handle, fcport, rval);
/*
* If async tmf is enabled, the abort callback is called only on
@@ -266,7 +276,7 @@ static void qla_nvme_abort_work(struct work_struct *work)
* are waited until ABTS complete. This kref is decreased
* at qla24xx_abort_sp_done function.
*/
- if (abts_done_called && ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
+ if (abts_done_called && io_wait_for_abort_done)
return;
out:
/* kref_get was done before work was schedule. */
@@ -391,6 +401,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
uint16_t avail_dsds;
struct dsd64 *cur_dsd;
struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
@@ -402,6 +413,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* Setup qpair pointers */
req = qpair->req;
+ rsp = qpair->rsp;
tot_dsds = fd->sg_cnt;
/* Acquire qpair specific lock */
@@ -563,6 +575,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
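
In qla_nvme_abort_work() the srb can be completed and freed by ->abort_command() itself, so the handle and the wait-for-ABTS decision are captured before the call instead of being read from sp afterwards. A generic, self-contained sketch of that snapshot-before-call pattern (the foo_* types and helpers are hypothetical):

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/printk.h>
    #include <linux/slab.h>

    struct foo_req {
            struct kref ref;
            u32 handle;
            bool wait_for_abort;
    };

    static void foo_req_release(struct kref *ref)
    {
            kfree(container_of(ref, struct foo_req, ref));
    }

    /* Stand-in for ->abort_command(): may drop the reference that frees 'req'. */
    static int foo_abort(struct foo_req *req)
    {
            kref_put(&req->ref, foo_req_release);
            return 0;
    }

    static void foo_abort_work(struct foo_req *req)
    {
            /* Snapshot first: 'req' may be gone once foo_abort() returns. */
            u32 handle = req->handle;
            bool wait = req->wait_for_abort;
            int rc = foo_abort(req);

            pr_debug("abort handle=%x rc=%d wait=%d\n", handle, rc, wait);
    }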
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 836fedcea241..c3ff50ffe205 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -737,7 +737,7 @@ void qla2x00_sp_compl(srb_t *sp, int res)
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
if (comp)
complete(comp);
}
@@ -828,7 +828,7 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
if (comp)
complete(comp);
}
@@ -950,7 +950,7 @@ qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -1038,7 +1038,7 @@ qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -1258,6 +1258,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
uint32_t ratov_j;
struct qla_qpair *qpair;
unsigned long flags;
+ int fast_fail_status = SUCCESS;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
@@ -1266,9 +1267,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return FAILED;
}
+ /* Save any FAST_IO_FAIL value to return later if abort succeeds */
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
- return ret;
+ fast_fail_status = ret;
sp = scsi_cmd_priv(cmd);
qpair = sp->qpair;
@@ -1276,7 +1278,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
vha->cmd_timeout_cnt++;
if ((sp->fcport && sp->fcport->deleted) || !qpair)
- return SUCCESS;
+ return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
sp->comp = &comp;
@@ -1311,7 +1313,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
__func__, ha->r_a_tov/10);
ret = FAILED;
} else {
- ret = SUCCESS;
+ ret = fast_fail_status;
}
break;
default:
@@ -2794,6 +2796,16 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
return atomic_read(&vha->loop_state) == LOOP_READY;
}
+static void qla_heartbeat_work_fn(struct work_struct *work)
+{
+ struct qla_hw_data *ha = container_of(work,
+ struct qla_hw_data, heartbeat_work);
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!ha->flags.mbox_busy && base_vha->flags.init_done)
+ qla_no_op_mb(base_vha);
+}
+
static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
@@ -3232,6 +3244,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->transportt, sht->vendor_id);
INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+ INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
@@ -3364,6 +3377,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ /* Check if FW supports MQ or not for ISP25xx */
+ if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6))
+ ha->mqenable = 0;
+
if (ha->mqenable) {
bool startit = false;
@@ -7114,17 +7131,6 @@ intr_on_check:
qla2x00_lip_reset(base_vha);
}
- if (test_bit(HEARTBEAT_CHK, &base_vha->dpc_flags)) {
- /*
- * if there is a mb in progress then that's
- * enough of a check to see if fw is still ticking.
- */
- if (!ha->flags.mbox_busy && base_vha->flags.init_done)
- qla_no_op_mb(base_vha);
-
- clear_bit(HEARTBEAT_CHK, &base_vha->dpc_flags);
- }
-
ha->dpc_active = 0;
end_loop:
set_current_state(TASK_INTERRUPTIBLE);
@@ -7183,57 +7189,51 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
static bool qla_do_heartbeat(struct scsi_qla_host *vha)
{
- u64 cmd_cnt, prev_cmd_cnt;
- bool do_hb = false;
struct qla_hw_data *ha = vha->hw;
- int i;
+ u32 cmpl_cnt;
+ u16 i;
+ bool do_heartbeat = false;
- /* if cmds are still pending down in fw, then do hb */
- if (ha->base_qpair->cmd_cnt != ha->base_qpair->cmd_completion_cnt) {
- do_hb = true;
+ /*
+ * Allow do_heartbeat only if we don’t have any active interrupts,
+ * but there are still IOs outstanding with firmware.
+ */
+ cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
+ if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
+ cmpl_cnt != ha->base_qpair->cmd_cnt) {
+ do_heartbeat = true;
goto skip;
}
+ ha->base_qpair->prev_completion_cnt = cmpl_cnt;
for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i] &&
- ha->queue_pair_map[i]->cmd_cnt !=
- ha->queue_pair_map[i]->cmd_completion_cnt) {
- do_hb = true;
- break;
+ if (ha->queue_pair_map[i]) {
+ cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
+ if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
+ cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
+ do_heartbeat = true;
+ break;
+ }
+ ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
}
}
skip:
- prev_cmd_cnt = ha->prev_cmd_cnt;
- cmd_cnt = ha->base_qpair->cmd_cnt;
- for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i])
- cmd_cnt += ha->queue_pair_map[i]->cmd_cnt;
- }
- ha->prev_cmd_cnt = cmd_cnt;
-
- if (!do_hb && ((cmd_cnt - prev_cmd_cnt) > 50))
- /*
- * IOs are completing before periodic hb check.
- * IOs seems to be running, do hb for sanity check.
- */
- do_hb = true;
-
- return do_hb;
+ return do_heartbeat;
}
static void qla_heart_beat(struct scsi_qla_host *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+
if (vha->vp_idx)
return;
if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
return;
- if (qla_do_heartbeat(vha)) {
- set_bit(HEARTBEAT_CHK, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
+ if (qla_do_heartbeat(vha))
+ queue_work(ha->wq, &ha->heartbeat_work);
}
/**************************************************************************
@@ -7943,7 +7943,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
- .shost_attrs = qla2x00_host_attrs,
+ .shost_groups = qla2x00_host_groups,
.supported_mode = MODE_INITIATOR,
.track_queue_depth = 1,
@@ -8131,9 +8131,6 @@ qla2x00_module_init(void)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
- if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
- qla_insert_tgt_attrs();
-
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
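
The firmware heartbeat also stops piggybacking on the DPC thread: the HEARTBEAT_CHK dpc flag is gone, qla_heart_beat() queues a dedicated work item on ha->wq, and qla_do_heartbeat() only requests it when a queue pair has outstanding commands whose completion count has not moved since the previous check. A stripped-down sketch of that plumbing with hypothetical foo_* names (probe would pair it with INIT_WORK(&hw->heartbeat_work, foo_heartbeat_work_fn), as the qla2x00_probe_one() hunk above does for the real fields):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct foo_hw {
            struct workqueue_struct *wq;
            struct work_struct heartbeat_work;
            bool mbox_busy;
            u32 cmd_cnt, cmd_completion_cnt, prev_completion_cnt;
    };

    /* Stand-in for qla_no_op_mb(): a cheap mailbox ping to confirm FW is alive. */
    static void foo_no_op_mailbox(struct foo_hw *hw)
    {
    }

    static void foo_heartbeat_work_fn(struct work_struct *work)
    {
            struct foo_hw *hw = container_of(work, struct foo_hw, heartbeat_work);

            if (!hw->mbox_busy)
                    foo_no_op_mailbox(hw);  /* runs from the workqueue, not the DPC thread */
    }

    /* Fire only when commands are outstanding and completions have stalled. */
    static bool foo_do_heartbeat(struct foo_hw *hw)
    {
            u32 cmpl = hw->cmd_completion_cnt;

            if (cmpl == hw->prev_completion_cnt && cmpl != hw->cmd_cnt)
                    return true;
            hw->prev_completion_cnt = cmpl;
            return false;
    }

    static void foo_heart_beat(struct foo_hw *hw) /* called from the periodic timer */
    {
            if (foo_do_heartbeat(hw))
                    queue_work(hw->wq, &hw->heartbeat_work);
    }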
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 055040cbef9b..4b117165bf8b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.06.200-k"
+#define QLA2XXX_VERSION "10.02.07.100-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 6
-#define QLA_DRIVER_BETA_VER 200
+#define QLA_DRIVER_PATCH_VER 7
+#define QLA_DRIVER_BETA_VER 100
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 03de1bcf1461..8fa0056b56dd 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -915,40 +915,17 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
/* End items for tcm_qla2xxx_tpg_attrib_cit */
-static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
- char *page)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
-
- return snprintf(page, PAGE_SIZE, "%d\n",
- atomic_read(&tpg->lport_tpg_enabled));
-}
-
-static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
+static int tcm_qla2xxx_enable_tpg(struct se_portal_group *se_tpg,
+ bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- unsigned long op;
- int rc;
- rc = kstrtoul(page, 0, &op);
- if (rc < 0) {
- pr_err("kstrtoul() returned %d\n", rc);
- return -EINVAL;
- }
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %lu\n", op);
- return -EINVAL;
- }
- if (op) {
+ if (enable) {
if (atomic_read(&tpg->lport_tpg_enabled))
return -EEXIST;
@@ -956,14 +933,14 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
qlt_enable_vha(vha);
} else {
if (!atomic_read(&tpg->lport_tpg_enabled))
- return count;
+ return 0;
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
- return count;
+ return 0;
}
static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item,
@@ -1004,12 +981,10 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
}
-CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
- &tcm_qla2xxx_tpg_attr_enable,
&tcm_qla2xxx_tpg_attr_dynamic_sessions,
&tcm_qla2xxx_tpg_attr_fabric_prot_type,
NULL,
@@ -1083,35 +1058,17 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
kfree(tpg);
}
-static ssize_t tcm_qla2xxx_npiv_tpg_enable_show(struct config_item *item,
- char *page)
-{
- return tcm_qla2xxx_tpg_enable_show(item, page);
-}
-
-static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
+static int tcm_qla2xxx_npiv_enable_tpg(struct se_portal_group *se_tpg,
+ bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- unsigned long op;
- int rc;
- rc = kstrtoul(page, 0, &op);
- if (rc < 0) {
- pr_err("kstrtoul() returned %d\n", rc);
- return -EINVAL;
- }
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %lu\n", op);
- return -EINVAL;
- }
- if (op) {
+ if (enable) {
if (atomic_read(&tpg->lport_tpg_enabled))
return -EEXIST;
@@ -1119,23 +1076,16 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
qlt_enable_vha(vha);
} else {
if (!atomic_read(&tpg->lport_tpg_enabled))
- return count;
+ return 0;
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
- return count;
+ return 0;
}
-CONFIGFS_ATTR(tcm_qla2xxx_npiv_tpg_, enable);
-
-static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
- &tcm_qla2xxx_npiv_tpg_attr_enable,
- NULL,
-};
-
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
const char *name)
{
@@ -1878,6 +1828,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.fabric_make_wwn = tcm_qla2xxx_make_lport,
.fabric_drop_wwn = tcm_qla2xxx_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_make_tpg,
+ .fabric_enable_tpg = tcm_qla2xxx_enable_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
.fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,
@@ -1918,11 +1869,11 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
.fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
+ .fabric_enable_tpg = tcm_qla2xxx_npiv_enable_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
.fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,
.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
- .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs,
};
static int tcm_qla2xxx_register_configfs(void)
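The tcm_qla2xxx hunks above drop the per-fabric "enable" configfs show/store handlers and register fabric_enable_tpg/fabric_enable_tpg-style callbacks instead, so the target core parses the user's 0/1 once and hands the fabric driver an already-validated bool. A rough standalone sketch of that split, with hypothetical names (fabric_enable_cb, demo_enable_store) and not the real target-core API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* After the change, the fabric driver only ever sees a parsed bool. */
static int fabric_enable_cb(bool enable)
{
        printf("fabric callback: enable=%d\n", enable);
        return 0;
}

/* Stand-in for the generic "enable" store handler owning the parsing. */
static ssize_t demo_enable_store(const char *page, size_t count)
{
        char *end;
        unsigned long op;
        int rc;

        op = strtoul(page, &end, 0);
        if (end == page || (op != 0 && op != 1))
                return -EINVAL;

        rc = fabric_enable_cb(op == 1);
        return rc ? rc : (ssize_t)count;
}

int main(void)
{
        printf("store(\"1\") -> %zd\n", demo_enable_store("1", 2));
        printf("store(\"7\") -> %zd\n", demo_enable_store("7", 2));
        return 0;
}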
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index ec4352818fbf..abfa6ef60480 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -330,21 +330,30 @@ static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);
-struct device_attribute *qla4xxx_host_attrs[] = {
- &dev_attr_fw_version,
- &dev_attr_serial_num,
- &dev_attr_iscsi_version,
- &dev_attr_optrom_version,
- &dev_attr_board_id,
- &dev_attr_fw_state,
- &dev_attr_phy_port_cnt,
- &dev_attr_phy_port_num,
- &dev_attr_iscsi_func_cnt,
- &dev_attr_hba_model,
- &dev_attr_fw_timestamp,
- &dev_attr_fw_build_user,
- &dev_attr_fw_ext_timestamp,
- &dev_attr_fw_load_src,
- &dev_attr_fw_uptime,
+static struct attribute *qla4xxx_host_attrs[] = {
+ &dev_attr_fw_version.attr,
+ &dev_attr_serial_num.attr,
+ &dev_attr_iscsi_version.attr,
+ &dev_attr_optrom_version.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_fw_state.attr,
+ &dev_attr_phy_port_cnt.attr,
+ &dev_attr_phy_port_num.attr,
+ &dev_attr_iscsi_func_cnt.attr,
+ &dev_attr_hba_model.attr,
+ &dev_attr_fw_timestamp.attr,
+ &dev_attr_fw_build_user.attr,
+ &dev_attr_fw_ext_timestamp.attr,
+ &dev_attr_fw_load_src.attr,
+ &dev_attr_fw_uptime.attr,
NULL,
};
+
+static const struct attribute_group qla4xxx_host_attr_group = {
+ .attrs = qla4xxx_host_attrs
+};
+
+const struct attribute_group *qla4xxx_host_groups[] = {
+ &qla4xxx_host_attr_group,
+ NULL
+};
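The qla4xxx change above follows the same pattern as the other drivers in this series: the bare NULL-terminated device_attribute pointer array becomes attribute pointers wrapped in one attribute_group, and the host template exposes a NULL-terminated array of groups. A standalone model of that data-structure shape (plain C with demo_* stand-ins, not the sysfs types):

#include <stdio.h>

struct demo_attribute {
        const char *name;
};

struct demo_attribute_group {
        struct demo_attribute **attrs;  /* NULL-terminated */
};

static struct demo_attribute attr_fw_version = { "fw_version" };
static struct demo_attribute attr_serial_num = { "serial_num" };

static struct demo_attribute *demo_host_attrs[] = {
        &attr_fw_version,
        &attr_serial_num,
        NULL,
};

static const struct demo_attribute_group demo_host_attr_group = {
        .attrs = demo_host_attrs,
};

/* What the driver template now exposes: groups, not bare attributes. */
static const struct demo_attribute_group *demo_host_groups[] = {
        &demo_host_attr_group,
        NULL,
};

int main(void)
{
        const struct demo_attribute_group **grp;
        struct demo_attribute **attr;

        /* The core walks the groups, then the attributes inside each group. */
        for (grp = demo_host_groups; *grp; grp++)
                for (attr = (*grp)->attrs; *attr; attr++)
                        printf("would create sysfs file: %s\n", (*attr)->name);
        return 0;
}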
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index ea60057b2e20..c0873381508d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -286,5 +286,6 @@ extern int ql4xenablemsix;
extern int ql4xmdcapmask;
extern int ql4xenablemd;
-extern struct device_attribute *qla4xxx_host_attrs[];
+extern const struct attribute_group *qla4xxx_host_groups[];
+
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f1ea65c6e5f5..8987acc24dac 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -241,7 +241,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
- .shost_attrs = qla4xxx_host_attrs,
+ .shost_groups = qla4xxx_host_groups,
.host_reset = qla4xxx_host_reset,
.vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};
@@ -4080,7 +4080,7 @@ void qla4xxx_srb_compl(struct kref *ref)
mempool_free(srb, ha->srb_mempool);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/**
@@ -4154,7 +4154,7 @@ qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_fail_command:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 3bbe0b5545d9..30a88849a626 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -442,7 +442,7 @@ static void ql_ihandl(void *dev_id)
* If result is CHECK CONDITION done calls qcommand to request
* sense
*/
- (icmd->scsi_done) (icmd);
+ scsi_done(icmd);
}
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
@@ -460,9 +460,9 @@ irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
* Queued command
*/
-static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done) (struct scsi_cmnd *))
+static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
set_host_byte(cmd, DID_OK);
@@ -473,7 +473,6 @@ static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
return 0;
}
- cmd->scsi_done = done;
/* wait for the last command's interrupt to finish */
while (priv->qlcmd != NULL) {
barrier();
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 8e7e833a36cc..57f2f4135a06 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1013,16 +1013,15 @@ static int qlogicpti_slave_configure(struct scsi_device *sdev)
*
* "This code must fly." -davem
*/
-static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
+static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct Scsi_Host *host = Cmnd->device->host;
struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
struct Command_Entry *cmd;
u_int out_ptr;
int in_ptr;
- Cmnd->scsi_done = done;
-
in_ptr = qpti->req_in_ptr;
cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
out_ptr = sbus_readw(qpti->qregs + MBOX4);
@@ -1214,7 +1213,7 @@ static irqreturn_t qpti_intr(int irq, void *dev_id)
struct scsi_cmnd *next;
next = (struct scsi_cmnd *) dq->host_scribble;
- dq->scsi_done(dq);
+ scsi_done(dq);
dq = next;
} while (dq != NULL);
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 291ecc33b1fe..f6af1562cba4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -86,14 +86,6 @@ unsigned int scsi_logging_level;
EXPORT_SYMBOL(scsi_logging_level);
#endif
-/*
- * Domain for asynchronous system resume operations. It is marked 'exclusive'
- * to avoid being included in the async_synchronize_full() that is invoked by
- * dpm_resume().
- */
-ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
-EXPORT_SYMBOL(scsi_sd_pm_domain);
-
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 40b473eea357..06e7266018c7 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1856,7 +1856,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
{
unsigned char *cmd = scp->cmnd;
unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
- int alloc_len;
+ u32 alloc_len;
alloc_len = get_unaligned_be32(cmd + 10);
/* following just in case virtual_gb changed */
@@ -1885,7 +1885,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
}
return fill_from_dev_buffer(scp, arr,
- min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
+ min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
@@ -1896,8 +1896,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
unsigned char *cmd = scp->cmnd;
unsigned char *arr;
int host_no = devip->sdbg_host->shost->host_no;
- int n, ret, alen, rlen;
int port_group_a, port_group_b, port_a, port_b;
+ u32 alen, n, rlen;
+ int ret;
alen = get_unaligned_be32(cmd + 6);
arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
@@ -1959,9 +1960,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
* - The constructed command length
* - The maximum array size
*/
- rlen = min_t(int, alen, n);
+ rlen = min(alen, n);
ret = fill_from_dev_buffer(scp, arr,
- min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
+ min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
kfree(arr);
return ret;
}
@@ -4809,7 +4810,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
pr_info("bypassing scsi_done() due to aborted cmd\n");
return;
}
- scp->scsi_done(scp); /* callback to mid level */
+ scsi_done(scp); /* callback to mid level */
}
/* When high resolution timer goes off this function is called. */
@@ -5524,7 +5525,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (new_sd_dp)
kfree(sd_dp);
/* call scsi_done() from this thread */
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
/* otherwise reduce kt by elapsed time */
@@ -5604,7 +5605,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
cmnd->result &= ~SDEG_RES_IMMED_MASK;
if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result;
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
@@ -7363,7 +7364,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
}
sd_dp->defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- scp->scsi_done(scp); /* callback to mid level */
+ scsi_done(scp); /* callback to mid level */
spin_lock_irqsave(&sqp->qc_lock, iflags);
num_entries++;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 36870b41c888..a531336f6a0a 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,8 +50,6 @@
#include <asm/unaligned.h>
-static void scsi_eh_done(struct scsi_cmnd *scmd);
-
/*
* These should *probably* be handled by the host itself.
* Since it is allowed to sleep, it probably should.
@@ -520,7 +518,8 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
/* handler does not care. Drop down to default handling */
}
- if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+ if (scmd->cmnd[0] == TEST_UNIT_READY &&
+ scmd->submitter != SUBMITTED_BY_SCSI_ERROR_HANDLER)
/*
* nasty: for mid-layer issued TURs, we need to return the
* actual sense data without any recovery attempt. For eh
@@ -782,7 +781,7 @@ static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
* scsi_eh_done - Completion function for error handling.
* @scmd: Cmd that is done.
*/
-static void scsi_eh_done(struct scsi_cmnd *scmd)
+void scsi_eh_done(struct scsi_cmnd *scmd)
{
struct completion *eh_action;
@@ -1082,7 +1081,7 @@ retry:
shost->eh_action = &done;
scsi_log_send(scmd);
- scmd->scsi_done = scsi_eh_done;
+ scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
/*
* Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
@@ -1109,6 +1108,7 @@ retry:
if (rtn) {
if (timeleft > stall_for) {
scsi_eh_restore_cmnd(scmd, &ses);
+
timeleft -= stall_for;
msleep(jiffies_to_msecs(stall_for));
goto retry;
@@ -2338,11 +2338,6 @@ void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
}
EXPORT_SYMBOL(scsi_report_device_reset);
-static void
-scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
-{
-}
-
/**
* scsi_ioctl_reset: explicitly reset a host/bus/target/device
* @dev: scsi_device to operate on
@@ -2379,7 +2374,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scsi_init_command(dev, scmd);
scmd->cmnd = scsi_req(rq)->cmd;
- scmd->scsi_done = scsi_reset_provider_done_command;
+ scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->cmd_len = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9c2b99e12ce3..1344553afe70 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -950,7 +950,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
/*
* If there had been no error, but we have leftover bytes in the
- * requeues just queue the command up again.
+ * request just queue the command up again.
*/
if (likely(result == 0))
scsi_io_completion_reprep(cmd, q);
@@ -1530,7 +1530,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
return rtn;
done:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -1585,8 +1585,17 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
-static void scsi_mq_done(struct scsi_cmnd *cmd)
+void scsi_done(struct scsi_cmnd *cmd)
{
+ switch (cmd->submitter) {
+ case SUBMITTED_BY_BLOCK_LAYER:
+ break;
+ case SUBMITTED_BY_SCSI_ERROR_HANDLER:
+ return scsi_eh_done(cmd);
+ case SUBMITTED_BY_SCSI_RESET_IOCTL:
+ return;
+ }
+
if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
return;
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
@@ -1594,6 +1603,7 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
trace_scsi_dispatch_cmd_done(cmd);
blk_mq_complete_request(scsi_cmd_to_rq(cmd));
}
+EXPORT_SYMBOL(scsi_done);
static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
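With the hunk above, scsi_mq_done() becomes the exported scsi_done(), and the per-command completion function pointer is replaced by a submitter tag that the single completion path switches on. A compilable user-space sketch of that dispatch style follows; the enum values, struct and handler names are illustrative only, not the SCSI midlayer's:

#include <stdio.h>

enum demo_submitter {
        DEMO_SUBMITTED_BY_BLOCK_LAYER = 0,
        DEMO_SUBMITTED_BY_ERROR_HANDLER = 1,
        DEMO_SUBMITTED_BY_RESET_IOCTL = 2,
};

struct demo_cmd {
        enum demo_submitter submitter;
        int result;
};

static void demo_eh_done(struct demo_cmd *cmd)
{
        printf("error handler completion, result=%d\n", cmd->result);
}

static void demo_blk_complete(struct demo_cmd *cmd)
{
        printf("block layer completion, result=%d\n", cmd->result);
}

/* One completion entry point; the tag decides who gets notified. */
static void demo_done(struct demo_cmd *cmd)
{
        switch (cmd->submitter) {
        case DEMO_SUBMITTED_BY_BLOCK_LAYER:
                demo_blk_complete(cmd);
                return;
        case DEMO_SUBMITTED_BY_ERROR_HANDLER:
                demo_eh_done(cmd);
                return;
        case DEMO_SUBMITTED_BY_RESET_IOCTL:
                /* The reset ioctl path waits elsewhere; nothing to do here. */
                return;
        }
}

int main(void)
{
        struct demo_cmd a = { DEMO_SUBMITTED_BY_BLOCK_LAYER, 0 };
        struct demo_cmd b = { DEMO_SUBMITTED_BY_ERROR_HANDLER, 2 };
        struct demo_cmd c = { DEMO_SUBMITTED_BY_RESET_IOCTL, 0 };

        demo_done(&a);
        demo_done(&b);
        demo_done(&c);
        return 0;
}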
@@ -1693,7 +1703,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
scsi_set_resid(cmd, 0);
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- cmd->scsi_done = scsi_mq_done;
+ cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
blk_mq_start_request(req);
reason = scsi_dispatch_cmd(cmd);
@@ -2042,8 +2052,15 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
memset(cmd, 0, sizeof(cmd));
cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
- if (sdev->use_10_for_ms) {
- if (len > 65535)
+ /*
+ * Use MODE SELECT(10) if the device asked for it or if the mode page
+ * and the mode select header cannot fit within the maximum 255 bytes
+ * of the MODE SELECT(6) command.
+ */
+ if (sdev->use_10_for_ms ||
+ len + 4 > 255 ||
+ data->block_descriptor_length > 255) {
+ if (len > 65535 - 8)
return -EINVAL;
real_buffer = kmalloc(8 + len, GFP_KERNEL);
if (!real_buffer)
@@ -2056,15 +2073,13 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
real_buffer[3] = data->device_specific;
real_buffer[4] = data->longlba ? 0x01 : 0;
real_buffer[5] = 0;
- real_buffer[6] = data->block_descriptor_length >> 8;
- real_buffer[7] = data->block_descriptor_length;
+ put_unaligned_be16(data->block_descriptor_length,
+ &real_buffer[6]);
cmd[0] = MODE_SELECT_10;
- cmd[7] = len >> 8;
- cmd[8] = len;
+ put_unaligned_be16(len, &cmd[7]);
} else {
- if (len > 255 || data->block_descriptor_length > 255 ||
- data->longlba)
+ if (data->longlba)
return -EINVAL;
real_buffer = kmalloc(4 + len, GFP_KERNEL);
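The scsi_mode_select() hunks above now pick MODE SELECT(10) whenever the 6-byte variant cannot carry the data (header plus page over 255 bytes, or a block descriptor length over 255), and encode the 16-bit fields with put_unaligned_be16() rather than hand-rolled shifts. A small standalone sketch of the same decision and of the big-endian encoding, assuming local demo_* helper names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void demo_put_be16(uint16_t val, uint8_t *p)
{
        p[0] = val >> 8;        /* most significant byte first */
        p[1] = val & 0xff;
}

static bool demo_use_mode_select_10(bool use_10_for_ms, int data_len,
                                    int block_desc_len)
{
        /* The 4-byte MODE SELECT(6) header plus data must fit in one byte. */
        return use_10_for_ms || data_len + 4 > 255 || block_desc_len > 255;
}

int main(void)
{
        uint8_t cdb[10] = { 0 };
        int len = 300, block_desc_len = 0;

        if (demo_use_mode_select_10(false, len, block_desc_len)) {
                cdb[0] = 0x55;                  /* MODE SELECT(10) opcode */
                demo_put_be16(len, &cdb[7]);    /* parameter list length */
        } else {
                cdb[0] = 0x15;                  /* MODE SELECT(6) opcode */
                cdb[4] = len;
        }

        printf("opcode=0x%02x length bytes: %02x %02x\n",
               cdb[0], cdb[7], cdb[8]);
        return 0;
}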
@@ -2091,7 +2106,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
/**
* scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
* @sdev: SCSI device to be queried
- * @dbd: set if mode sense will allow block descriptors to be returned
+ * @dbd: set to prevent mode sense from returning block descriptors
* @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
@@ -2126,18 +2141,18 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
sshdr = &my_sshdr;
retry:
- use_10_for_ms = sdev->use_10_for_ms;
+ use_10_for_ms = sdev->use_10_for_ms || len > 255;
if (use_10_for_ms) {
- if (len < 8)
- len = 8;
+ if (len < 8 || len > 65535)
+ return -EINVAL;
cmd[0] = MODE_SENSE_10;
- cmd[8] = len;
+ put_unaligned_be16(len, &cmd[7]);
header_length = 8;
} else {
if (len < 4)
- len = 4;
+ return -EINVAL;
cmd[0] = MODE_SENSE;
cmd[4] = len;
@@ -2161,9 +2176,15 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
/*
- * Invalid command operation code
+ * Invalid command operation code: retry using
+ * MODE SENSE(6) if this was a MODE SENSE(10)
+ * request, except if the requested mode page is
+ * too large for the MODE SENSE(6) single-byte
+ * allocation length field.
*/
if (use_10_for_ms) {
+ if (len > 255)
+ return -EIO;
sdev->use_10_for_ms = 0;
goto retry;
}
@@ -2187,12 +2208,11 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
data->longlba = 0;
data->block_descriptor_length = 0;
} else if (use_10_for_ms) {
- data->length = buffer[0]*256 + buffer[1] + 2;
+ data->length = get_unaligned_be16(&buffer[0]) + 2;
data->medium_type = buffer[2];
data->device_specific = buffer[3];
data->longlba = buffer[4] & 0x01;
- data->block_descriptor_length = buffer[6]*256
- + buffer[7];
+ data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
} else {
data->length = buffer[0] + 1;
data->medium_type = buffer[1];
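The final scsi_mode_sense() hunk replaces the open-coded buffer[n]*256 + buffer[n+1] arithmetic with get_unaligned_be16() when decoding the MODE SENSE(10) header. For reference, a compilable sketch of decoding both header layouts; the field offsets follow SPC and the demo_* names are local, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct demo_mode_data {
        int length;
        int medium_type;
        int device_specific;
        int longlba;
        int block_descriptor_length;
};

static uint16_t demo_get_be16(const uint8_t *p)
{
        return (uint16_t)(p[0] << 8 | p[1]);
}

static void demo_parse_header(const uint8_t *buf, int use_10,
                              struct demo_mode_data *data)
{
        if (use_10) {
                data->length = demo_get_be16(&buf[0]) + 2;
                data->medium_type = buf[2];
                data->device_specific = buf[3];
                data->longlba = buf[4] & 0x01;
                data->block_descriptor_length = demo_get_be16(&buf[6]);
        } else {
                data->length = buf[0] + 1;
                data->medium_type = buf[1];
                data->device_specific = buf[2];
                data->longlba = 0;
                data->block_descriptor_length = buf[3];
        }
}

int main(void)
{
        /* Fabricated MODE SENSE(10) header: length 0x001a, 8-byte descriptor. */
        const uint8_t hdr10[8] = { 0x00, 0x1a, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08 };
        struct demo_mode_data data;

        demo_parse_header(hdr10, 1, &data);
        printf("length=%d block_descriptor_length=%d\n",
               data.length, data.block_descriptor_length);
        return 0;
}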
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 3717eea37ecb..b5a858c29488 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -56,9 +56,6 @@ static int scsi_dev_type_suspend(struct device *dev,
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err;
- /* flush pending in-flight resume operations, suspend is synchronous */
- async_synchronize_full_domain(&scsi_sd_pm_domain);
-
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
err = cb(dev, pm);
@@ -69,108 +66,30 @@ static int scsi_dev_type_suspend(struct device *dev,
return err;
}
-static int scsi_dev_type_resume(struct device *dev,
- int (*cb)(struct device *, const struct dev_pm_ops *))
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int err = 0;
-
- err = cb(dev, pm);
- scsi_device_resume(to_scsi_device(dev));
- dev_dbg(dev, "scsi resume: %d\n", err);
-
- if (err == 0) {
- pm_runtime_disable(dev);
- err = pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- /*
- * Forcibly set runtime PM status of request queue to "active"
- * to make sure we can again get requests from the queue
- * (see also blk_pm_peek_request()).
- *
- * The resume hook will correct runtime PM status of the disk.
- */
- if (!err && scsi_is_sdev_device(dev)) {
- struct scsi_device *sdev = to_scsi_device(dev);
-
- blk_set_runtime_active(sdev->request_queue);
- }
- }
-
- return err;
-}
-
static int
scsi_bus_suspend_common(struct device *dev,
int (*cb)(struct device *, const struct dev_pm_ops *))
{
- int err = 0;
-
- if (scsi_is_sdev_device(dev)) {
- /*
- * All the high-level SCSI drivers that implement runtime
- * PM treat runtime suspend, system suspend, and system
- * hibernate nearly identically. In all cases the requirements
- * for runtime suspension are stricter.
- */
- if (pm_runtime_suspended(dev))
- return 0;
-
- err = scsi_dev_type_suspend(dev, cb);
- }
-
- return err;
-}
-
-static void async_sdev_resume(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_resume);
-}
-
-static void async_sdev_thaw(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_thaw);
-}
+ if (!scsi_is_sdev_device(dev))
+ return 0;
-static void async_sdev_restore(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_restore);
+ return scsi_dev_type_suspend(dev, cb);
}
static int scsi_bus_resume_common(struct device *dev,
int (*cb)(struct device *, const struct dev_pm_ops *))
{
- async_func_t fn;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int err;
if (!scsi_is_sdev_device(dev))
- fn = NULL;
- else if (cb == do_scsi_resume)
- fn = async_sdev_resume;
- else if (cb == do_scsi_thaw)
- fn = async_sdev_thaw;
- else if (cb == do_scsi_restore)
- fn = async_sdev_restore;
- else
- fn = NULL;
-
- if (fn) {
- async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
-
- /*
- * If a user has disabled async probing a likely reason
- * is due to a storage enclosure that does not inject
- * staggered spin-ups. For safety, make resume
- * synchronous as well in that case.
- */
- if (strncmp(scsi_scan_type, "async", 5) != 0)
- async_synchronize_full_domain(&scsi_sd_pm_domain);
- } else {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
- return 0;
+ return 0;
+
+ err = cb(dev, pm);
+ scsi_device_resume(to_scsi_device(dev));
+ dev_dbg(dev, "scsi resume: %d\n", err);
+
+ return err;
}
static int scsi_bus_prepare(struct device *dev)
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 6d9152031a40..a278fc8948f4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -84,6 +84,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
int scsi_noretry_cmd(struct scsi_cmnd *scmd);
+void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
@@ -116,7 +117,7 @@ extern void scsi_exit_procfs(void);
#endif /* CONFIG_PROC_FS */
/* scsi_scan.c */
-extern char scsi_scan_type[];
+void scsi_enable_async_suspend(struct device *dev);
extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
@@ -143,7 +144,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
-extern const struct attribute_group *scsi_sysfs_shost_attr_groups[];
+extern const struct attribute_group scsi_shost_attr_group;
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK
@@ -170,8 +171,6 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM */
-extern struct async_domain scsi_sd_pm_domain;
-
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
void scsi_dh_add_device(struct scsi_device *sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 2808c0cb5711..23e1c0acdeae 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -123,6 +123,22 @@ struct async_scan_data {
};
/**
+ * scsi_enable_async_suspend - Enable async suspend and resume
+ */
+void scsi_enable_async_suspend(struct device *dev)
+{
+ /*
+ * If a user has disabled async probing a likely reason is due to a
+ * storage enclosure that does not inject staggered spin-ups. For
+ * safety, make resume synchronous as well in that case.
+ */
+ if (strncmp(scsi_scan_type, "async", 5) != 0)
+ return;
+ /* Enable asynchronous suspend and resume. */
+ device_enable_async_suspend(dev);
+}
+
+/**
* scsi_complete_async_scans - Wait for asynchronous scans to complete
*
* When this function returns, any host which started scanning before
@@ -453,6 +469,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
dev->type = &scsi_target_type;
+ scsi_enable_async_suspend(dev);
starget->id = id;
starget->channel = channel;
starget->can_queue = 0;
@@ -1901,60 +1918,3 @@ void scsi_forget_host(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-/**
- * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
- * @shost: Host that needs a scsi_device
- *
- * Lock status: None assumed.
- *
- * Returns: The scsi_device or NULL
- *
- * Notes:
- * Attach a single scsi_device to the Scsi_Host - this should
- * be made to look like a "pseudo-device" that points to the
- * HA itself.
- *
- * Note - this device is not accessible from any high-level
- * drivers (including generics), which is probably not
- * optimal. We can add hooks later to attach.
- */
-struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
-{
- struct scsi_device *sdev = NULL;
- struct scsi_target *starget;
-
- mutex_lock(&shost->scan_mutex);
- if (!scsi_host_scan_allowed(shost))
- goto out;
- starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
- if (!starget)
- goto out;
-
- sdev = scsi_alloc_sdev(starget, 0, NULL);
- if (sdev)
- sdev->borken = 0;
- else
- scsi_target_reap(starget);
- put_device(&starget->dev);
- out:
- mutex_unlock(&shost->scan_mutex);
- return sdev;
-}
-EXPORT_SYMBOL(scsi_get_host_dev);
-
-/**
- * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
- * @sdev: Host device to be freed
- *
- * Lock status: None assumed.
- *
- * Returns: Nothing
- */
-void scsi_free_host_dev(struct scsi_device *sdev)
-{
- BUG_ON(sdev->id != sdev->host->this_id);
-
- __scsi_remove_device(sdev);
-}
-EXPORT_SYMBOL(scsi_free_host_dev);
-
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index a35841b34bfd..d3d362289ecc 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -424,15 +424,10 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
NULL
};
-static struct attribute_group scsi_shost_attr_group = {
+const struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
-const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
- &scsi_shost_attr_group,
- NULL
-};
-
static void scsi_device_cls_release(struct device *class_dev)
{
struct scsi_device *sdev;
@@ -1342,7 +1337,7 @@ static int scsi_target_add(struct scsi_target *starget)
**/
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
- int error, i;
+ int error;
struct scsi_target *starget = sdev->sdev_target;
error = scsi_target_add(starget);
@@ -1395,23 +1390,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
}
}
- /* add additional host specific attributes */
- if (sdev->host->hostt->sdev_attrs) {
- for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
- error = device_create_file(&sdev->sdev_gendev,
- sdev->host->hostt->sdev_attrs[i]);
- if (error)
- return error;
- }
- }
-
- if (sdev->host->hostt->sdev_groups) {
- error = sysfs_create_groups(&sdev->sdev_gendev.kobj,
- sdev->host->hostt->sdev_groups);
- if (error)
- return error;
- }
-
scsi_autopm_put_device(sdev);
return error;
}
@@ -1451,10 +1429,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
if (res != 0)
return;
- if (sdev->host->hostt->sdev_groups)
- sysfs_remove_groups(&sdev->sdev_gendev.kobj,
- sdev->host->hostt->sdev_groups);
-
if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev)
bsg_unregister_queue(sdev->bsg_dev);
device_unregister(&sdev->sdev_dev);
@@ -1593,18 +1567,6 @@ EXPORT_SYMBOL(scsi_register_interface);
**/
int scsi_sysfs_add_host(struct Scsi_Host *shost)
{
- int error, i;
-
- /* add host specific attributes */
- if (shost->hostt->shost_attrs) {
- for (i = 0; shost->hostt->shost_attrs[i]; i++) {
- error = device_create_file(&shost->shost_dev,
- shost->hostt->shost_attrs[i]);
- if (error)
- return error;
- }
- }
-
transport_register_device(&shost->shost_gendev);
transport_configure_device(&shost->shost_gendev);
return 0;
@@ -1618,15 +1580,27 @@ static struct device_type scsi_dev_type = {
void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
+ int i, j = 0;
unsigned long flags;
struct Scsi_Host *shost = sdev->host;
+ struct scsi_host_template *hostt = shost->hostt;
struct scsi_target *starget = sdev->sdev_target;
device_initialize(&sdev->sdev_gendev);
sdev->sdev_gendev.bus = &scsi_bus_type;
sdev->sdev_gendev.type = &scsi_dev_type;
+ scsi_enable_async_suspend(&sdev->sdev_gendev);
dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+ sdev->gendev_attr_groups[j++] = &scsi_sdev_attr_group;
+ if (hostt->sdev_groups) {
+ for (i = 0; hostt->sdev_groups[i] &&
+ j < ARRAY_SIZE(sdev->gendev_attr_groups);
+ i++, j++) {
+ sdev->gendev_attr_groups[j] = hostt->sdev_groups[i];
+ }
+ }
+ WARN_ON_ONCE(j >= ARRAY_SIZE(sdev->gendev_attr_groups));
device_initialize(&sdev->sdev_dev);
sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
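scsi_sysfs_device_initialize() above now pre-assembles the device's attribute groups before device_add(): the generic scsi_sdev_attr_group first, then any host-template sdev_groups, bounded by the size of the per-device array. A standalone sketch of that bounded merge of NULL-terminated pointer lists, using illustrative names and strings in place of attribute_group pointers:

#include <stdio.h>

#define DEMO_MAX_GROUPS 4

static const char *demo_generic_group = "scsi_sdev_attr_group";

static const char *demo_template_groups[] = {
        "host_group_a",
        "host_group_b",
        NULL,
};

int main(void)
{
        const char *gendev_groups[DEMO_MAX_GROUPS] = { NULL };
        int i, j = 0;

        gendev_groups[j++] = demo_generic_group;
        /* Copy template groups while keeping the final NULL terminator. */
        for (i = 0; demo_template_groups[i] && j < DEMO_MAX_GROUPS - 1; i++, j++)
                gendev_groups[j] = demo_template_groups[i];

        for (i = 0; gendev_groups[i]; i++)
                printf("group %d: %s\n", i, gendev_groups[i]);
        return 0;
}

Registering the groups up front this way is what lets the later hunks delete the manual device_create_file()/sysfs_create_groups() calls in scsi_sysfs_add_sdev() and the matching removal in __scsi_remove_device().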
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 4a96fb05731d..4ee578b181da 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -154,6 +154,7 @@ static struct {
{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
+ { SAS_LINK_RATE_22_5_GBPS, "22.5 Gbit" },
};
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 252e43d7c73f..65875a598d62 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -110,7 +110,7 @@ static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
-static int sd_resume(struct device *);
+static int sd_resume_system(struct device *);
static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
@@ -603,9 +603,9 @@ static struct class sd_disk_class = {
static const struct dev_pm_ops sd_pm_ops = {
.suspend = sd_suspend_system,
- .resume = sd_resume,
+ .resume = sd_resume_system,
.poweroff = sd_suspend_system,
- .restore = sd_resume,
+ .restore = sd_resume_system,
.runtime_suspend = sd_suspend_runtime,
.runtime_resume = sd_resume_runtime,
};
@@ -2647,6 +2647,13 @@ sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
unsigned char *buffer, int len, struct scsi_mode_data *data,
struct scsi_sense_hdr *sshdr)
{
+ /*
+ * If we must use MODE SENSE(10), make sure that the buffer length
+ * is at least 8 bytes so that the mode sense header fits.
+ */
+ if (sdkp->device->use_10_for_ms && len < 8)
+ len = 8;
+
return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
SD_TIMEOUT, sdkp->max_retries, data,
sshdr);
@@ -2825,7 +2832,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
}
- sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+ sd_first_printk(KERN_WARNING, sdkp,
+ "No Caching mode page found\n");
goto defaults;
Page_found:
@@ -2880,7 +2888,7 @@ defaults:
"Assuming drive cache: write back\n");
sdkp->WCE = 1;
} else {
- sd_first_printk(KERN_ERR, sdkp,
+ sd_first_printk(KERN_WARNING, sdkp,
"Assuming drive cache: write through\n");
sdkp->WCE = 0;
}
@@ -3570,7 +3578,13 @@ static int sd_probe(struct device *dev)
pm_runtime_set_autosuspend_delay(dev,
sdp->host->hostt->rpm_autosuspend_delay);
}
- device_add_disk(dev, gd, NULL);
+
+ error = device_add_disk(dev, gd, NULL);
+ if (error) {
+ put_device(&sdkp->dev);
+ goto out;
+ }
+
if (sdkp->capacity)
sd_dif_config_host(sdkp);
@@ -3618,7 +3632,6 @@ static int sd_remove(struct device *dev)
sdkp = dev_get_drvdata(dev);
scsi_autopm_get_device(sdkp->device);
- async_synchronize_full_domain(&scsi_sd_pm_domain);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
@@ -3775,6 +3788,9 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
static int sd_suspend_system(struct device *dev)
{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
return sd_suspend_common(dev, true);
}
@@ -3801,6 +3817,14 @@ static int sd_resume(struct device *dev)
return ret;
}
+static int sd_resume_system(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return sd_resume(dev);
+}
+
static int sd_resume_runtime(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 70eca203d72f..aac88ac0a0b7 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -82,9 +82,11 @@ struct pqi_ctrl_registers {
__le32 sis_product_identifier; /* B4h */
u8 reserved5[0xbc - (0xb4 + sizeof(__le32))];
__le32 sis_firmware_status; /* BCh */
- u8 reserved6[0x1000 - (0xbc + sizeof(__le32))];
+ u8 reserved6[0xcc - (0xbc + sizeof(__le32))];
+ __le32 sis_ctrl_shutdown_reason_code; /* CCh */
+ u8 reserved7[0x1000 - (0xcc + sizeof(__le32))];
__le32 sis_mailbox[8]; /* 1000h */
- u8 reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
+ u8 reserved8[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
/*
* The PQI spec states that the PQI registers should be at
* offset 0 from the PCIe BAR 0. However, we can't map
@@ -102,6 +104,21 @@ struct pqi_ctrl_registers {
#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
+/* shutdown reasons for taking the controller offline */
+enum pqi_ctrl_shutdown_reason {
+ PQI_IQ_NOT_DRAINED_TIMEOUT = 1,
+ PQI_LUN_RESET_TIMEOUT = 2,
+ PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT = 3,
+ PQI_NO_HEARTBEAT = 4,
+ PQI_FIRMWARE_KERNEL_NOT_UP = 5,
+ PQI_OFA_RESPONSE_TIMEOUT = 6,
+ PQI_INVALID_REQ_ID = 7,
+ PQI_UNMATCHED_REQ_ID = 8,
+ PQI_IO_PI_OUT_OF_RANGE = 9,
+ PQI_EVENT_PI_OUT_OF_RANGE = 10,
+ PQI_UNEXPECTED_IU_TYPE = 11
+};
+
enum pqi_io_path {
RAID_PATH = 0,
AIO_PATH = 1
@@ -850,7 +867,9 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
#define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME 15
#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 16
+#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
+#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 18
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -925,19 +944,21 @@ struct report_lun_header {
#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5)
#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6)
-#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1)
+#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2 0x2
+#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4 0x4
+#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK 0xf
-struct report_log_lun_extended_entry {
+struct report_log_lun {
u8 lunid[8];
u8 volume_id[16];
};
-struct report_log_lun_extended {
+struct report_log_lun_list {
struct report_lun_header header;
- struct report_log_lun_extended_entry lun_entries[1];
+ struct report_log_lun lun_entries[1];
};
-struct report_phys_lun_extended_entry {
+struct report_phys_lun_8byte_wwid {
u8 lunid[8];
__be64 wwid;
u8 device_type;
@@ -947,12 +968,27 @@ struct report_phys_lun_extended_entry {
u32 aio_handle;
};
+struct report_phys_lun_16byte_wwid {
+ u8 lunid[8];
+ u8 wwid[16];
+ u8 device_type;
+ u8 device_flags;
+ u8 lun_count; /* number of LUNs in a multi-LUN device */
+ u8 redundant_paths;
+ u32 aio_handle;
+};
+
/* for device_flags field of struct report_phys_lun_extended_entry */
#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8
-struct report_phys_lun_extended {
+struct report_phys_lun_8byte_wwid_list {
struct report_lun_header header;
- struct report_phys_lun_extended_entry lun_entries[1];
+ struct report_phys_lun_8byte_wwid lun_entries[1];
+};
+
+struct report_phys_lun_16byte_wwid_list {
+ struct report_lun_header header;
+ struct report_phys_lun_16byte_wwid lun_entries[1];
};
struct raid_map_disk_data {
@@ -1059,7 +1095,7 @@ struct pqi_scsi_dev {
int target;
int lun;
u8 scsi3addr[8];
- __be64 wwid;
+ u8 wwid[16];
u8 volume_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
@@ -1070,6 +1106,7 @@ struct pqi_scsi_dev {
u8 keep_device : 1;
u8 volume_offline : 1;
u8 rescan : 1;
+ u8 ignore_device : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
bool device_offline;
@@ -1297,6 +1334,8 @@ struct pqi_ctrl_info {
u8 raid_iu_timeout_supported : 1;
u8 tmf_iu_timeout_supported : 1;
u8 unique_wwid_in_report_phys_lun_supported : 1;
+ u8 firmware_triage_supported : 1;
+ u8 rpl_extended_format_4_5_supported : 1;
u8 enable_r1_writes : 1;
u8 enable_r5_writes : 1;
u8 enable_r6_writes : 1;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ecb2af3f43ca..f0897d587454 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.10-020"
+#define DRIVER_VERSION "2.1.12-055"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 10
-#define DRIVER_REVISION 20
+#define DRIVER_RELEASE 12
+#define DRIVER_REVISION 55
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -54,7 +54,8 @@ MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
@@ -194,7 +195,7 @@ static char *pqi_raid_level_to_string(u8 raid_level)
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
pqi_prep_for_scsi_done(scmd);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
static inline void pqi_disable_write_same(struct scsi_device *sdev)
@@ -226,7 +227,7 @@ static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
if (!sis_is_firmware_running(ctrl_info))
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}
static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
@@ -234,15 +235,46 @@ static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
+#define PQI_DRIVER_SCRATCH_PQI_MODE 0x1
+#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2
+
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
- return sis_read_driver_scratch(ctrl_info);
+ return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}
static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_mode mode)
{
- sis_write_driver_scratch(ctrl_info, mode);
+ u32 driver_scratch;
+
+ driver_scratch = sis_read_driver_scratch(ctrl_info);
+
+ if (mode == PQI_MODE)
+ driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
+ else
+ driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
+
+ sis_write_driver_scratch(ctrl_info, driver_scratch);
+}
+
+static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
+{
+ return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
+}
+
+static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
+{
+ u32 driver_scratch;
+
+ driver_scratch = sis_read_driver_scratch(ctrl_info);
+
+ if (is_supported)
+ driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
+ else
+ driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
+
+ sis_write_driver_scratch(ctrl_info, driver_scratch);
}
static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
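The smartpqi hunk above stops treating the SIS driver-scratch register as a single mode value and starts using it as a bit field: one bit for PQI versus SIS mode and one to remember firmware-triage support across a reset. A minimal sketch of that read-modify-write pattern on a 32-bit scratch word; the flag names and the plain global standing in for the register are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SCRATCH_PQI_MODE   0x1u
#define DEMO_SCRATCH_FW_TRIAGE  0x2u

static uint32_t demo_scratch;   /* stands in for the SIS scratch register */

static void demo_set_flag(uint32_t flag, bool on)
{
        uint32_t v = demo_scratch;      /* "read" */

        if (on)
                v |= flag;
        else
                v &= ~flag;
        demo_scratch = v;               /* "write" */
}

static bool demo_test_flag(uint32_t flag)
{
        return (demo_scratch & flag) != 0;
}

int main(void)
{
        demo_set_flag(DEMO_SCRATCH_PQI_MODE, true);
        demo_set_flag(DEMO_SCRATCH_FW_TRIAGE, true);
        demo_set_flag(DEMO_SCRATCH_PQI_MODE, false);    /* back to SIS mode */

        printf("pqi mode: %d, fw triage: %d, raw: 0x%x\n",
               demo_test_flag(DEMO_SCRATCH_PQI_MODE),
               demo_test_flag(DEMO_SCRATCH_FW_TRIAGE),
               demo_scratch);
        return 0;
}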
@@ -523,6 +555,10 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb = request->cdb;
switch (cmd) {
+ case TEST_UNIT_READY:
+ request->data_direction = SOP_READ_FLAG;
+ cdb[0] = TEST_UNIT_READY;
+ break;
case INQUIRY:
request->data_direction = SOP_READ_FLAG;
cdb[0] = INQUIRY;
@@ -536,10 +572,14 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
case CISS_REPORT_PHYS:
request->data_direction = SOP_READ_FLAG;
cdb[0] = cmd;
- if (cmd == CISS_REPORT_PHYS)
- cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
- else
+ if (cmd == CISS_REPORT_PHYS) {
+ if (ctrl_info->rpl_extended_format_4_5_supported)
+ cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
+ else
+ cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
+ } else {
cdb[1] = ctrl_info->ciss_report_log_flags;
+ }
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -1096,7 +1136,64 @@ out:
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
- return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
+ int rc;
+ unsigned int i;
+ u8 rpl_response_format;
+ u32 num_physicals;
+ size_t rpl_16byte_wwid_list_length;
+ void *rpl_list;
+ struct report_lun_header *rpl_header;
+ struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
+ struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
+
+ rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
+ if (rc)
+ return rc;
+
+ if (ctrl_info->rpl_extended_format_4_5_supported) {
+ rpl_header = rpl_list;
+ rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
+ if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
+ *buffer = rpl_list;
+ return 0;
+ } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "RPL returned unsupported data format %u\n",
+ rpl_response_format);
+ return -EINVAL;
+ } else {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "RPL returned extended format 2 instead of 4\n");
+ }
+ }
+
+ rpl_8byte_wwid_list = rpl_list;
+ num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
+ rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
+
+ rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
+ if (!rpl_16byte_wwid_list)
+ return -ENOMEM;
+
+ put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
+ &rpl_16byte_wwid_list->header.list_length);
+ rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
+
+ for (i = 0; i < num_physicals; i++) {
+ memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
+ memset(&rpl_16byte_wwid_list->lun_entries[i].wwid, 0, 8);
+ memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
+ rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
+ rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
+ rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
+ rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
+ rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
+ }
+
+ kfree(rpl_8byte_wwid_list);
+ *buffer = rpl_16byte_wwid_list;
+
+ return 0;
}
static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
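pqi_report_phys_luns() above normalizes the firmware response: if the controller only returned the older 8-byte-WWID report format, each entry is copied into a 16-byte-WWID entry, with the lower eight bytes zeroed and the original WWID placed in the upper eight. A compilable sketch of that per-entry conversion, with stand-in struct names rather than the real report_phys_lun_* layouts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_lun_8 {
        uint8_t lunid[8];
        uint8_t wwid[8];
};

struct demo_lun_16 {
        uint8_t lunid[8];
        uint8_t wwid[16];
};

static void demo_widen_entry(const struct demo_lun_8 *src,
                             struct demo_lun_16 *dst)
{
        memcpy(dst->lunid, src->lunid, sizeof(dst->lunid));
        memset(&dst->wwid[0], 0, 8);                    /* low half unused */
        memcpy(&dst->wwid[8], src->wwid, sizeof(src->wwid));
}

int main(void)
{
        struct demo_lun_8 in = {
                .lunid = { 1, 2, 3, 4, 5, 6, 7, 8 },
                .wwid = { 0x50, 0x01, 0x43, 0x80, 0xaa, 0xbb, 0xcc, 0xdd },
        };
        struct demo_lun_16 out;
        int i;

        demo_widen_entry(&in, &out);
        printf("wwid: ");
        for (i = 0; i < 16; i++)
                printf("%02x", out.wwid[i]);
        printf("\n");
        return 0;
}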
@@ -1105,14 +1202,14 @@ static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
- struct report_phys_lun_extended **physdev_list,
- struct report_log_lun_extended **logdev_list)
+ struct report_phys_lun_16byte_wwid_list **physdev_list,
+ struct report_log_lun_list **logdev_list)
{
int rc;
size_t logdev_list_length;
size_t logdev_data_length;
- struct report_log_lun_extended *internal_logdev_list;
- struct report_log_lun_extended *logdev_data;
+ struct report_log_lun_list *internal_logdev_list;
+ struct report_log_lun_list *logdev_data;
struct report_lun_header report_lun_header;
rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
@@ -1137,7 +1234,7 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
} else {
memset(&report_lun_header, 0, sizeof(report_lun_header));
logdev_data =
- (struct report_log_lun_extended *)&report_lun_header;
+ (struct report_log_lun_list *)&report_lun_header;
logdev_list_length = 0;
}
@@ -1145,7 +1242,7 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
logdev_list_length;
internal_logdev_list = kmalloc(logdev_data_length +
- sizeof(struct report_log_lun_extended), GFP_KERNEL);
+ sizeof(struct report_log_lun), GFP_KERNEL);
if (!internal_logdev_list) {
kfree(*logdev_list);
*logdev_list = NULL;
@@ -1154,9 +1251,9 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
memcpy(internal_logdev_list, logdev_data, logdev_data_length);
memset((u8 *)internal_logdev_list + logdev_data_length, 0,
- sizeof(struct report_log_lun_extended_entry));
+ sizeof(struct report_log_lun));
put_unaligned_be32(logdev_list_length +
- sizeof(struct report_log_lun_extended_entry),
+ sizeof(struct report_log_lun),
&internal_logdev_list->header.list_length);
kfree(*logdev_list);
@@ -1543,6 +1640,85 @@ out:
return rc;
}
+/*
+ * Prevent adding a drive to the OS for some corner cases, such as a
+ * drive undergoing a sanitize operation. Some OSes will continue to
+ * poll the drive until the sanitize completes, which can take hours,
+ * resulting in long bootup delays. Commands such as TUR and READ_CAP
+ * are allowed, but READ/WRITE cause a check condition, so the OS
+ * cannot check/read the partition table.
+ * Note: devices that have completed sanitize must be re-enabled
+ * using the management utility.
+ */
+static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ u8 scsi_status;
+ int rc;
+ enum dma_data_direction dir;
+ char *buffer;
+ int buffer_length = 64;
+ size_t sense_data_length;
+ struct scsi_sense_hdr sshdr;
+ struct pqi_raid_path_request request;
+ struct pqi_raid_error_info error_info;
+ bool offline = false; /* Assume keep online */
+
+ /* Do not check controllers. */
+ if (pqi_is_hba_lunid(device->scsi3addr))
+ return false;
+
+ /* Do not check LVs. */
+ if (pqi_is_logical_device(device))
+ return false;
+
+ buffer = kmalloc(buffer_length, GFP_KERNEL);
+ if (!buffer)
+ return false; /* Assume not offline */
+
+ /* Check for SANITIZE in progress using TUR */
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
+ buffer_length, 0, &dir);
+ if (rc)
+ goto out; /* Assume not offline */
+
+ memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
+
+ if (rc)
+ goto out; /* Assume not offline */
+
+ scsi_status = error_info.status;
+ sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
+ if (sense_data_length == 0)
+ sense_data_length =
+ get_unaligned_le16(&error_info.response_data_length);
+ if (sense_data_length) {
+ if (sense_data_length > sizeof(error_info.data))
+ sense_data_length = sizeof(error_info.data);
+
+ /*
+ * Check for sanitize in progress: asc:0x04, ascq: 0x1b
+ */
+ if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+ scsi_normalize_sense(error_info.data,
+ sense_data_length, &sshdr) &&
+ sshdr.sense_key == NOT_READY &&
+ sshdr.asc == 0x04 &&
+ sshdr.ascq == 0x1b) {
+ device->device_offline = true;
+ offline = true;
+ goto out; /* Keep device offline */
+ }
+ }
+
+out:
+ kfree(buffer);
+ return offline;
+}
+
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *id_phys)
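pqi_keep_device_offline() above issues a TEST UNIT READY and keeps the device hidden when the sense data reports "sanitize in progress" (NOT READY, ASC 0x04, ASCQ 0x1b). The following standalone sketch shows just that sense-data test for fixed-format sense; the demo_* names are local, and the driver itself goes through scsi_normalize_sense() instead:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_sanitize_in_progress(const uint8_t *sense, int len)
{
        uint8_t rc, key, asc, ascq;

        if (len < 14)
                return false;

        rc = sense[0] & 0x7f;
        if (rc != 0x70 && rc != 0x71)   /* fixed-format sense only */
                return false;

        key = sense[2] & 0x0f;
        asc = sense[12];
        ascq = sense[13];

        return key == 0x2 && asc == 0x04 && ascq == 0x1b;
}

int main(void)
{
        uint8_t sense[18] = { 0 };

        sense[0] = 0x70;        /* current, fixed format */
        sense[2] = 0x02;        /* sense key: NOT READY */
        sense[7] = 10;          /* additional sense length */
        sense[12] = 0x04;       /* ASC: logical unit not ready */
        sense[13] = 0x1b;       /* ASCQ: sanitize in progress */

        printf("keep offline: %d\n",
               demo_sanitize_in_progress(sense, sizeof(sense)));
        return 0;
}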
@@ -1693,8 +1869,6 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi
{
int rc;
- pqi_device_remove_start(device);
-
rc = pqi_device_wait_for_pending_io(ctrl_info, device,
PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
if (rc)
@@ -1708,6 +1882,8 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi
scsi_remove_device(device->sdev);
else
pqi_remove_sas_device(device);
+
+ pqi_device_remove_start(device);
}
/* Assumes the SCSI device list lock is held. */
@@ -1730,7 +1906,7 @@ static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_d
return false;
if (dev1->is_physical_device)
- return dev1->wwid == dev2->wwid;
+ return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}
@@ -1800,7 +1976,9 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
else
count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
- " %016llx", device->sas_address);
+ " %016llx%016llx",
+ get_unaligned_be64(&device->wwid[0]),
+ get_unaligned_be64(&device->wwid[8]));
count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
@@ -1986,7 +2164,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
scsi_device_list_entry) {
if (device->device_gone) {
- list_del_init(&device->scsi_device_list_entry);
+ list_del(&device->scsi_device_list_entry);
list_add_tail(&device->delete_list_entry, &delete_list);
}
}
@@ -2025,15 +2203,13 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
if (device->volume_offline) {
pqi_dev_info(ctrl_info, "offline", device);
pqi_show_volume_status(ctrl_info, device);
- }
- list_del(&device->delete_list_entry);
- if (pqi_is_device_added(device)) {
- pqi_remove_device(ctrl_info, device);
} else {
- if (!device->volume_offline)
- pqi_dev_info(ctrl_info, "removed", device);
- pqi_free_device(device);
+ pqi_dev_info(ctrl_info, "removed", device);
}
+ if (pqi_is_device_added(device))
+ pqi_remove_device(ctrl_info, device);
+ list_del(&device->delete_list_entry);
+ pqi_free_device(device);
}
/*
@@ -2116,13 +2292,14 @@ static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
}
static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+ struct pqi_scsi_dev *device, struct report_phys_lun_16byte_wwid *phys_lun)
{
if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
+ ctrl_info->rpl_extended_format_4_5_supported ||
pqi_is_device_with_sas_address(device))
- device->wwid = phys_lun_ext_entry->wwid;
+ memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
else
- device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier));
+ memcpy(&device->wwid[8], device->page_83_identifier, 8);
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -2130,10 +2307,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
int i;
int rc;
LIST_HEAD(new_device_list_head);
- struct report_phys_lun_extended *physdev_list = NULL;
- struct report_log_lun_extended *logdev_list = NULL;
- struct report_phys_lun_extended_entry *phys_lun_ext_entry;
- struct report_log_lun_extended_entry *log_lun_ext_entry;
+ struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
+ struct report_log_lun_list *logdev_list = NULL;
+ struct report_phys_lun_16byte_wwid *phys_lun;
+ struct report_log_lun *log_lun;
struct bmic_identify_physical_device *id_phys = NULL;
u32 num_physicals;
u32 num_logicals;
@@ -2184,10 +2361,9 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (pqi_hide_vsep) {
for (i = num_physicals - 1; i >= 0; i--) {
- phys_lun_ext_entry =
- &physdev_list->lun_entries[i];
- if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) {
- pqi_mask_device(phys_lun_ext_entry->lunid);
+ phys_lun = &physdev_list->lun_entries[i];
+ if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
+ pqi_mask_device(phys_lun->lunid);
break;
}
}
@@ -2231,16 +2407,14 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if ((!pqi_expose_ld_first && i < num_physicals) ||
(pqi_expose_ld_first && i >= num_logicals)) {
is_physical_device = true;
- phys_lun_ext_entry =
- &physdev_list->lun_entries[physical_index++];
- log_lun_ext_entry = NULL;
- scsi3addr = phys_lun_ext_entry->lunid;
+ phys_lun = &physdev_list->lun_entries[physical_index++];
+ log_lun = NULL;
+ scsi3addr = phys_lun->lunid;
} else {
is_physical_device = false;
- phys_lun_ext_entry = NULL;
- log_lun_ext_entry =
- &logdev_list->lun_entries[logical_index++];
- scsi3addr = log_lun_ext_entry->lunid;
+ phys_lun = NULL;
+ log_lun = &logdev_list->lun_entries[logical_index++];
+ scsi3addr = log_lun->lunid;
}
if (is_physical_device && pqi_skip_device(scsi3addr))
@@ -2255,7 +2429,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
if (is_physical_device) {
- device->device_type = phys_lun_ext_entry->device_type;
+ device->device_type = phys_lun->device_type;
if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
@@ -2266,6 +2440,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (!pqi_is_supported_device(device))
continue;
+ /* Do not present disks that the OS cannot fully probe */
+ if (pqi_keep_device_offline(ctrl_info, device))
+ continue;
+
/* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
@@ -2276,8 +2454,9 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (rc) {
if (device->is_physical_device)
dev_warn(&ctrl_info->pci_dev->dev,
- "obtaining device info failed, skipping physical device %016llx\n",
- get_unaligned_be64(&phys_lun_ext_entry->wwid));
+ "obtaining device info failed, skipping physical device %016llx%016llx\n",
+ get_unaligned_be64(&phys_lun->wwid[0]),
+ get_unaligned_be64(&phys_lun->wwid[8]));
else
dev_warn(&ctrl_info->pci_dev->dev,
"obtaining device info failed, skipping logical device %08x%08x\n",
@@ -2290,21 +2469,21 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
- pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry);
- if ((phys_lun_ext_entry->device_flags &
+ pqi_set_physical_device_wwid(ctrl_info, device, phys_lun);
+ if ((phys_lun->device_flags &
CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
- phys_lun_ext_entry->aio_handle) {
+ phys_lun->aio_handle) {
device->aio_enabled = true;
device->aio_handle =
- phys_lun_ext_entry->aio_handle;
+ phys_lun->aio_handle;
}
} else {
- memcpy(device->volume_id, log_lun_ext_entry->volume_id,
+ memcpy(device->volume_id, log_lun->volume_id,
sizeof(device->volume_id));
}
if (pqi_is_device_with_sas_address(device))
- device->sas_address = get_unaligned_be64(&device->wwid);
+ device->sas_address = get_unaligned_be64(&device->wwid[8]);
new_device_list[num_valid_devices++] = device;
}
@@ -2328,6 +2507,25 @@ out:
return rc;
}
+static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+ struct pqi_scsi_dev *next;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (pqi_is_device_added(device))
+ pqi_remove_device(ctrl_info, device);
+ list_del(&device->scsi_device_list_entry);
+ pqi_free_device(device);
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -3132,9 +3330,10 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
return rc;
}
-static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
}
static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
@@ -3152,7 +3351,7 @@ static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue
while (1) {
oq_pi = readl(queue_group->oq_pi);
if (oq_pi >= ctrl_info->num_elements_per_oq) {
- pqi_invalid_response(ctrl_info);
+ pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
dev_err(&ctrl_info->pci_dev->dev,
"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
@@ -3167,7 +3366,7 @@ static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue
request_id = get_unaligned_le16(&response->request_id);
if (request_id >= ctrl_info->max_io_slots) {
- pqi_invalid_response(ctrl_info);
+ pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
dev_err(&ctrl_info->pci_dev->dev,
"request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
@@ -3176,7 +3375,7 @@ static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue
io_request = &ctrl_info->io_request_pool[request_id];
if (atomic_read(&io_request->refcount) == 0) {
- pqi_invalid_response(ctrl_info);
+ pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
dev_err(&ctrl_info->pci_dev->dev,
"request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
request_id, oq_pi, oq_ci);
@@ -3212,7 +3411,7 @@ static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue
pqi_process_io_error(response->header.iu_type, io_request);
break;
default:
- pqi_invalid_response(ctrl_info);
+ pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
dev_err(&ctrl_info->pci_dev->dev,
"unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
response->header.iu_type, oq_pi, oq_ci);
@@ -3394,7 +3593,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
pqi_ofa_free_host_buffer(ctrl_info);
pqi_ctrl_ofa_done(ctrl_info);
pqi_ofa_ctrl_unquiesce(ctrl_info);
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
break;
}
}
@@ -3519,7 +3718,7 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t)
dev_err(&ctrl_info->pci_dev->dev,
"no heartbeat detected - last heartbeat count: %u\n",
heartbeat_count);
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
return;
}
} else {
@@ -3583,7 +3782,7 @@ static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
while (1) {
oq_pi = readl(event_queue->oq_pi);
if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
- pqi_invalid_response(ctrl_info);
+ pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
dev_err(&ctrl_info->pci_dev->dev,
"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
@@ -4079,12 +4278,12 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
while (1) {
+ msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
status = readb(&pqi_registers->function_and_status_code);
if (status == PQI_STATUS_IDLE)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
}
/*
@@ -5749,64 +5948,91 @@ out:
return rc;
}
-static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
- struct pqi_queue_group *queue_group)
+static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
{
+ unsigned int i;
unsigned int path;
unsigned long flags;
- bool list_is_empty;
+ unsigned int queued_io_count;
+ struct pqi_queue_group *queue_group;
+ struct pqi_io_request *io_request;
- for (path = 0; path < 2; path++) {
- while (1) {
- spin_lock_irqsave(
- &queue_group->submit_lock[path], flags);
- list_is_empty =
- list_empty(&queue_group->request_list[path]);
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
- if (list_is_empty)
- break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
+ queued_io_count = 0;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+ for (path = 0; path < 2; path++) {
+ spin_lock_irqsave(&queue_group->submit_lock[path], flags);
+ list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
+ queued_io_count++;
+ spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
}
- return 0;
+ return queued_io_count;
}
-static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
{
- int rc;
unsigned int i;
unsigned int path;
+ unsigned int nonempty_inbound_queue_count;
struct pqi_queue_group *queue_group;
pqi_index_t iq_pi;
pqi_index_t iq_ci;
+ nonempty_inbound_queue_count = 0;
+
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
queue_group = &ctrl_info->queue_groups[i];
-
- rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
- if (rc)
- return rc;
-
for (path = 0; path < 2; path++) {
iq_pi = queue_group->iq_pi_copy[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
+ if (iq_ci != iq_pi)
+ nonempty_inbound_queue_count++;
+ }
+ }
- while (1) {
- iq_ci = readl(queue_group->iq_ci[path]);
- if (iq_ci == iq_pi)
- break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
- }
+ return nonempty_inbound_queue_count;
+}
+
+#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
+
+static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ unsigned int queued_io_count;
+ unsigned int nonempty_inbound_queue_count;
+ bool displayed_warning;
+
+ displayed_warning = false;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+
+ while (1) {
+ queued_io_count = pqi_queued_io_count(ctrl_info);
+ nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
+ if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
+ break;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
+ displayed_warning = true;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
}
+ usleep_range(1000, 2000);
}
+ if (displayed_warning)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "queued I/O drained after waiting for %u seconds\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+
return 0;
}
@@ -5872,7 +6098,7 @@ static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
- if (msecs_waiting > timeout_msecs) {
+ if (msecs_waiting >= timeout_msecs) {
dev_err(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
@@ -5907,6 +6133,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
{
int rc;
unsigned int wait_secs;
+ int cmds_outstanding;
wait_secs = 0;
@@ -5924,11 +6151,10 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
}
wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
-
+ cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
dev_warn(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n",
- ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun,
- wait_secs);
+ "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
}
return rc;
@@ -6071,9 +6297,13 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
rphy = target_to_rphy(starget);
device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
if (device) {
- device->target = sdev_id(sdev);
- device->lun = sdev->lun;
- device->target_lun_valid = true;
+ if (device->target_lun_valid) {
+ device->ignore_device = true;
+ } else {
+ device->target = sdev_id(sdev);
+ device->lun = sdev->lun;
+ device->target_lun_valid = true;
+ }
}
} else {
device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
@@ -6110,39 +6340,25 @@ static int pqi_map_queues(struct Scsi_Host *shost)
ctrl_info->pci_dev, 0);
}
-static int pqi_slave_configure(struct scsi_device *sdev)
+static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
{
- struct pqi_scsi_dev *device;
-
- device = sdev->hostdata;
- device->devtype = sdev->type;
-
- return 0;
+ return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
}
-static void pqi_slave_destroy(struct scsi_device *sdev)
+static int pqi_slave_configure(struct scsi_device *sdev)
{
- unsigned long flags;
+ int rc = 0;
struct pqi_scsi_dev *device;
- struct pqi_ctrl_info *ctrl_info;
-
- ctrl_info = shost_to_hba(sdev->host);
-
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- if (device) {
- sdev->hostdata = NULL;
- if (!list_empty(&device->scsi_device_list_entry))
- list_del(&device->scsi_device_list_entry);
- }
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ device->devtype = sdev->type;
- if (device) {
- pqi_dev_info(ctrl_info, "removed", device);
- pqi_free_device(device);
+ if (pqi_is_tape_changer_device(device) && device->ignore_device) {
+ rc = -ENXIO;
+ device->ignore_device = false;
}
+
+ return rc;
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
@@ -6631,20 +6847,22 @@ static DEVICE_ATTR(enable_r5_writes, 0644,
static DEVICE_ATTR(enable_r6_writes, 0644,
pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
-static struct device_attribute *pqi_shost_attrs[] = {
- &dev_attr_driver_version,
- &dev_attr_firmware_version,
- &dev_attr_model,
- &dev_attr_serial_number,
- &dev_attr_vendor,
- &dev_attr_rescan,
- &dev_attr_lockup_action,
- &dev_attr_enable_stream_detection,
- &dev_attr_enable_r5_writes,
- &dev_attr_enable_r6_writes,
+static struct attribute *pqi_shost_attrs[] = {
+ &dev_attr_driver_version.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_model.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_rescan.attr,
+ &dev_attr_lockup_action.attr,
+ &dev_attr_enable_stream_detection.attr,
+ &dev_attr_enable_r5_writes.attr,
+ &dev_attr_enable_r6_writes.attr,
NULL
};
+ATTRIBUTE_GROUPS(pqi_shost);
+
static ssize_t pqi_unique_id_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6665,12 +6883,10 @@ static ssize_t pqi_unique_id_show(struct device *dev,
return -ENODEV;
}
- if (device->is_physical_device) {
- memset(unique_id, 0, 8);
- memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
- } else {
+ if (device->is_physical_device)
+ memcpy(unique_id, device->wwid, sizeof(device->wwid));
+ else
memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
- }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6915,17 +7131,19 @@ static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
-static struct device_attribute *pqi_sdev_attrs[] = {
- &dev_attr_lunid,
- &dev_attr_unique_id,
- &dev_attr_path_info,
- &dev_attr_sas_address,
- &dev_attr_ssd_smart_path_enabled,
- &dev_attr_raid_level,
- &dev_attr_raid_bypass_cnt,
+static struct attribute *pqi_sdev_attrs[] = {
+ &dev_attr_lunid.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_path_info.attr,
+ &dev_attr_sas_address.attr,
+ &dev_attr_ssd_smart_path_enabled.attr,
+ &dev_attr_raid_level.attr,
+ &dev_attr_raid_bypass_cnt.attr,
NULL
};
+ATTRIBUTE_GROUPS(pqi_sdev);
+
static struct scsi_host_template pqi_driver_template = {
.module = THIS_MODULE,
.name = DRIVER_NAME_SHORT,
@@ -6938,10 +7156,9 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
- .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
- .sdev_attrs = pqi_sdev_attrs,
- .shost_attrs = pqi_shost_attrs,
+ .sdev_groups = pqi_sdev_groups,
+ .shost_groups = pqi_shost_groups,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
@@ -7301,6 +7518,13 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
ctrl_info->unique_wwid_in_report_phys_lun_supported =
firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
+ ctrl_info->firmware_triage_supported = firmware_feature->enabled;
+ pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
+ break;
+ case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
+ ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7396,6 +7620,16 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Firmware Triage",
+ .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RPL Extended Formats 4 and 5",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -7496,6 +7730,8 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->raid_iu_timeout_supported = false;
ctrl_info->tmf_iu_timeout_supported = false;
ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
+ ctrl_info->firmware_triage_supported = false;
+ ctrl_info->rpl_extended_format_4_5_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -7627,6 +7863,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
u32 product_id;
if (reset_devices) {
+ if (pqi_is_fw_triage_supported(ctrl_info)) {
+ rc = sis_wait_for_fw_triage_completion(ctrl_info);
+ if (rc)
+ return rc;
+ }
sis_soft_reset(ctrl_info);
msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
} else {
@@ -8169,6 +8410,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
+ pqi_remove_all_scsi_devices(ctrl_info);
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8390,6 +8632,7 @@ static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
unsigned int i;
struct pqi_io_request *io_request;
struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
for (i = 0; i < ctrl_info->max_io_slots; i++) {
io_request = &ctrl_info->io_request_pool[i];
@@ -8398,7 +8641,13 @@ static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
scmd = io_request->scmd;
if (scmd) {
- set_host_byte(scmd, DID_NO_CONNECT);
+ sdev = scmd->device;
+ if (!sdev || !scsi_device_online(sdev)) {
+ pqi_free_io_request(io_request);
+ continue;
+ } else {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ }
} else {
io_request->status = -ENXIO;
io_request->error_info =
@@ -8430,7 +8679,8 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
pqi_take_ctrl_offline_deferred(ctrl_info);
}
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
if (!ctrl_info->controller_online)
return;
@@ -8439,7 +8689,7 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = false;
pqi_ctrl_block_requests(ctrl_info);
if (!pqi_disable_ctrl_shutdown)
- sis_shutdown_ctrl(ctrl_info);
+ sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
pci_disable_device(ctrl_info->pci_dev);
dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
schedule_work(&ctrl_info->ctrl_offline_work);
@@ -9043,6 +9293,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
},
{
@@ -9275,6 +9529,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_firmware_status) != 0xbc);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_ctrl_shutdown_reason_code) != 0xcc);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_mailbox) != 0x1000);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
pqi_registers) != 0x4000);
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index afd9bafebd1d..dea4ebaf1677 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -343,7 +343,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
}
if (found_device->devtype == TYPE_ENCLOSURE) {
- *identifier = get_unaligned_be64(&found_device->wwid);
+ *identifier = get_unaligned_be64(&found_device->wwid[8]);
rc = 0;
goto out;
}
@@ -364,7 +364,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
memcmp(device->phys_connector,
found_device->phys_connector, 2) == 0) {
*identifier =
- get_unaligned_be64(&device->wwid);
+ get_unaligned_be64(&device->wwid[8]);
rc = 0;
goto out;
}
@@ -380,7 +380,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
if (device->devtype == TYPE_ENCLOSURE &&
CISS_GET_DRIVE_NUMBER(device->scsi3addr) ==
PQI_VSEP_CISS_BTL) {
- *identifier = get_unaligned_be64(&device->wwid);
+ *identifier = get_unaligned_be64(&device->wwid[8]);
rc = 0;
goto out;
}
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index d63c46a8e38b..d66eb8ea161c 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -51,12 +51,20 @@
#define SIS_BASE_STRUCT_REVISION 9
#define SIS_BASE_STRUCT_ALIGNMENT 16
+#define SIS_CTRL_KERNEL_FW_TRIAGE 0x3
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
#define SIS_CTRL_READY_TIMEOUT_SECS 180
#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
+enum sis_fw_triage_status {
+ FW_TRIAGE_NOT_STARTED = 0,
+ FW_TRIAGE_STARTED,
+ FW_TRIAGE_COND_INVALID,
+ FW_TRIAGE_COMPLETED
+};
+
#pragma pack(1)
/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
@@ -389,14 +397,17 @@ void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_INTX);
}
-void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
if (readl(&ctrl_info->registers->sis_firmware_status) &
SIS_CTRL_KERNEL_PANIC)
return;
- writel(SIS_TRIGGER_SHUTDOWN,
- &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ if (ctrl_info->firmware_triage_supported)
+ writel(ctrl_shutdown_reason, &ctrl_info->registers->sis_ctrl_shutdown_reason_code);
+
+ writel(SIS_TRIGGER_SHUTDOWN, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info)
@@ -419,12 +430,55 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
return readl(&ctrl_info->registers->sis_driver_scratch);
}
+static inline enum sis_fw_triage_status
+ sis_read_firmware_triage_status(struct pqi_ctrl_info *ctrl_info)
+{
+ return ((enum sis_fw_triage_status)(readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_FW_TRIAGE));
+}
+
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
writel(SIS_SOFT_RESET,
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
+#define SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS 300
+#define SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS 1
+
+int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ enum sis_fw_triage_status status;
+ unsigned long timeout;
+
+ timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ while (1) {
+ status = sis_read_firmware_triage_status(ctrl_info);
+ if (status == FW_TRIAGE_COND_INVALID) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "firmware triage condition invalid\n");
+ rc = -EINVAL;
+ break;
+ } else if (status == FW_TRIAGE_NOT_STARTED ||
+ status == FW_TRIAGE_COMPLETED) {
+ rc = 0;
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for firmware triage status\n");
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ ssleep(SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS);
+ }
+
+ return rc;
+}
+
static void __attribute__((unused)) verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct sis_base_struct,
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index d29c1352a826..bd92ff49f385 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -21,12 +21,14 @@ int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
void sis_enable_intx(struct pqi_ctrl_info *ctrl_info);
-void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info);
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info);
int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index f4c666285bba..4ec7e30678e1 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -374,7 +374,7 @@ int snic_glob_init(void);
void snic_glob_cleanup(void);
extern struct workqueue_struct *snic_event_queue;
-extern struct device_attribute *snic_attrs[];
+extern const struct attribute_group *snic_host_groups[];
int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int snic_abort_cmd(struct scsi_cmnd *);
diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c
index 32d5d556b6f8..dc03ce1ec909 100644
--- a/drivers/scsi/snic/snic_attrs.c
+++ b/drivers/scsi/snic/snic_attrs.c
@@ -68,10 +68,19 @@ static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);
-struct device_attribute *snic_attrs[] = {
- &dev_attr_snic_sym_name,
- &dev_attr_snic_state,
- &dev_attr_drv_version,
- &dev_attr_link_state,
+static struct attribute *snic_host_attrs[] = {
+ &dev_attr_snic_sym_name.attr,
+ &dev_attr_snic_state.attr,
+ &dev_attr_drv_version.attr,
+ &dev_attr_link_state.attr,
NULL,
};
+
+static const struct attribute_group snic_host_attr_group = {
+ .attrs = snic_host_attrs
+};
+
+const struct attribute_group *snic_host_groups[] = {
+ &snic_host_attr_group,
+ NULL
+};
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 14f4ce665e58..29d56396058c 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -129,7 +129,7 @@ static struct scsi_host_template snic_host_template = {
.can_queue = SNIC_MAX_IO_REQ,
.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
.max_sectors = 0x800,
- .shost_attrs = snic_attrs,
+ .shost_groups = snic_host_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct snic_internal_io_state),
.proc_name = "snic_scsi",
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 43a950185e24..5f17666f3e1d 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -342,7 +342,7 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
sc->result = ret;
- sc->scsi_done(sc);
+ scsi_done(sc);
return 0;
}
@@ -676,8 +676,7 @@ snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
- if (sc->scsi_done)
- sc->scsi_done(sc);
+ scsi_done(sc);
snic_stats_update_io_cmpl(&snic->s_stats);
} /* end of snic_icmnd_cmpl_handler */
@@ -855,14 +854,12 @@ snic_process_itmf_cmpl(struct snic *snic,
snic_release_req_buf(snic, rqi, sc);
- if (sc->scsi_done) {
- SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
- jiffies_to_msecs(jiffies - start_time),
- (ulong) fwreq, SNIC_TRC_CMD(sc),
- SNIC_TRC_CMD_STATE_FLAGS(sc));
+ SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time),
+ (ulong) fwreq, SNIC_TRC_CMD(sc),
+ SNIC_TRC_CMD_STATE_FLAGS(sc));
- sc->scsi_done(sc);
- }
+ scsi_done(sc);
break;
@@ -1475,7 +1472,7 @@ snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
* Call scsi_done to complete the IO.
*/
sc->result = (DID_ERROR << 16);
- sc->scsi_done(sc);
+ scsi_done(sc);
break;
default:
@@ -1855,7 +1852,7 @@ snic_dr_clean_single_req(struct snic *snic,
snic_release_req_buf(snic, rqi, sc);
sc->result = (DID_ERROR << 16);
- sc->scsi_done(sc);
+ scsi_done(sc);
ret = 0;
@@ -2500,14 +2497,12 @@ cleanup:
/* Update IO stats */
snic_stats_update_io_cmpl(&snic->s_stats);
- if (sc->scsi_done) {
- SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
- jiffies_to_msecs(jiffies - st_time), 0,
- SNIC_TRC_CMD(sc),
- SNIC_TRC_CMD_STATE_FLAGS(sc));
+ SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
+ jiffies_to_msecs(jiffies - st_time), 0,
+ SNIC_TRC_CMD(sc),
+ SNIC_TRC_CMD_STATE_FLAGS(sc));
- sc->scsi_done(sc);
- }
+ scsi_done(sc);
}
} /* end of snic_scsi_cleanup */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 3009b986d1d7..50f8dc960ae2 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -728,7 +728,12 @@ static int sr_probe(struct device *dev)
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
sr_revalidate_disk(cd);
- device_add_disk(&sdev->sdev_gendev, disk, NULL);
+
+ error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
+ if (error) {
+ kref_put(&cd->kref, sr_kref_release);
+ goto fail;
+ }
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index f1ba7f5b52a8..e6420f2127ce 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -574,7 +574,7 @@ static void return_abnormal_state(struct st_hba *hba, int status)
if (ccb->cmd) {
scsi_dma_unmap(ccb->cmd);
ccb->cmd->result = status << 16;
- ccb->cmd->scsi_done(ccb->cmd);
+ scsi_done(ccb->cmd);
ccb->cmd = NULL;
}
}
@@ -590,9 +590,9 @@ stex_slave_config(struct scsi_device *sdev)
return 0;
}
-static int
-stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int stex_queuecommand_lck(struct scsi_cmnd *cmd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct st_hba *hba;
struct Scsi_Host *host;
unsigned int id, lun;
@@ -688,8 +688,6 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
break;
}
- cmd->scsi_done = done;
-
tag = scsi_cmd_to_rq(cmd)->tag;
if (unlikely(tag >= host->can_queue))
@@ -764,7 +762,7 @@ static void stex_scsi_done(struct st_ccb *ccb)
}
cmd->result = result;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static void stex_copy_data(struct st_ccb *ccb,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 9eb1b88a29dd..20595c0ba0ae 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1154,7 +1154,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
scsi_set_resid(scmnd,
cmd_request->payload->range.len - data_transfer_length);
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
if (payload_sz >
sizeof(struct vmbus_channel_packet_multipage_buffer))
@@ -1767,7 +1767,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
* future versions of the host.
*/
if (!storvsc_scsi_cmd_ok(scmnd)) {
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
return 0;
}
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 6d0b07b9cb31..b04bfde65e3f 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -133,7 +133,7 @@ void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
complete(ucmd->eh_done);
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/*
@@ -486,14 +486,12 @@ void sym_log_bus_error(struct Scsi_Host *shost)
* queuecommand method. Entered with the host adapter lock held and
* interrupts disabled.
*/
-static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd)
{
struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
int sts = 0;
- cmd->scsi_done = done;
memset(ucp, 0, sizeof(*ucp));
/*
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 432df76e6318..b2521b830be7 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -165,14 +165,14 @@ config SCSI_UFS_BSG
If unsure, say N.
config SCSI_UFS_EXYNOS
- tristate "EXYNOS specific hooks to UFS controller platform driver"
+ tristate "Exynos specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
help
- This selects the EXYNOS specific additions to UFSHCD platform driver.
- UFS host on EXYNOS includes HCI and UNIPRO layer, and associates with
- UFS-PHY driver.
+	  This selects the Samsung Exynos SoC specific additions to the UFSHCD
+	  platform driver. The UFS host on Samsung Exynos SoCs includes the HCI
+	  and UNIPRO layers and works together with the UFS-PHY driver.
- Select this if you have UFS host controller on EXYNOS chipset.
+	  Select this if you have a UFS host controller on a Samsung Exynos SoC.
If unsure, say N.
config SCSI_UFS_CRYPTO
@@ -199,3 +199,12 @@ config SCSI_UFS_FAULT_INJECTION
help
Enable fault injection support in the UFS driver. This makes it easier
to test the UFS error handler and abort handler.
+
+config SCSI_UFS_HWMON
+ bool "UFS Temperature Notification"
+ depends on SCSI_UFSHCD=HWMON || HWMON=y
+ help
+ This provides support for UFS hardware monitoring. If enabled,
+ a hardware monitoring device will be created for the UFS device.
+
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index c407da9b5171..966048875b50 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -10,6 +10,7 @@ ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index bb2dd79a1bcd..30d0c1aba0c7 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -1176,10 +1176,14 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
}
}
-static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ if (status == PRE_CHANGE)
+ return 0;
+
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 6b706de8354b..8c7e8d321746 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -396,10 +396,14 @@ out:
return ret;
}
-static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+ if (status == PRE_CHANGE)
+ return 0;
+
if (pm_op == UFS_RUNTIME_PM)
return 0;
diff --git a/drivers/scsi/ufs/ufs-hwmon.c b/drivers/scsi/ufs/ufs-hwmon.c
new file mode 100644
index 000000000000..74855491dc8f
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hwmon.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UFS hardware monitoring support
+ * Copyright (c) 2021, Western Digital Corporation
+ */
+
+#include <linux/hwmon.h>
+#include <linux/units.h>
+
+#include "ufshcd.h"
+
+struct ufs_hwmon_data {
+ struct ufs_hba *hba;
+ u8 mask;
+};
+
+static int ufs_read_temp_enable(struct ufs_hba *hba, u8 mask, long *val)
+{
+ u32 ee_mask;
+ int err;
+
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
+ &ee_mask);
+ if (err)
+ return err;
+
+ *val = (mask & ee_mask & MASK_EE_TOO_HIGH_TEMP) || (mask & ee_mask & MASK_EE_TOO_LOW_TEMP);
+
+ return 0;
+}
+
+static int ufs_get_temp(struct ufs_hba *hba, enum attr_idn idn, long *val)
+{
+ u32 value;
+ int err;
+
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, 0, 0, &value);
+ if (err)
+ return err;
+
+ if (value == 0)
+ return -ENODATA;
+
+ *val = ((long)value - 80) * MILLIDEGREE_PER_DEGREE;
+
+ return 0;
+}
+
+static int ufs_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
+{
+ struct ufs_hwmon_data *data = dev_get_drvdata(dev);
+ struct ufs_hba *hba = data->hba;
+ int err;
+
+ down(&hba->host_sem);
+
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ err = ufs_read_temp_enable(hba, data->mask, val);
+
+ break;
+ case hwmon_temp_crit:
+ err = ufs_get_temp(hba, QUERY_ATTR_IDN_HIGH_TEMP_BOUND, val);
+
+ break;
+ case hwmon_temp_lcrit:
+ err = ufs_get_temp(hba, QUERY_ATTR_IDN_LOW_TEMP_BOUND, val);
+
+ break;
+ case hwmon_temp_input:
+ err = ufs_get_temp(hba, QUERY_ATTR_IDN_CASE_ROUGH_TEMP, val);
+
+ break;
+ default:
+ err = -EOPNOTSUPP;
+
+ break;
+ }
+
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+
+ return err;
+}
+
+static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long val)
+{
+ struct ufs_hwmon_data *data = dev_get_drvdata(dev);
+ struct ufs_hba *hba = data->hba;
+ int err;
+
+ if (attr != hwmon_temp_enable)
+ return -EINVAL;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+
+ if (val == 1)
+ err = ufshcd_update_ee_usr_mask(hba, MASK_EE_URGENT_TEMP, 0);
+ else
+ err = ufshcd_update_ee_usr_mask(hba, 0, MASK_EE_URGENT_TEMP);
+
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+
+ return err;
+}
+
+static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ return 0644;
+ case hwmon_temp_crit:
+ case hwmon_temp_lcrit:
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *ufs_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT),
+ NULL
+};
+
+static const struct hwmon_ops ufs_hwmon_ops = {
+ .is_visible = ufs_hwmon_is_visible,
+ .read = ufs_hwmon_read,
+ .write = ufs_hwmon_write,
+};
+
+static const struct hwmon_chip_info ufs_hwmon_hba_info = {
+ .ops = &ufs_hwmon_ops,
+ .info = ufs_hwmon_info,
+};
+
+void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask)
+{
+ struct device *dev = hba->dev;
+ struct ufs_hwmon_data *data;
+ struct device *hwmon;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ data->hba = hba;
+ data->mask = mask;
+
+ hwmon = hwmon_device_register_with_info(dev, "ufs", data, &ufs_hwmon_hba_info, NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ kfree(data);
+ return;
+ }
+
+ hba->hwmon_device = hwmon;
+}
+
+void ufs_hwmon_remove(struct ufs_hba *hba)
+{
+ struct ufs_hwmon_data *data;
+
+ if (!hba->hwmon_device)
+ return;
+
+ data = dev_get_drvdata(hba->hwmon_device);
+ hwmon_device_unregister(hba->hwmon_device);
+ hba->hwmon_device = NULL;
+ kfree(data);
+}
+
+void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask)
+{
+ if (!hba->hwmon_device)
+ return;
+
+ if (ee_mask & MASK_EE_TOO_HIGH_TEMP)
+ hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_max_alarm, 0);
+
+ if (ee_mask & MASK_EE_TOO_LOW_TEMP)
+ hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_min_alarm, 0);
+}
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 80b3545dd17d..fc5b214347b3 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include "ufshcd.h"
@@ -246,9 +247,9 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
if (on) {
ufs_mtk_ref_clk_notify(on, res);
- ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
+ ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
}
@@ -273,16 +274,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
out:
host->ref_clk_enabled = on;
- if (!on) {
- ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
+ if (on)
+ ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
+ else
ufs_mtk_ref_clk_notify(on, res);
- }
return 0;
}
static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
- u16 gating_us, u16 ungating_us)
+ u16 gating_us)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -293,7 +294,62 @@ static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
host->ref_clk_gating_wait_us = gating_us;
}
- host->ref_clk_ungating_wait_us = ungating_us;
+ host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
+}
+
+static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+ ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
+ ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
+ ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
+ ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
+ ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
+ } else {
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ }
+}
+
+static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+ unsigned long retry_ms)
+{
+ u64 timeout, time_checked;
+ u32 val, sm;
+ bool wait_idle;
+
+ /* cannot use plain ktime_get() in suspend */
+ timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
+
+ /* wait a specific time after check base */
+ udelay(10);
+ wait_idle = false;
+
+ do {
+ time_checked = ktime_get_mono_fast_ns();
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+
+ sm = val & 0x1f;
+
+ /*
+		 * If the state machine is in the H8 enter or H8 enter confirm
+		 * state, wait until it returns to the idle state.
+ */
+ if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
+ wait_idle = true;
+ udelay(50);
+ continue;
+ } else if (!wait_idle)
+ break;
+
+ if (wait_idle && (sm == VS_HCE_BASE))
+ break;
+ } while (time_checked < timeout);
+
+ if (wait_idle && sm != VS_HCE_BASE)
+ dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
@@ -305,7 +361,7 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
- ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
val = val >> 28;
@@ -689,6 +745,8 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_mphy_power_on(hba, true);
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
+ host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
goto out;
out_variant_clear:
@@ -932,11 +990,37 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
REGULATOR_MODE_NORMAL);
}
-static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ int ret;
+
+ /* disable auto-hibern8 */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* wait for the host to return to the idle state once auto-hibern8 is off */
+ ufs_mtk_wait_idle_state(hba, 5);
+
+ ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+ if (ret)
+ dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+}
+
+static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
{
int err;
struct arm_smccc_res res;
+ if (status == PRE_CHANGE) {
+ if (!ufshcd_is_auto_hibern8_supported(hba))
+ return 0;
+ ufs_mtk_auto_hibern8_disable(hba);
+ return 0;
+ }
+
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
if (err)
@@ -1001,7 +1085,7 @@ static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
"MPHY Ctrl ");
/* Direct debugging information to REG_MTK_PROBE */
- ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ ufs_mtk_dbg_sel(hba);
ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
@@ -1019,11 +1103,14 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
* requirements.
*/
if (mid == UFS_VENDOR_SAMSUNG)
- ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
+ ufs_mtk_setup_ref_clk_wait_us(hba, 1);
else if (mid == UFS_VENDOR_SKHYNIX)
- ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
+ ufs_mtk_setup_ref_clk_wait_us(hba, 30);
else if (mid == UFS_VENDOR_TOSHIBA)
- ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
+ ufs_mtk_setup_ref_clk_wait_us(hba, 100);
+ else
+ ufs_mtk_setup_ref_clk_wait_us(hba,
+ REFCLK_DEFAULT_WAIT_US);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 3f0d3bb769e8..414dca86c09f 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -15,9 +15,14 @@
#define REG_UFS_REFCLK_CTRL 0x144
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
+#define REG_UFS_MTK_IP_VER 0x2240
#define REG_UFS_REJECT_MON 0x22AC
#define REG_UFS_DEBUG_SEL 0x22C0
#define REG_UFS_PROBE 0x22C8
+#define REG_UFS_DEBUG_SEL_B0 0x22D0
+#define REG_UFS_DEBUG_SEL_B1 0x22D4
+#define REG_UFS_DEBUG_SEL_B2 0x22D8
+#define REG_UFS_DEBUG_SEL_B3 0x22DC
/*
* Ref-clk control
@@ -29,6 +34,7 @@
#define REFCLK_ACK BIT(1)
#define REFCLK_REQ_TIMEOUT_US 3000
+#define REFCLK_DEFAULT_WAIT_US 32
/*
* Other attributes
@@ -50,6 +56,26 @@ enum {
};
/*
+ * Vendor specific host controller state
+ */
+enum {
+ VS_HCE_RESET = 0,
+ VS_HCE_BASE = 1,
+ VS_HCE_OOCPR_WAIT = 2,
+ VS_HCE_DME_RESET = 3,
+ VS_HCE_MIDDLE = 4,
+ VS_HCE_DME_ENABLE = 5,
+ VS_HCE_DEFAULTS = 6,
+ VS_HIB_IDLEEN = 7,
+ VS_HIB_ENTER = 8,
+ VS_HIB_ENTER_CONF = 9,
+ VS_HIB_MIDDLE = 10,
+ VS_HIB_WAITTIMER = 11,
+ VS_HIB_EXIT_CONF = 12,
+ VS_HIB_EXIT = 13,
+};
+
+/*
* SiP commands
*/
#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
@@ -113,6 +139,7 @@ struct ufs_mtk_host {
bool ref_clk_enabled;
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
+ u32 ip_ver;
};
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9d9770f1db4f..0d2e950d0865 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -589,11 +589,15 @@ static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
gpiod_set_value_cansleep(host->device_reset, asserted);
}
-static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
+ if (status == PRE_CHANGE)
+ return 0;
+
if (ufs_qcom_is_link_off(hba)) {
/*
* Disable the tx/rx lane symbol clocks before PHY is
@@ -888,7 +892,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -916,7 +919,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
break;
}
- return err;
+ return 0;
}
static int
@@ -1213,24 +1216,34 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
int err = 0;
if (status == PRE_CHANGE) {
+ err = ufshcd_uic_hibern8_enter(hba);
+ if (err)
+ return err;
if (scale_up)
err = ufs_qcom_clk_scale_up_pre_change(hba);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
+ if (err)
+ ufshcd_uic_hibern8_exit(hba);
+
} else {
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
else
err = ufs_qcom_clk_scale_down_post_change(hba);
- if (err || !dev_req_params)
+
+ if (err || !dev_req_params) {
+ ufshcd_uic_hibern8_exit(hba);
goto out;
+ }
ufs_qcom_cfg_timers(hba,
dev_req_params->gear_rx,
dev_req_params->pwr_rx,
dev_req_params->hs_rate,
false);
+ ufshcd_uic_hibern8_exit(hba);
}
out:
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 8c6b38b1b142..0bfdca3e648e 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -152,6 +152,9 @@ enum attr_idn {
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
+ QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
+ QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
@@ -338,6 +341,9 @@ enum {
/* Possible values for dExtendedUFSFeaturesSupport */
enum {
+ UFS_DEV_LOW_TEMP_NOTIF = BIT(4),
+ UFS_DEV_HIGH_TEMP_NOTIF = BIT(5),
+ UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
@@ -370,6 +376,7 @@ enum {
MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
};
+#define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
/* Background operation status */
enum bkops_status {
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8859c13f4e09..eaeae83b999f 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -91,7 +91,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
clki->min_freq = clkfreq[i];
clki->max_freq = clkfreq[i+1];
- clki->name = kstrdup(name, GFP_KERNEL);
+ clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!strcmp(name, "ref_clk"))
clki->keep_link_active = true;
dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
@@ -126,7 +126,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (!vreg)
return -ENOMEM;
- vreg->name = kstrdup(name, GFP_KERNEL);
+ vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index db1bc8655d46..5c6a58a666d2 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -62,6 +62,9 @@
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
+/* Maximum number of error handler retries before giving up */
+#define MAX_ERR_HANDLER_RETRIES 5
+
/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
@@ -222,10 +225,8 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
-static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
@@ -2685,7 +2686,19 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
+ break;
case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
+ /*
+ * SCSI error handler can call ->queuecommand() while UFS error
+ * handler is in progress. Error interrupts could change the
+ * state from UFSHCD_STATE_RESET to
+		 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests from
+		 * being issued in that case.
+ */
+ if (ufshcd_eh_in_progress(hba)) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
break;
case UFSHCD_STATE_EH_SCHEDULED_FATAL:
/*
@@ -2701,7 +2714,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (hba->pm_op_in_progress) {
hba->force_reset = true;
set_host_byte(cmd, DID_BAD_TARGET);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
goto out;
}
fallthrough;
@@ -2710,7 +2723,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
case UFSHCD_STATE_ERROR:
set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
goto out;
}
@@ -4073,14 +4086,12 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: link recovery failed, err %d",
__func__, ret);
- else
- ufshcd_clear_ua_wluns(hba);
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
int ret;
struct uic_command uic_cmd = {0};
@@ -4102,6 +4113,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
@@ -5258,7 +5270,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
/* Do not touch lrbp after scsi done */
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
ufshcd_release(hba);
update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
@@ -5606,6 +5618,24 @@ out:
__func__, err);
}
+static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
+{
+ u32 value;
+
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
+ return;
+
+ dev_info(hba->dev, "exception Tcase %d\n", value - 80);
+
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+
+ /*
+	 * A placeholder for platform vendors to add whatever additional
+	 * steps are required
+ */
+}
+
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -5785,10 +5815,12 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
+ if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
+ ufshcd_temp_exception_event_handler(hba, status);
+
ufs_debugfs_exception_event(hba, status);
out:
ufshcd_scsi_unblock_requests(hba);
- return;
}
/* Complete requests that have door-bell cleared */
@@ -5959,7 +5991,6 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
ufshcd_release(hba);
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
- ufshcd_clear_ua_wluns(hba);
ufshcd_rpm_put(hba);
}
@@ -6033,13 +6064,15 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
*/
static void ufshcd_err_handler(struct work_struct *work)
{
+ int retries = MAX_ERR_HANDLER_RETRIES;
struct ufs_hba *hba;
unsigned long flags;
- bool err_xfer = false;
- bool err_tm = false;
- int err = 0, pmc_err;
+ bool needs_restore;
+ bool needs_reset;
+ bool err_xfer;
+ bool err_tm;
+ int pmc_err;
int tag;
- bool needs_reset = false, needs_restore = false;
hba = container_of(work, struct ufs_hba, eh_work);
@@ -6058,6 +6091,12 @@ static void ufshcd_err_handler(struct work_struct *work)
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
+again:
+ needs_restore = false;
+ needs_reset = false;
+ err_xfer = false;
+ err_tm = false;
+
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_RESET;
/*
@@ -6178,6 +6217,8 @@ lock_skip_pending_xfer_clear:
do_reset:
/* Fatal errors need reset */
if (needs_reset) {
+ int err;
+
hba->force_reset = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
@@ -6197,6 +6238,13 @@ skip_err_handling:
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
}
+	/* Exit the handler only in an operational or dead (error) state */
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
+ hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (--retries)
+ goto again;
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_unprepare(hba);
@@ -7102,31 +7150,41 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
*/
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
- u32 saved_err;
- u32 saved_uic_err;
+ u32 saved_err = 0;
+ u32 saved_uic_err = 0;
int err = 0;
unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
- /*
- * This is a fresh start, cache and clear saved error first,
- * in case new error generated during reset and restore.
- */
spin_lock_irqsave(hba->host->host_lock, flags);
- saved_err = hba->saved_err;
- saved_uic_err = hba->saved_uic_err;
- hba->saved_err = 0;
- hba->saved_uic_err = 0;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
do {
+ /*
+ * This is a fresh start, cache and clear saved error first,
+ * in case new error generated during reset and restore.
+ */
+ saved_err |= hba->saved_err;
+ saved_uic_err |= hba->saved_uic_err;
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ hba->force_reset = false;
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Reset the attached device */
ufshcd_device_reset(hba);
err = ufshcd_host_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (err)
+ continue;
+ /* Do not exit unless operational or dead */
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
+ hba->ufshcd_state != UFSHCD_STATE_ERROR &&
+ hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
+ err = -EAGAIN;
} while (err && --retries);
- spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Inform scsi mid-layer that we did reset and allow to handle
* Unit Attention properly.
@@ -7437,6 +7495,29 @@ wb_disabled:
hba->caps &= ~UFSHCD_CAP_WB_EN;
}
+static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 ext_ufs_feature;
+ u8 mask = 0;
+
+ if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
+ return;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+ if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
+ mask |= MASK_EE_TOO_LOW_TEMP;
+
+ if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
+ mask |= MASK_EE_TOO_HIGH_TEMP;
+
+ if (mask) {
+ ufshcd_enable_ee(hba, mask);
+ ufs_hwmon_probe(hba, mask);
+ }
+}
+
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
{
struct ufs_dev_fix *f;
@@ -7532,6 +7613,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_wb_probe(hba, desc_buf);
+ ufshcd_temp_notif_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -7875,8 +7958,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
- ufshcd_clear_ua_wluns(hba);
-
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7902,116 +7983,6 @@ out:
return ret;
}
-static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
-{
- if (error != BLK_STS_OK)
- pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
- kfree(rq->end_io_data);
- blk_mq_free_request(rq);
-}
-
-static int
-ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- /*
- * Some UFS devices clear unit attention condition only if the sense
- * size used (UFS_SENSE_SIZE in this case) is non-zero.
- */
- static const u8 cmd[6] = {REQUEST_SENSE, 0, 0, 0, UFS_SENSE_SIZE, 0};
- struct scsi_request *rq;
- struct request *req;
- char *buffer;
- int ret;
-
- buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- req = blk_mq_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
- /*flags=*/BLK_MQ_REQ_PM);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- goto out_free;
- }
-
- ret = blk_rq_map_kern(sdev->request_queue, req,
- buffer, UFS_SENSE_SIZE, GFP_NOIO);
- if (ret)
- goto out_put;
-
- rq = scsi_req(req);
- rq->cmd_len = ARRAY_SIZE(cmd);
- memcpy(rq->cmd, cmd, rq->cmd_len);
- rq->retries = 3;
- req->timeout = 1 * HZ;
- req->rq_flags |= RQF_PM | RQF_QUIET;
- req->end_io_data = buffer;
-
- blk_execute_rq_nowait(/*bd_disk=*/NULL, req, /*at_head=*/true,
- ufshcd_request_sense_done);
- return 0;
-
-out_put:
- blk_mq_free_request(req);
-out_free:
- kfree(buffer);
- return ret;
-}
-
-static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
-{
- struct scsi_device *sdp;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
- sdp = hba->sdev_ufs_device;
- else if (wlun == UFS_UPIU_RPMB_WLUN)
- sdp = hba->sdev_rpmb;
- else
- BUG();
- if (sdp) {
- ret = scsi_device_get(sdp);
- if (!ret && !scsi_device_online(sdp)) {
- ret = -ENODEV;
- scsi_device_put(sdp);
- }
- } else {
- ret = -ENODEV;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (ret)
- goto out_err;
-
- ret = ufshcd_request_sense_async(hba, sdp);
- scsi_device_put(sdp);
-out_err:
- if (ret)
- dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
- __func__, wlun, ret);
- return ret;
-}
-
-static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
-{
- int ret = 0;
-
- if (!hba->wlun_dev_clr_ua)
- goto out;
-
- ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
- if (!ret)
- ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
- if (!ret)
- hba->wlun_dev_clr_ua = false;
-out:
- if (ret)
- dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
- __func__, ret);
- return ret;
-}
-
/**
* ufshcd_probe_hba - probe hba to detect device and initialize it
* @hba: per-adapter instance
@@ -8062,8 +8033,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
- hba->wlun_dev_clr_ua = true;
- hba->wlun_rpmb_clr_ua = true;
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
@@ -8600,7 +8569,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_sense_hdr sshdr;
struct scsi_device *sdp;
unsigned long flags;
- int ret;
+ int ret, retries;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->sdev_ufs_device;
@@ -8625,8 +8594,6 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- if (hba->wlun_dev_clr_ua)
- ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
cmd[4] = pwr_mode << 4;
@@ -8635,8 +8602,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended children.
*/
- ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ for (retries = 3; retries > 0; --retries) {
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ if (!scsi_status_is_check_condition(ret) ||
+ !scsi_sense_valid(&sshdr) ||
+ sshdr.sense_key != UNIT_ATTENTION)
+ break;
+ }
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
@@ -8878,6 +8851,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
flush_work(&hba->eeh_work);
+ ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
+ if (ret)
+ goto enable_scaling;
+
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
if (pm_op != UFS_RUNTIME_PM)
/* ensure that bkops is disabled */
@@ -8905,7 +8882,7 @@ vops_suspend:
* vendor specific host controller register space call them before the
* host clocks are ON.
*/
- ret = ufshcd_vops_suspend(hba, pm_op);
+ ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
if (ret)
goto set_link_active;
goto out;
@@ -9033,7 +9010,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
- ufshcd_vops_suspend(hba, pm_op);
+ ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
+ ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
if (ret)
ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
@@ -9378,6 +9356,7 @@ void ufshcd_remove(struct ufs_hba *hba)
{
if (hba->sdev_ufs_device)
ufshcd_rpm_get_sync(hba);
+ ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
@@ -9699,10 +9678,6 @@ void ufshcd_resume_complete(struct device *dev)
ufshcd_rpm_put(hba);
hba->complete_put = false;
}
- if (hba->rpmb_complete_put) {
- ufshcd_rpmb_rpm_put(hba);
- hba->rpmb_complete_put = false;
- }
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
@@ -9725,10 +9700,6 @@ int ufshcd_suspend_prepare(struct device *dev)
}
hba->complete_put = true;
}
- if (hba->sdev_rpmb) {
- ufshcd_rpmb_rpm_get_sync(hba);
- hba->rpmb_complete_put = true;
- }
return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
@@ -9797,49 +9768,6 @@ static struct scsi_driver ufs_dev_wlun_template = {
},
};
-static int ufshcd_rpmb_probe(struct device *dev)
-{
- return is_rpmb_wlun(to_scsi_device(dev)) ? 0 : -ENODEV;
-}
-
-static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba)
-{
- int ret = 0;
-
- if (!hba->wlun_rpmb_clr_ua)
- return 0;
- ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
- if (!ret)
- hba->wlun_rpmb_clr_ua = 0;
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int ufshcd_rpmb_resume(struct device *dev)
-{
- struct ufs_hba *hba = wlun_dev_to_hba(dev);
-
- if (hba->sdev_rpmb)
- ufshcd_clear_rpmb_uac(hba);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops ufs_rpmb_pm_ops = {
- SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(NULL, ufshcd_rpmb_resume)
-};
-
-/* ufs_rpmb_wlun_template - Describes UFS RPMB WLUN. Used only to send UAC. */
-static struct scsi_driver ufs_rpmb_wlun_template = {
- .gendrv = {
- .name = "ufs_rpmb_wlun",
- .owner = THIS_MODULE,
- .probe = ufshcd_rpmb_probe,
- .pm = &ufs_rpmb_pm_ops,
- },
-};
-
static int __init ufshcd_core_init(void)
{
int ret;
@@ -9848,24 +9776,13 @@ static int __init ufshcd_core_init(void)
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
if (ret)
- goto debugfs_exit;
-
- ret = scsi_register_driver(&ufs_rpmb_wlun_template.gendrv);
- if (ret)
- goto unregister;
-
- return ret;
-unregister:
- scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
-debugfs_exit:
- ufs_debugfs_exit();
+ ufs_debugfs_exit();
return ret;
}
static void __exit ufshcd_core_exit(void)
{
ufs_debugfs_exit();
- scsi_unregister_driver(&ufs_rpmb_wlun_template.gendrv);
scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 62bdc412d38a..dd765863b05f 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -344,7 +344,8 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status);
int (*apply_dev_quirks)(struct ufs_hba *hba);
void (*fixup_dev_quirks)(struct ufs_hba *hba);
- int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
+ int (*suspend)(struct ufs_hba *, enum ufs_pm_op,
+ enum ufs_notify_change_status);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
int (*phy_initialization)(struct ufs_hba *);
@@ -653,6 +654,12 @@ enum ufshcd_caps {
* in order to exit DeepSleep state.
*/
UFSHCD_CAP_DEEPSLEEP = 1 << 10,
+
+ /*
+ * This capability allows the host controller driver to use temperature
+ * notification if it is supported by the UFS device.
+ */
+ UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
};
struct ufs_hba_variant_params {
@@ -791,6 +798,10 @@ struct ufs_hba {
struct scsi_device *sdev_ufs_device;
struct scsi_device *sdev_rpmb;
+#ifdef CONFIG_SCSI_UFS_HWMON
+ struct device *hwmon_device;
+#endif
+
enum ufs_dev_pwr_mode curr_dev_pwr_mode;
enum uic_link_state uic_link_state;
/* Desired UFS power management level during runtime PM */
@@ -871,9 +882,6 @@ struct ufs_hba {
struct ufs_vreg_info vreg_info;
struct list_head clk_list_head;
- bool wlun_dev_clr_ua;
- bool wlun_rpmb_clr_ua;
-
/* Number of requests aborts */
int req_abort_count;
@@ -920,7 +928,6 @@ struct ufs_hba {
#endif
u32 luns_avail;
bool complete_put;
- bool rpmb_complete_put;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -1007,6 +1014,7 @@ int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
@@ -1055,6 +1063,16 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
return 0;
}
+#ifdef CONFIG_SCSI_UFS_HWMON
+void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
+void ufs_hwmon_remove(struct ufs_hba *hba);
+void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask);
+#else
+static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {}
+static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
+static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
+#endif
+
#ifdef CONFIG_PM
extern int ufshcd_runtime_suspend(struct device *dev);
extern int ufshcd_runtime_resume(struct device *dev);
@@ -1301,10 +1319,11 @@ static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
hba->vops->fixup_dev_quirks(hba);
}
-static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
+static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->suspend)
- return hba->vops->suspend(hba, op);
+ return hba->vops->suspend(hba, op, status);
return 0;
}
@@ -1393,14 +1412,4 @@ static inline int ufshcd_rpm_put(struct ufs_hba *hba)
return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
}
-static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba)
-{
- return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev);
-}
-
-static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba)
-{
- return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev);
-}
-
#endif /* End of Header */
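The ufs_hba_variant_ops::suspend change above means the vop is now called twice on the way down: once with PRE_CHANGE near the top of __ufshcd_wl_suspend() and once with POST_CHANGE at the original call site, and the resume error path invokes both stages back to back. A minimal sketch of how a vendor host driver might split its work across the two stages is shown below; the function name and its body are illustrative only and are not part of this patch.

static int example_hba_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			       enum ufs_notify_change_status status)
{
	if (status == PRE_CHANGE) {
		/* Early stage: device power mode and link state are unchanged. */
		return 0;
	}

	/* POST_CHANGE: late stage, runs at the old single-call site. */
	return 0;
}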
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 182bcbf60f8c..f4eca441b433 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -2371,11 +2371,11 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
QUERY_DESC_IDN_UNIT, lun, 0,
desc_buf, &size);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
dev_err(hba->dev,
@@ -2598,11 +2598,8 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
if (version == HPB_SUPPORT_LEGACY_VERSION)
hpb_dev_info->is_legacy = true;
- pm_runtime_get_sync(hba->dev);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
- pm_runtime_put_sync(hba->dev);
-
if (ret)
dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
__func__);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index b11b57355914..19f7d7b90625 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -164,7 +164,7 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
VIRTIO_SCSI_SENSE_SIZE));
}
- sc->scsi_done(sc);
+ scsi_done(sc);
}
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -620,9 +620,8 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
* we're using independent interrupts (e.g. MSI). Poll the
* virtqueues once.
*
- * In the abort case, sc->scsi_done will do nothing, because
- * the block layer must have detected a timeout and as a result
- * REQ_ATOM_COMPLETE has been set.
+ * In the abort case, scsi_done() will do nothing, because the
+ * command timed out and hence SCMD_STATE_COMPLETE has been set.
*/
virtscsi_poll_requests(vscsi);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ce1ba1b93629..c2ba65224633 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -643,7 +643,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/*
@@ -768,7 +768,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
return 0;
}
-static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int pvscsi_queue_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct pvscsi_adapter *adapter = shost_priv(host);
@@ -786,7 +786,6 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
return SCSI_MLQUEUE_HOST_BUSY;
}
- cmd->scsi_done = done;
op = cmd->cmnd[0];
dev_dbg(&cmd->device->sdev_gendev,
@@ -860,7 +859,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
* Successfully aborted the command.
*/
cmd->result = (DID_ABORT << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
out:
spin_unlock_irqrestore(&adapter->hw_lock, flags);
@@ -887,7 +886,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
pvscsi_patch_sense(cmd);
pvscsi_release_context(adapter, ctx);
cmd->result = (DID_RESET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
}
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 4468bc45aaa4..7d2f00f3571a 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -362,9 +362,7 @@ calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast,
msg[1] = offset;
}
-static int
-wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct WD33C93_hostdata *hostdata;
struct scsi_cmnd *tmp;
@@ -376,11 +374,9 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
/* Set up a few fields in the scsi_cmnd structure for our own use:
* - host_scribble is the pointer to the next cmd in the input queue
- * - scsi_done points to the routine we call when a cmd is finished
* - result is what you'd expect
*/
cmd->host_scribble = NULL;
- cmd->scsi_done = done;
cmd->result = 0;
/* We use the Scsi_Pointer structure that's included with each command
@@ -856,7 +852,7 @@ wd33c93_intr(struct Scsi_Host *instance)
cmd->result = DID_NO_CONNECT << 16;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/* From esp.c:
* There is a window of time within the scsi_done() path
@@ -1183,7 +1179,7 @@ wd33c93_intr(struct Scsi_Host *instance)
scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
set_status_byte(cmd, cmd->SCp.Status);
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/* We are no longer connected to a target - check to see if
* there are commands waiting to be executed.
@@ -1270,7 +1266,7 @@ wd33c93_intr(struct Scsi_Host *instance)
scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
set_status_byte(cmd, cmd->SCp.Status);
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/* We are no longer connected to a target - check to see if
* there are commands waiting to be executed.
@@ -1306,7 +1302,7 @@ wd33c93_intr(struct Scsi_Host *instance)
scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
set_status_byte(cmd, cmd->SCp.Status);
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
break;
case S_PRE_TMP_DISC:
case S_RUNNING_LEVEL2:
@@ -1636,7 +1632,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
("scsi%d: Abort - removing command from input_Q. ",
instance->host_no);
enable_irq(cmd->device->host->irq);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
prev = tmp;
@@ -1711,7 +1707,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
wd33c93_execute(instance);
enable_irq(cmd->device->host->irq);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 6f10a43510fb..1a7947554581 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -200,7 +200,7 @@ static void wd719x_finish_cmd(struct wd719x_scb *scb, int result)
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
cmd->result = result << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/* Build a SCB and send it to the card */
@@ -295,7 +295,7 @@ out_unmap_scb:
DMA_BIDIRECTIONAL);
out_error:
cmd->result = DID_ERROR << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 0204e314b482..12c10a5e3d93 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -276,7 +276,7 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
if (sense_len)
memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);
- sc->scsi_done(sc);
+ scsi_done(sc);
}
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
@@ -558,7 +558,7 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
if (err == -ENOMEM)
return SCSI_MLQUEUE_HOST_BUSY;
sc->result = DID_ERROR << 16;
- sc->scsi_done(sc);
+ scsi_done(sc);
return 0;
}
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 41d13becec5c..91fcf85e150a 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -118,9 +118,9 @@ static int slave_configure(struct scsi_device *sdev)
/* queue a command */
/* This is always called with scsi_lock(host) held */
-static int queuecommand_lck(struct scsi_cmnd *srb,
- void (*done)(struct scsi_cmnd *))
+static int queuecommand_lck(struct scsi_cmnd *srb)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
struct rtsx_chip *chip = dev->chip;
@@ -140,7 +140,6 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
}
/* enqueue the command and wake up the control thread */
- srb->scsi_done = done;
chip->srb = srb;
complete(&dev->cmnd_ready);
@@ -423,7 +422,7 @@ static int rtsx_control_thread(void *__dev)
/* indicate that the command is done */
else if (chip->srb->result != DID_ABORT << 16) {
- chip->srb->scsi_done(chip->srb);
+ scsi_done(chip->srb);
} else {
skip_for_abort:
dev_err(&dev->pci->dev, "scsi command aborted\n");
@@ -635,7 +634,7 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
if (chip->srb) {
chip->srb->result = DID_NO_CONNECT << 16;
scsi_lock(host);
- chip->srb->scsi_done(dev->chip->srb);
+ scsi_done(dev->chip->srb);
chip->srb = NULL;
scsi_unlock(host);
}
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 41f8a72a2a95..694644112447 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -327,7 +327,7 @@ static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
if (rtn == SUCCESS) {
scsicmd->result = DID_ABORT << 16;
- scsicmd->scsi_done(scsicmd);
+ scsi_done(scsicmd);
}
return rtn;
}
@@ -354,7 +354,7 @@ static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
if (rtn == SUCCESS) {
scsicmd->result = DID_RESET << 16;
- scsicmd->scsi_done(scsicmd);
+ scsi_done(scsicmd);
}
return rtn;
}
@@ -383,7 +383,7 @@ static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
if (rtn == SUCCESS) {
scsicmd->result = DID_RESET << 16;
- scsicmd->scsi_done(scsicmd);
+ scsi_done(scsicmd);
}
return rtn;
}
@@ -446,10 +446,9 @@ static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
* Return: 0 if successfully queued to the Service Partition, otherwise
* error code
*/
-static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
- void (*visorhba_cmnd_done)
- (struct scsi_cmnd *))
+static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd)
{
+ void (*visorhba_cmnd_done)(struct scsi_cmnd *) = scsi_done;
struct uiscmdrsp *cmdrsp;
struct scsi_device *scsidev = scsicmd->device;
int insert_location;
@@ -476,8 +475,7 @@ static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
*/
cmdrsp->scsi.handle = insert_location;
- /* save done function that we have call when cmd is complete */
- scsicmd->scsi_done = visorhba_cmnd_done;
+ WARN_ON_ONCE(visorhba_cmnd_done != scsi_done);
/* save destination */
cmdrsp->scsi.vdest.channel = scsidev->channel;
cmdrsp->scsi.vdest.id = scsidev->id;
@@ -584,7 +582,6 @@ static struct scsi_host_template visorhba_driver_template = {
.eh_device_reset_handler = visorhba_device_reset_handler,
.eh_bus_reset_handler = visorhba_bus_reset_handler,
.eh_host_reset_handler = visorhba_host_reset_handler,
- .shost_attrs = NULL,
#define visorhba_MAX_CMNDS 128
.can_queue = visorhba_MAX_CMNDS,
.sg_tablesize = 64,
@@ -686,8 +683,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
case CMD_SCSI_TYPE:
scsicmd = pendingdel->sent;
scsicmd->result = DID_RESET << 16;
- if (scsicmd->scsi_done)
- scsicmd->scsi_done(scsicmd);
+ scsi_done(scsicmd);
break;
case CMD_SCSITASKMGMT_TYPE:
cmdrsp = pendingdel->sent;
@@ -853,7 +849,7 @@ static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
else
do_scsi_nolinuxstat(cmdrsp, scsicmd);
- scsicmd->scsi_done(scsicmd);
+ scsi_done(scsicmd);
}
/*
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 518ded214e74..da31a308a064 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -836,11 +836,13 @@ static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
csk->rcv_win = CXGBIT_10G_RCV_WIN;
if (scale)
csk->rcv_win *= scale;
+ csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);
#define CXGBIT_10G_SND_WIN (256 * 1024)
csk->snd_win = CXGBIT_10G_SND_WIN;
if (scale)
csk->snd_win *= scale;
+ csk->snd_win = min(csk->snd_win, 512U * 1024);
pr_debug("%s snd_win %d rcv_win %d\n",
__func__, csk->snd_win, csk->rcv_win);
@@ -1065,7 +1067,7 @@ int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
if (!skb)
return -1;
- credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+ credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) |
RX_CREDITS_V(csk->rx_credits);
cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
@@ -1197,7 +1199,6 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN_V(1);
- opt2 |= RX_COALESCE_V(3);
opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
opt2 |= T5_ISS_F;
@@ -1646,9 +1647,6 @@ cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
csk->rcv_nxt = rcv_isn;
- if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
- csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
-
csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
cxgbit_set_emss(csk, tcp_opt);
dst_confirm(csk->dst);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index bd37f2afadea..c6678dc8dd41 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -33,11 +33,18 @@ static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
struct cxgb4_lld_info *lldi = &cdev->lldi;
u32 mdsl;
-#define ULP2_MAX_PKT_LEN 16224
-#define ISCSI_PDU_NONPAYLOAD_LEN 312
- mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
- ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
- mdsl = min_t(u32, mdsl, 8192);
+#define CXGBIT_T5_MAX_PDU_LEN 16224
+#define CXGBIT_PDU_NONPAYLOAD_LEN 312 /* 48(BHS) + 256(AHS) + 8(Digest) */
+ if (is_t5(lldi->adapter_type)) {
+ mdsl = min_t(u32, lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN,
+ CXGBIT_T5_MAX_PDU_LEN - CXGBIT_PDU_NONPAYLOAD_LEN);
+ } else {
+ mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN;
+ mdsl = min(mdsl, 16384U);
+ }
+
+ mdsl = round_down(mdsl, 4);
+ mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE);
mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);
cdev->mdsl = mdsl;
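As a worked example of the new sizing (assuming 4 KiB pages and MAX_SKB_FRAGS of 17): a T5 adapter reporting lldi->iscsi_iolen = 16384 gets mdsl = min(16384 - 312, 16224 - 312) = 15912, which is already a multiple of 4 and below both 4 * PAGE_SIZE (16384) and (MAX_SKB_FRAGS - 1) * PAGE_SIZE (65536), so cdev->mdsl becomes 15912 rather than being clamped to the old 8192 limit.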
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 282297ffc404..d314ee120a48 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -189,8 +189,8 @@ cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
- req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
- FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
+ req->tunnel_to_proxy = htonl(wr_ulp_mode | force |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
}
static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
@@ -1531,7 +1531,7 @@ out:
return ret;
}
-static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
@@ -1557,6 +1557,24 @@ static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
return ret;
}
+static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ int ret;
+
+ ret = cxgbit_process_lro_skb(csk, skb);
+ if (ret)
+ return ret;
+
+ csk->rx_credits += lro_cb->pdu_totallen;
+ if (csk->rx_credits >= csk->rcv_win) {
+ csk->rx_credits = 0;
+ cxgbit_rx_data_ack(csk);
+ }
+
+ return 0;
+}
+
static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
@@ -1564,9 +1582,9 @@ static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
if (is_t5(lldi->adapter_type))
- ret = cxgbit_rx_lro_skb(csk, skb);
+ ret = cxgbit_t5_rx_lro_skb(csk, skb);
else
- ret = cxgbit_process_lro_skb(csk, skb);
+ ret = cxgbit_rx_lro_skb(csk, skb);
}
__kfree_skb(skb);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index f4a24fa5058e..2a9de24a8bbe 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1005,74 +1005,15 @@ static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
/* Start items for lio_target_tpg_cit */
-static ssize_t lio_target_tpg_enable_show(struct config_item *item, char *page)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
- ssize_t len;
-
- spin_lock(&tpg->tpg_state_lock);
- len = sprintf(page, "%d\n",
- (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
- spin_unlock(&tpg->tpg_state_lock);
-
- return len;
-}
-
-static ssize_t lio_target_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
- u32 op;
- int ret;
-
- ret = kstrtou32(page, 0, &op);
- if (ret)
- return ret;
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %u\n", op);
- return -EINVAL;
- }
-
- ret = iscsit_get_tpg(tpg);
- if (ret < 0)
- return -EINVAL;
-
- if (op) {
- ret = iscsit_tpg_enable_portal_group(tpg);
- if (ret < 0)
- goto out;
- } else {
- /*
- * iscsit_tpg_disable_portal_group() assumes force=1
- */
- ret = iscsit_tpg_disable_portal_group(tpg, 1);
- if (ret < 0)
- goto out;
- }
-
- iscsit_put_tpg(tpg);
- return count;
-out:
- iscsit_put_tpg(tpg);
- return -EINVAL;
-}
-
-
static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item,
char *page)
{
return target_show_dynamic_sessions(to_tpg(item), page);
}
-CONFIGFS_ATTR(lio_target_tpg_, enable);
CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions);
static struct configfs_attribute *lio_target_tpg_attrs[] = {
- &lio_target_tpg_attr_enable,
&lio_target_tpg_attr_dynamic_sessions,
NULL,
};
@@ -1129,6 +1070,37 @@ free_out:
return NULL;
}
+static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg,
+ bool enable)
+{
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ int ret;
+
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (enable) {
+ ret = iscsit_tpg_enable_portal_group(tpg);
+ if (ret < 0)
+ goto out;
+ } else {
+ /*
+ * iscsit_tpg_disable_portal_group() assumes force=1
+ */
+ ret = iscsit_tpg_disable_portal_group(tpg, 1);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return 0;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
+
static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg;
@@ -1556,6 +1528,7 @@ const struct target_core_fabric_ops iscsi_ops = {
.fabric_drop_wwn = lio_target_call_coredeltiqn,
.add_wwn_groups = lio_target_add_wwn_groups,
.fabric_make_tpg = lio_target_tiqn_addtpg,
+ .fabric_enable_tpg = lio_target_tiqn_enabletpg,
.fabric_drop_tpg = lio_target_tiqn_deltpg,
.fabric_make_np = lio_target_call_addnptotpg,
.fabric_drop_np = lio_target_call_delnpfromtpg,
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 52db28d868d5..4407b56aa6d1 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -71,7 +71,7 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
else
- sc->scsi_done(sc);
+ scsi_done(sc);
}
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
@@ -165,7 +165,7 @@ static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
return;
out_done:
- sc->scsi_done(sc);
+ scsi_done(sc);
}
/*
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index b9f9fb5d7e63..504670994fb4 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2125,32 +2125,13 @@ static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
return count;
}
-static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
+static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
- return sprintf(page, "%d\n", tport->enable);
-}
-
-static ssize_t sbp_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
- struct sbp_tport *tport = tpg->tport;
- unsigned long val;
int ret;
- if (kstrtoul(page, 0, &val) < 0)
- return -EINVAL;
- if ((val != 0) && (val != 1))
- return -EINVAL;
-
- if (tport->enable == val)
- return count;
-
- if (val) {
+ if (enable) {
if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
pr_err("Cannot enable a target with no LUNs!\n");
return -EINVAL;
@@ -2165,7 +2146,7 @@ static ssize_t sbp_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&se_tpg->session_lock);
}
- tport->enable = val;
+ tport->enable = enable;
ret = sbp_update_unit_directory(tport);
if (ret < 0) {
@@ -2173,15 +2154,13 @@ static ssize_t sbp_tpg_enable_store(struct config_item *item,
return ret;
}
- return count;
+ return 0;
}
CONFIGFS_ATTR(sbp_tpg_, directory_id);
-CONFIGFS_ATTR(sbp_tpg_, enable);
static struct configfs_attribute *sbp_tpg_base_attrs[] = {
&sbp_tpg_attr_directory_id,
- &sbp_tpg_attr_enable,
NULL,
};
@@ -2319,6 +2298,7 @@ static const struct target_core_fabric_ops sbp_ops = {
.fabric_make_wwn = sbp_make_tport,
.fabric_drop_wwn = sbp_drop_tport,
.fabric_make_tpg = sbp_make_tpg,
+ .fabric_enable_tpg = sbp_enable_tpg,
.fabric_drop_tpg = sbp_drop_tpg,
.fabric_post_link = sbp_post_link_lun,
.fabric_pre_unlink = sbp_pre_unlink_lun,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index cb1de1ecaaa6..b56ef8af66e7 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -247,11 +247,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
* this CDB was received upon to determine this value individually
* for ALUA target port group.
*/
- spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
- tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
if (tg_pt_gp)
buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
- spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
}
transport_kunmap_data_sg(cmd);
@@ -292,24 +292,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
- spin_lock(&l_lun->lun_tg_pt_gp_lock);
- l_tg_pt_gp = l_lun->lun_tg_pt_gp;
+ rcu_read_lock();
+ l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
if (!l_tg_pt_gp) {
- spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
pr_err("Unable to access l_lun->tg_pt_gp\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
- spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
- spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
ptr = &buf[4]; /* Skip over RESERVED area in header */
@@ -662,17 +662,17 @@ target_alua_state_check(struct se_cmd *cmd)
" target port\n");
return TCM_ALUA_OFFLINE;
}
-
- if (!lun->lun_tg_pt_gp)
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
+ if (!tg_pt_gp) {
+ rcu_read_unlock();
return 0;
+ }
- spin_lock(&lun->lun_tg_pt_gp_lock);
- tg_pt_gp = lun->lun_tg_pt_gp;
out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
-
- spin_unlock(&lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
/*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
@@ -1219,10 +1219,10 @@ static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp *tg_pt_gp;
int trans_delay_msecs;
- spin_lock(&lun->lun_tg_pt_gp_lock);
- tg_pt_gp = lun->lun_tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
- spin_unlock(&lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
pr_err("Unable to complete secondary state"
" transition\n");
return -EINVAL;
@@ -1246,7 +1246,7 @@ static int core_alua_set_tg_pt_secondary_state(
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
- spin_unlock(&lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
/*
* Do the optional transition delay after we set the secondary
* ALUA access state.
@@ -1674,7 +1674,6 @@ int core_alua_set_tg_pt_gp_id(
pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
return -ENOSPC;
}
again:
@@ -1755,13 +1754,14 @@ void core_alua_free_tg_pt_gp(
__target_attach_tg_pt_gp(lun,
dev->t10_alua.default_tg_pt_gp);
} else
- lun->lun_tg_pt_gp = NULL;
+ rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
spin_unlock(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ synchronize_rcu();
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
@@ -1806,7 +1806,7 @@ static void __target_attach_tg_pt_gp(struct se_lun *lun,
assert_spin_locked(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- lun->lun_tg_pt_gp = tg_pt_gp;
+ rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
tg_pt_gp->tg_pt_gp_members++;
spin_lock(&lun->lun_deve_lock);
@@ -1823,6 +1823,7 @@ void target_attach_tg_pt_gp(struct se_lun *lun,
spin_lock(&lun->lun_tg_pt_gp_lock);
__target_attach_tg_pt_gp(lun, tg_pt_gp);
spin_unlock(&lun->lun_tg_pt_gp_lock);
+ synchronize_rcu();
}
static void __target_detach_tg_pt_gp(struct se_lun *lun,
@@ -1834,8 +1835,6 @@ static void __target_detach_tg_pt_gp(struct se_lun *lun,
list_del_init(&lun->lun_tg_pt_gp_link);
tg_pt_gp->tg_pt_gp_members--;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
-
- lun->lun_tg_pt_gp = NULL;
}
void target_detach_tg_pt_gp(struct se_lun *lun)
@@ -1843,10 +1842,25 @@ void target_detach_tg_pt_gp(struct se_lun *lun)
struct t10_alua_tg_pt_gp *tg_pt_gp;
spin_lock(&lun->lun_tg_pt_gp_lock);
- tg_pt_gp = lun->lun_tg_pt_gp;
- if (tg_pt_gp)
+ tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+ lockdep_is_held(&lun->lun_tg_pt_gp_lock));
+ if (tg_pt_gp) {
__target_detach_tg_pt_gp(lun, tg_pt_gp);
+ rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
+ }
spin_unlock(&lun->lun_tg_pt_gp_lock);
+ synchronize_rcu();
+}
+
+static void target_swap_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *old_tg_pt_gp,
+ struct t10_alua_tg_pt_gp *new_tg_pt_gp)
+{
+ assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
+ if (old_tg_pt_gp)
+ __target_detach_tg_pt_gp(lun, old_tg_pt_gp);
+ __target_attach_tg_pt_gp(lun, new_tg_pt_gp);
}
ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
@@ -1855,8 +1869,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
struct t10_alua_tg_pt_gp *tg_pt_gp;
ssize_t len = 0;
- spin_lock(&lun->lun_tg_pt_gp_lock);
- tg_pt_gp = lun->lun_tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
@@ -1872,7 +1886,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
"Offline" : "None",
core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
}
- spin_unlock(&lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
return len;
}
@@ -1919,7 +1933,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
}
spin_lock(&lun->lun_tg_pt_gp_lock);
- tg_pt_gp = lun->lun_tg_pt_gp;
+ tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+ lockdep_is_held(&lun->lun_tg_pt_gp_lock));
if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
@@ -1937,18 +1952,16 @@ ssize_t core_alua_store_tg_pt_gp_info(
&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id);
- __target_detach_tg_pt_gp(lun, tg_pt_gp);
- __target_attach_tg_pt_gp(lun,
+ target_swap_tg_pt_gp(lun, tg_pt_gp,
dev->t10_alua.default_tg_pt_gp);
spin_unlock(&lun->lun_tg_pt_gp_lock);
- return count;
+ goto sync_rcu;
}
- __target_detach_tg_pt_gp(lun, tg_pt_gp);
move = 1;
}
- __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
+ target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new);
spin_unlock(&lun->lun_tg_pt_gp_lock);
pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
@@ -1959,6 +1972,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
tg_pt_gp_new->tg_pt_gp_id);
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+sync_rcu:
+ synchronize_rcu();
return count;
}
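The lun->lun_tg_pt_gp conversion above follows the standard RCU pattern: readers drop the spinlock in favour of rcu_read_lock()/rcu_dereference(), while updaters keep serializing on lun_tg_pt_gp_lock, publish with rcu_assign_pointer(), and call synchronize_rcu() before the old group can be freed. A stand-alone sketch of that pattern, with purely illustrative names, might look like this:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_gp {
	int id;
};

static struct example_gp __rcu *example_gp_ptr;
static DEFINE_SPINLOCK(example_gp_lock);

/* Read side: lock-free, bounded by an RCU read-side critical section. */
static int example_gp_read_id(void)
{
	struct example_gp *gp;
	int id = -1;

	rcu_read_lock();
	gp = rcu_dereference(example_gp_ptr);
	if (gp)
		id = gp->id;
	rcu_read_unlock();

	return id;
}

/* Update side: writers still serialize, then wait out existing readers. */
static void example_gp_replace(struct example_gp *new_gp)
{
	struct example_gp *old_gp;

	spin_lock(&example_gp_lock);
	old_gp = rcu_dereference_protected(example_gp_ptr,
					   lockdep_is_held(&example_gp_lock));
	rcu_assign_pointer(example_gp_ptr, new_gp);
	spin_unlock(&example_gp_lock);

	synchronize_rcu();
	kfree(old_gp);
}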
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 023bd4516a68..4c86697fe4ec 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -490,6 +490,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
* fabric driver unload of TFO->module to proceed.
*/
rcu_barrier();
+ kfree(t->tf_tpg_base_cit.ct_attrs);
kfree(t);
return;
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8cb1fa0c0585..44bb380e7390 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -772,6 +772,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
spin_lock_init(&dev->t10_alua.lba_map_lock);
+ INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+
dev->t10_wwn.t10_dev = dev;
/*
* Use OpenFabrics IEEE Company ID: 00 14 05
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index fc7edc04ee09..0b65de9f2df1 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -815,8 +815,76 @@ static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
.release = target_fabric_tpg_release,
};
-TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL);
+static ssize_t target_fabric_tpg_base_enable_show(struct config_item *item,
+ char *page)
+{
+ return sysfs_emit(page, "%d\n", to_tpg(item)->enabled);
+}
+
+static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item,
+ const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ int ret;
+ bool op;
+
+ ret = strtobool(page, &op);
+ if (ret)
+ return ret;
+
+ if (se_tpg->enabled == op)
+ return count;
+
+ ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, op);
+ if (ret)
+ return ret;
+
+ se_tpg->enabled = op;
+
+ return count;
+}
+
+CONFIGFS_ATTR(target_fabric_tpg_base_, enable);
+static int
+target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf)
+{
+ struct config_item_type *cit = &tf->tf_tpg_base_cit;
+ struct configfs_attribute **attrs = NULL;
+ size_t nr_attrs = 0;
+ int i = 0;
+
+ if (tf->tf_ops->tfc_tpg_base_attrs)
+ while (tf->tf_ops->tfc_tpg_base_attrs[nr_attrs] != NULL)
+ nr_attrs++;
+
+ if (tf->tf_ops->fabric_enable_tpg)
+ nr_attrs++;
+
+ if (nr_attrs == 0)
+ goto done;
+
+ /* + 1 for final NULL in the array */
+ attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ if (tf->tf_ops->tfc_tpg_base_attrs)
+ for (; tf->tf_ops->tfc_tpg_base_attrs[i] != NULL; i++)
+ attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i];
+
+ if (tf->tf_ops->fabric_enable_tpg)
+ attrs[i] = &target_fabric_tpg_base_attr_enable;
+
+done:
+ cit->ct_item_ops = &target_fabric_tpg_base_item_ops;
+ cit->ct_attrs = attrs;
+ cit->ct_owner = tf->tf_ops->module;
+ pr_debug("Setup generic tpg_base\n");
+
+ return 0;
+}
/* End of tfc_tpg_base_cit */
/* Start of tfc_tpg_cit */
@@ -1028,12 +1096,18 @@ TF_CIT_SETUP_DRV(discovery, NULL, NULL);
int target_fabric_setup_cits(struct target_fabric_configfs *tf)
{
+ int ret;
+
target_fabric_setup_discovery_cit(tf);
target_fabric_setup_wwn_cit(tf);
target_fabric_setup_wwn_fabric_stats_cit(tf);
target_fabric_setup_wwn_param_cit(tf);
target_fabric_setup_tpg_cit(tf);
- target_fabric_setup_tpg_base_cit(tf);
+
+ ret = target_fabric_setup_tpg_base_cit(tf);
+ if (ret)
+ return ret;
+
target_fabric_setup_tpg_port_cit(tf);
target_fabric_setup_tpg_port_stat_cit(tf);
target_fabric_setup_tpg_lun_cit(tf);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b1ef041cacd8..bf8ae4825a06 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -636,12 +636,10 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
- char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
- bl += sprintf(b + bl, "iBlock device: %s",
- bdevname(bd, buf));
+ bl += sprintf(b + bl, "iBlock device: %pg", bd);
if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
bl += sprintf(b + bl, " UDEV PATH: %s",
ib_dev->ibd_udev_path);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index a343bcfa2180..a889a6237d9c 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -151,6 +151,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
void transport_clear_lun_ref(struct se_lun *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
+void target_do_delayed_work(struct work_struct *work);
bool target_check_wce(struct se_device *dev);
bool target_check_fua(struct se_device *dev);
void __target_execute_cmd(struct se_cmd *, bool);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 14c6f2bb1b01..4a2e749eb182 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1511,10 +1511,10 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
ret = dev->transport->parse_cdb(cmd);
if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
- pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
- cmd->se_tfo->fabric_name,
- cmd->se_sess->se_node_acl->initiatorname,
- cmd->t_task_cdb[0]);
+ pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+ cmd->se_tfo->fabric_name,
+ cmd->se_sess->se_node_acl->initiatorname,
+ cmd->t_task_cdb[0]);
if (ret)
return ret;
@@ -2173,32 +2173,39 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
*/
switch (cmd->sam_task_attr) {
case TCM_HEAD_TAG:
+ atomic_inc_mb(&dev->non_ordered);
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
cmd->t_task_cdb[0]);
return false;
case TCM_ORDERED_TAG:
- atomic_inc_mb(&dev->dev_ordered_sync);
+ atomic_inc_mb(&dev->delayed_cmd_count);
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
cmd->t_task_cdb[0]);
-
- /*
- * Execute an ORDERED command if no other older commands
- * exist that need to be completed first.
- */
- if (!atomic_read(&dev->simple_cmds))
- return false;
break;
default:
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
- atomic_inc_mb(&dev->simple_cmds);
+ atomic_inc_mb(&dev->non_ordered);
+
+ if (atomic_read(&dev->delayed_cmd_count) == 0)
+ return false;
break;
}
- if (atomic_read(&dev->dev_ordered_sync) == 0)
- return false;
+ if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
+ atomic_inc_mb(&dev->delayed_cmd_count);
+ /*
+ * We will account for this when we dequeue from the delayed
+ * list.
+ */
+ atomic_dec_mb(&dev->non_ordered);
+ }
+
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
spin_lock(&dev->delayed_cmd_lock);
list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
@@ -2206,6 +2213,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
cmd->t_task_cdb[0], cmd->sam_task_attr);
+ /*
+ * We may have no non ordered cmds when this function started or we
+ * could have raced with the last simple/head cmd completing, so kick
+ * the delayed handler here.
+ */
+ schedule_work(&dev->delayed_cmd_work);
return true;
}
@@ -2228,12 +2241,8 @@ void target_execute_cmd(struct se_cmd *cmd)
if (target_write_prot_action(cmd))
return;
- if (target_handle_task_attr(cmd)) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~CMD_T_SENT;
- spin_unlock_irq(&cmd->t_state_lock);
+ if (target_handle_task_attr(cmd))
return;
- }
__target_execute_cmd(cmd, true);
}
@@ -2243,29 +2252,48 @@ EXPORT_SYMBOL(target_execute_cmd);
* Process all commands up to the last received ORDERED task attribute which
* requires another blocking boundary
*/
-static void target_restart_delayed_cmds(struct se_device *dev)
+void target_do_delayed_work(struct work_struct *work)
{
- for (;;) {
+ struct se_device *dev = container_of(work, struct se_device,
+ delayed_cmd_work);
+
+ spin_lock(&dev->delayed_cmd_lock);
+ while (!dev->ordered_sync_in_progress) {
struct se_cmd *cmd;
- spin_lock(&dev->delayed_cmd_lock);
- if (list_empty(&dev->delayed_cmd_list)) {
- spin_unlock(&dev->delayed_cmd_lock);
+ if (list_empty(&dev->delayed_cmd_list))
break;
- }
cmd = list_entry(dev->delayed_cmd_list.next,
struct se_cmd, se_delayed_node);
+
+ if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
+ /*
+ * Check if we started with:
+ * [ordered] [simple] [ordered]
+ * and we are now at the last ordered so we have to wait
+ * for the simple cmd.
+ */
+ if (atomic_read(&dev->non_ordered) > 0)
+ break;
+
+ dev->ordered_sync_in_progress = true;
+ }
+
list_del(&cmd->se_delayed_node);
+ atomic_dec_mb(&dev->delayed_cmd_count);
spin_unlock(&dev->delayed_cmd_lock);
+ if (cmd->sam_task_attr != TCM_ORDERED_TAG)
+ atomic_inc_mb(&dev->non_ordered);
+
cmd->transport_state |= CMD_T_SENT;
__target_execute_cmd(cmd, true);
- if (cmd->sam_task_attr == TCM_ORDERED_TAG)
- break;
+ spin_lock(&dev->delayed_cmd_lock);
}
+ spin_unlock(&dev->delayed_cmd_lock);
}
/*
@@ -2283,14 +2311,17 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
goto restart;
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
- atomic_dec_mb(&dev->simple_cmds);
+ atomic_dec_mb(&dev->non_ordered);
dev->dev_cur_ordered_id++;
} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
+ atomic_dec_mb(&dev->non_ordered);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
dev->dev_cur_ordered_id);
} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
- atomic_dec_mb(&dev->dev_ordered_sync);
+ spin_lock(&dev->delayed_cmd_lock);
+ dev->ordered_sync_in_progress = false;
+ spin_unlock(&dev->delayed_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
@@ -2299,7 +2330,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
restart:
- target_restart_delayed_cmds(dev);
+ if (atomic_read(&dev->delayed_cmd_count) > 0)
+ schedule_work(&dev->delayed_cmd_work);
}
static void transport_complete_qf(struct se_cmd *cmd)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9f552f48084c..7b2a89a67cdb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -523,8 +523,8 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
rcu_read_unlock();
for (i = cnt; i < page_cnt; i++) {
- /* try to get new page from the mm */
- page = alloc_page(GFP_NOIO);
+ /* try to get new zeroed page from the mm */
+ page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!page)
break;
@@ -1255,7 +1255,6 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
{
int i = 0, cmd_cnt = 0;
bool unqueued = false;
- uint16_t *cmd_ids = NULL;
struct tcmu_cmd *cmd;
struct se_cmd *se_cmd;
struct tcmu_tmr *tmr;
@@ -1292,7 +1291,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
- tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
+ tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO);
if (!tmr)
goto unlock;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index d4fe7cb2bd00..6bb20aa9c5bc 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -295,8 +295,7 @@ out:
return -EINVAL;
}
-static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
- unsigned char *p)
+static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p)
{
unsigned char *desc = p;
int dc = (desc[1] & 0x02);
@@ -332,9 +331,9 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
return 0;
}
-static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
- struct xcopy_op *xop, unsigned char *p,
- unsigned int sdll, sense_reason_t *sense_ret)
+static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop,
+ unsigned char *p, unsigned int sdll,
+ sense_reason_t *sense_ret)
{
unsigned char *desc = p;
unsigned int start = 0;
@@ -362,7 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
*/
switch (desc[0]) {
case 0x02:
- rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
+ rc = target_xcopy_parse_segdesc_02(xop, desc);
if (rc < 0)
goto out;
@@ -840,8 +839,7 @@ static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
*/
seg_desc = &p[16] + tdll;
- rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
- sdll, &ret);
+ rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret);
if (rc <= 0)
goto out;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index de161ee0b1f9..8e17ac831be0 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1495,42 +1495,24 @@ static struct configfs_attribute *usbg_wwn_attrs[] = {
NULL,
};
-static ssize_t tcm_usbg_tpg_enable_show(struct config_item *item, char *page)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
-
- return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
-}
-
static int usbg_attach(struct usbg_tpg *);
static void usbg_detach(struct usbg_tpg *);
-static ssize_t tcm_usbg_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
+static int usbg_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
- bool op;
- ssize_t ret;
-
- ret = strtobool(page, &op);
- if (ret)
- return ret;
-
- if ((op && tpg->gadget_connect) || (!op && !tpg->gadget_connect))
- return -EINVAL;
+ int ret = 0;
- if (op)
+ if (enable)
ret = usbg_attach(tpg);
else
usbg_detach(tpg);
if (ret)
return ret;
- tpg->gadget_connect = op;
+ tpg->gadget_connect = enable;
- return count;
+ return 0;
}
static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
@@ -1673,11 +1655,9 @@ static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
return count;
}
-CONFIGFS_ATTR(tcm_usbg_tpg_, enable);
CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
static struct configfs_attribute *usbg_base_attrs[] = {
- &tcm_usbg_tpg_attr_enable,
&tcm_usbg_tpg_attr_nexus,
NULL,
};
@@ -1730,6 +1710,7 @@ static const struct target_core_fabric_ops usbg_ops = {
.fabric_make_wwn = usbg_make_tport,
.fabric_drop_wwn = usbg_drop_tport,
.fabric_make_tpg = usbg_make_tpg,
+ .fabric_enable_tpg = usbg_enable_tpg,
.fabric_drop_tpg = usbg_drop_tpg,
.fabric_post_link = usbg_port_link,
.fabric_pre_unlink = usbg_port_unlink,
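
The f_tcm conversion above replaces the driver-private configfs "enable" attribute with the core callback declared further down in target_core_fabric.h as fabric_enable_tpg. A minimal sketch of the shape such a callback takes, assuming a hypothetical fabric driver with its own my_attach()/my_detach() helpers (everything prefixed my_ is made up):

    static int my_fabric_enable_tpg(struct se_portal_group *se_tpg, bool enable)
    {
            struct my_tpg *tpg = container_of(se_tpg, struct my_tpg, se_tpg);

            if (enable)
                    return my_attach(tpg);          /* hypothetical helper */

            my_detach(tpg);                         /* hypothetical helper */
            return 0;
    }

    static const struct target_core_fabric_ops my_fabric_ops = {
            /* ... other callbacks ... */
            .fabric_enable_tpg      = my_fabric_enable_tpg,
    };

With the callback in place the target core owns the configfs plumbing, so enable/disable semantics and the new se_portal_group::enabled flag are handled consistently across fabrics instead of per driver.
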
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 59b02a539963..b8dc6fa6a5a3 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -561,10 +561,9 @@ mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
desc->context.data_pipe = pipe;
}
-
-static int
-mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback)
+static int mts_scsi_queuecommand_lck(struct scsi_cmnd *srb)
{
+ mts_scsi_cmnd_callback callback = scsi_done;
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
int res;
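
The microtek change is representative of the rest of the series: _lck-style queuecommand implementations no longer receive a completion callback, because the single exported scsi_done() (declared in scsi_cmnd.h below) is the completion entry point, and the DEF_SCSI_QCMD() wrapper in scsi_host.h is updated to match. A minimal sketch of the new-style pattern for a hypothetical locked driver; my_hw_is_dead() and my_hw_submit() are assumed hardware-specific helpers:

    static int my_queuecommand_lck(struct scsi_cmnd *cmd)
    {
            struct Scsi_Host *host = cmd->device->host;

            if (my_hw_is_dead(host)) {              /* assumed helper */
                    set_host_byte(cmd, DID_NO_CONNECT);
                    scsi_done(cmd);                 /* complete immediately */
                    return 0;
            }

            /*
             * Hand the command to the hardware; the interrupt handler calls
             * scsi_done(cmd) when it finishes.
             */
            return my_hw_submit(host, cmd);         /* assumed helper */
    }

    static DEF_SCSI_QCMD(my_queuecommand)
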
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e5a971b83e3f..8931df5a85fd 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -363,9 +363,9 @@ static int target_alloc(struct scsi_target *starget)
/* queue a command */
/* This is always called with scsi_lock(host) held */
-static int queuecommand_lck(struct scsi_cmnd *srb,
- void (*done)(struct scsi_cmnd *))
+static int queuecommand_lck(struct scsi_cmnd *srb)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct us_data *us = host_to_us(srb->device->host);
/* check for state-transition errors */
@@ -393,7 +393,6 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
}
/* enqueue the command and wake up the control thread */
- srb->scsi_done = done;
us->srb = srb;
complete(&us->cmnd_ready);
@@ -588,11 +587,13 @@ static ssize_t max_sectors_store(struct device *dev, struct device_attribute *at
}
static DEVICE_ATTR_RW(max_sectors);
-static struct device_attribute *sysfs_device_attr_list[] = {
- &dev_attr_max_sectors,
+static struct attribute *usb_sdev_attrs[] = {
+ &dev_attr_max_sectors.attr,
NULL,
};
+ATTRIBUTE_GROUPS(usb_sdev);
+
/*
* this defines our host template, with which we'll allocate hosts
*/
@@ -653,7 +654,7 @@ static const struct scsi_host_template usb_stor_host_template = {
.skip_settle_delay = 1,
/* sysfs device attributes */
- .sdev_attrs = sysfs_device_attr_list,
+ .sdev_groups = usb_sdev_groups,
/* module management */
.module = THIS_MODULE
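
The sysfs half of the scsiglue conversion follows the same recipe used throughout the series: wrap the attributes in a NULL-terminated struct attribute array, let ATTRIBUTE_GROUPS() generate the <name>_groups array, and point the host template's sdev_groups at it. A minimal sketch with a hypothetical read-only attribute:

    static ssize_t my_limit_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            struct scsi_device *sdev = to_scsi_device(dev);

            return sysfs_emit(buf, "%d\n", sdev->queue_depth);
    }
    static DEVICE_ATTR_RO(my_limit);

    static struct attribute *my_sdev_attrs[] = {
            &dev_attr_my_limit.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(my_sdev);          /* emits my_sdev_groups[] */

    static const struct scsi_host_template my_sht = {
            /* ... */
            .sdev_groups = my_sdev_groups,
    };
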
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index bef89c6bd1d7..7f2944729ecd 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -256,7 +256,7 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
return -EBUSY;
devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
uas_free_unsubmitted_urbs(cmnd);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
@@ -633,8 +633,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
return 0;
}
-static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
- void (*done)(struct scsi_cmnd *))
+static int uas_queuecommand_lck(struct scsi_cmnd *cmnd)
{
struct scsi_device *sdev = cmnd->device;
struct uas_dev_info *devinfo = sdev->hostdata;
@@ -653,7 +652,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
memcpy(cmnd->sense_buffer, usb_stor_sense_invalidCDB,
sizeof(usb_stor_sense_invalidCDB));
cmnd->result = SAM_STAT_CHECK_CONDITION;
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
@@ -661,7 +660,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
if (devinfo->resetting) {
set_host_byte(cmnd, DID_ERROR);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
goto zombie;
}
@@ -675,8 +674,6 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- cmnd->scsi_done = done;
-
memset(cmdinfo, 0, sizeof(*cmdinfo));
cmdinfo->uas_tag = idx + 1; /* uas-tag == usb-stream-id, so 1 based */
cmdinfo->state = SUBMIT_STATUS_URB | ALLOC_CMD_URB | SUBMIT_CMD_URB;
@@ -706,7 +703,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
*/
if (err == -ENODEV) {
set_host_byte(cmnd, DID_ERROR);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
goto zombie;
}
if (err) {
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 90aa9c12ffac..8b543f2c9857 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -388,7 +388,7 @@ static int usb_stor_control_thread(void * __us)
if (srb->result == DID_ABORT << 16) {
SkipForAbort:
usb_stor_dbg(us, "scsi command aborted\n");
- srb = NULL; /* Don't call srb->scsi_done() */
+ srb = NULL; /* Don't call scsi_done() */
}
/*
@@ -417,7 +417,7 @@ SkipForAbort:
if (srb) {
usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
srb->result);
- srb->scsi_done(srb);
+ scsi_done(srb);
}
} /* for (;;) */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 236ec689056a..2884383f1718 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1403,7 +1403,7 @@ extern int ata_link_nr_enabled(struct ata_link *link);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
-extern struct device_attribute *ata_common_sdev_attrs[];
+extern const struct attribute_group *ata_common_sdev_groups[];
/*
* All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
@@ -1433,14 +1433,14 @@ extern struct device_attribute *ata_common_sdev_attrs[];
#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
- .sdev_attrs = ata_common_sdev_attrs
+ .sdev_groups = ata_common_sdev_groups
#ifdef CONFIG_SATA_HOST
-extern struct device_attribute *ata_ncq_sdev_attrs[];
+extern const struct attribute_group *ata_ncq_sdev_groups[];
#define ATA_NCQ_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
- .sdev_attrs = ata_ncq_sdev_attrs, \
+ .sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
#endif
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6fe125a71b60..79e4903bd414 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -664,6 +664,7 @@ extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
+int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 31078063afac..cd9eaa83432b 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,7 +10,6 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>
struct Scsi_Host;
@@ -65,6 +64,12 @@ struct scsi_pointer {
#define SCMD_STATE_COMPLETE 0
#define SCMD_STATE_INFLIGHT 1
+enum scsi_cmnd_submitter {
+ SUBMITTED_BY_BLOCK_LAYER = 0,
+ SUBMITTED_BY_SCSI_ERROR_HANDLER = 1,
+ SUBMITTED_BY_SCSI_RESET_IOCTL = 2,
+} __packed;
+
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
@@ -90,6 +95,7 @@ struct scsi_cmnd {
unsigned char prot_op;
unsigned char prot_type;
unsigned char prot_flags;
+ enum scsi_cmnd_submitter submitter;
unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
@@ -117,10 +123,6 @@ struct scsi_cmnd {
* command (auto-sense). Length must be
* SCSI_SENSE_BUFFERSIZE bytes. */
- /* Low-level done function - can be used by low-level driver to point
- * to completion function. Not used by mid/upper level code. */
- void (*scsi_done) (struct scsi_cmnd *);
-
/*
* The following fields can be written to by the host specific code.
* Everything else should be left alone.
@@ -165,6 +167,8 @@ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
return *(struct scsi_driver **)rq->rq_disk->private_data;
}
+void scsi_done(struct scsi_cmnd *cmd);
+
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 430b73bd02ac..79c3045611fa 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -225,6 +225,12 @@ struct scsi_device {
struct device sdev_gendev,
sdev_dev;
+ /*
+ * The array size 6 provides space for one attribute group for the
+ * SCSI core, four attribute groups defined by SCSI LLDs and one
+ * terminating NULL pointer.
+ */
+ const struct attribute_group *gendev_attr_groups[6];
struct execute_work ew; /* used to get process context on put */
struct work_struct requeue_work;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 75363707b73f..ae715959f886 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -474,14 +474,9 @@ struct scsi_host_template {
#define SCSI_DEFAULT_HOST_BLOCKED 7
/*
- * Pointer to the sysfs class properties for this host, NULL terminated.
+ * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
*/
- struct device_attribute **shost_attrs;
-
- /*
- * Pointer to the SCSI device properties for this host, NULL terminated.
- */
- struct device_attribute **sdev_attrs;
+ const struct attribute_group **shost_groups;
/*
* Pointer to the SCSI device attribute groups for this host,
@@ -516,7 +511,7 @@ struct scsi_host_template {
unsigned long irq_flags; \
int rc; \
spin_lock_irqsave(shost->host_lock, irq_flags); \
- rc = func_name##_lck (cmd, cmd->scsi_done); \
+ rc = func_name##_lck(cmd); \
spin_unlock_irqrestore(shost->host_lock, irq_flags); \
return rc; \
}
@@ -695,6 +690,12 @@ struct Scsi_Host {
/* ldm bits */
struct device shost_gendev, shost_dev;
+ /*
+ * The array size 3 provides space for one attribute group defined by
+ * the SCSI core, one attribute group defined by the SCSI LLD and one
+ * terminating NULL pointer.
+ */
+ const struct attribute_group *shost_dev_attr_groups[3];
/*
* Points to the transport data (if any) which is allocated
@@ -798,16 +799,6 @@ void scsi_host_busy_iter(struct Scsi_Host *,
struct class_container;
/*
- * These two functions are used to allocate and free a pseudo device
- * which will connect to the host adapter itself rather than any
- * physical device. You must deallocate when you are done with the
- * thing. This physical pseudo-device isn't real and won't be available
- * from any high-level drivers.
- */
-extern void scsi_free_host_dev(struct scsi_device *);
-extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
-
-/*
* DIF defines the exchange of protection information between
* initiator and SBC block device.
*
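
The scsi_host.h hunk above applies the same conversion at the host level: shost_attrs (and the old sdev_attrs pointer list) are removed, and a driver now publishes a single shost_groups array. A minimal sketch with a hypothetical host attribute; class_to_shost() maps the sysfs device back to the Scsi_Host, and the my_hba private data is assumed:

    static ssize_t my_fw_rev_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
    {
            struct Scsi_Host *shost = class_to_shost(dev);
            struct my_hba *hba = shost_priv(shost); /* assumed private data */

            return sysfs_emit(buf, "%s\n", hba->fw_rev);
    }
    static DEVICE_ATTR_RO(my_fw_rev);

    static struct attribute *my_shost_attrs[] = {
            &dev_attr_my_fw_rev.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(my_shost);         /* emits my_shost_groups[] */

    static const struct scsi_host_template my_sht = {
            /* ... */
            .shost_groups = my_shost_groups,
    };
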
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 05ec927a3c72..0e75b9277c8c 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -41,6 +41,7 @@ enum sas_linkrate {
SAS_LINK_RATE_G2 = SAS_LINK_RATE_3_0_GBPS,
SAS_LINK_RATE_6_0_GBPS = 10,
SAS_LINK_RATE_12_0_GBPS = 11,
+ SAS_LINK_RATE_22_5_GBPS = 12,
/* These are virtual to the transport class and may never
* be signalled normally since the standard defined field
* is only 4 bits */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index fb11c7693b25..c2b36f7d917d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -749,7 +749,7 @@ struct se_lun {
/* ALUA target port group linkage */
struct list_head lun_tg_pt_gp_link;
- struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
+ struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp;
spinlock_t lun_tg_pt_gp_lock;
struct se_portal_group *lun_tpg;
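
The __rcu annotation added to se_lun::lun_tg_pt_gp above means the pointer must be published and read through the RCU accessors rather than plain loads and stores. A generic sketch of that discipline, assuming writers still serialize on lun_tg_pt_gp_lock (helper names and the field read inside the critical section are illustrative, not taken from this patch):

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <target/target_core_base.h>

    /* Reader: pin the target port group only while inspecting it. */
    static u16 example_read_tg_pt_gp_id(struct se_lun *lun)
    {
            struct t10_alua_tg_pt_gp *tg_pt_gp;
            u16 id = 0;

            rcu_read_lock();
            tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
            if (tg_pt_gp)
                    id = tg_pt_gp->tg_pt_gp_id;
            rcu_read_unlock();

            return id;
    }

    /* Writer: publish the new pointer under the existing spinlock. */
    static void example_set_tg_pt_gp(struct se_lun *lun,
                                     struct t10_alua_tg_pt_gp *new_gp)
    {
            spin_lock(&lun->lun_tg_pt_gp_lock);
            rcu_assign_pointer(lun->lun_tg_pt_gp, new_gp);
            spin_unlock(&lun->lun_tg_pt_gp_lock);
    }
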
@@ -812,8 +812,9 @@ struct se_device {
atomic_long_t read_bytes;
atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
- atomic_t simple_cmds;
- atomic_t dev_ordered_sync;
+ atomic_t non_ordered;
+ bool ordered_sync_in_progress;
+ atomic_t delayed_cmd_count;
atomic_t dev_qf_count;
u32 export_count;
spinlock_t delayed_cmd_lock;
@@ -834,6 +835,7 @@ struct se_device {
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
struct work_struct qf_work_queue;
+ struct work_struct delayed_cmd_work;
struct list_head delayed_cmd_list;
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
@@ -900,6 +902,7 @@ struct se_portal_group {
* Negative values can be used by fabric drivers for internal use TPGs.
*/
int proto_id;
+ bool enabled;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t tpg_pr_ref_count;
/* Spinlock for adding/removing ACLed Nodes */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 3c5ade7a04a6..38f0662476d1 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -89,6 +89,7 @@ struct target_core_fabric_ops {
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
const char *);
+ int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);