Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 3
-rw-r--r--  drivers/scsi/3w-sas.c | 3
-rw-r--r--  drivers/scsi/3w-xxxx.c | 2
-rw-r--r--  drivers/scsi/BusLogic.c | 4
-rw-r--r--  drivers/scsi/Kconfig | 3
-rw-r--r--  drivers/scsi/a100u2w.c | 2
-rw-r--r--  drivers/scsi/a2091.c | 2
-rw-r--r--  drivers/scsi/a3000.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 5
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/aha152x.c | 4
-rw-r--r--  drivers/scsi/aha1542.c | 5
-rw-r--r--  drivers/scsi/aha1740.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 2
-rw-r--r--  drivers/scsi/am53c974.c | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 3
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 24
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 2
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 2
-rw-r--r--  drivers/scsi/arm/eesox.c | 2
-rw-r--r--  drivers/scsi/arm/oak.c | 2
-rw-r--r--  drivers/scsi/arm/powertec.c | 2
-rw-r--r--  drivers/scsi/atp870u.c | 4
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 29
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 1
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 20
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 2
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad.c | 16
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 4
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 1
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 4
-rw-r--r--  drivers/scsi/ch.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 1
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 6
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 2
-rw-r--r--  drivers/scsi/cxlflash/main.c | 2
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 2
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 2
-rw-r--r--  drivers/scsi/dc395x.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 6
-rw-r--r--  drivers/scsi/dmx3191d.c | 2
-rw-r--r--  drivers/scsi/elx/efct/efct_lio.c | 20
-rw-r--r--  drivers/scsi/elx/efct/efct_xport.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 2
-rw-r--r--  drivers/scsi/esp_scsi.c | 2
-rw-r--r--  drivers/scsi/esp_scsi.h | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe_sysfs.c | 8
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 6
-rw-r--r--  drivers/scsi/fdomain.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 17
-rw-r--r--  drivers/scsi/g_NCR5380.c | 4
-rw-r--r--  drivers/scsi/gvp11.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 152
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 10
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 10
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 222
-rw-r--r--  drivers/scsi/hosts.c | 7
-rw-r--r--  drivers/scsi/hpsa.c | 9
-rw-r--r--  drivers/scsi/hptiop.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 30
-rw-r--r--  drivers/scsi/imm.c | 2
-rw-r--r--  drivers/scsi/initio.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 790
-rw-r--r--  drivers/scsi/ipr.h | 64
-rw-r--r--  drivers/scsi/isci/init.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 7
-rw-r--r--  drivers/scsi/jazz_esp.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 29
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 124
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 106
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 80
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 63
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 59
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 50
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 487
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 2
-rw-r--r--  drivers/scsi/mac_esp.c | 2
-rw-r--r--  drivers/scsi/megaraid.c | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 20
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 7
-rw-r--r--  drivers/scsi/mesh.c | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 112
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_image.h | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_init.h | 23
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_pci.h | 6
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_sas.h | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 4
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h | 20
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_app.c | 9
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_debug.h | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 98
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 111
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_transport.c | 24
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 20
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 14
-rw-r--r--  drivers/scsi/mvme147.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 2
-rw-r--r--  drivers/scsi/mvumi.c | 2
-rw-r--r--  drivers/scsi/myrb.c | 2
-rw-r--r--  drivers/scsi/myrs.c | 2
-rw-r--r--  drivers/scsi/nsp32.c | 2
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 7
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 34
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 1
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 126
-rw-r--r--  drivers/scsi/pmcraid.c | 4
-rw-r--r--  drivers/scsi/ppa.c | 2
-rw-r--r--  drivers/scsi/ps3rom.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 7
-rw-r--r--  drivers/scsi/qedi/qedi_gbl.h | 2
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 3
-rw-r--r--  drivers/scsi/qla1280.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 267
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 33
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 65
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 20
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 162
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 14
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 8
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 18
-rw-r--r--  drivers/scsi/qlogicpti.c | 13
-rw-r--r--  drivers/scsi/scsi.c | 14
-rw-r--r--  drivers/scsi/scsi_debug.c | 1005
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 16
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_sysctl.c | 16
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 3
-rw-r--r--  drivers/scsi/sd.c | 8
-rw-r--r--  drivers/scsi/sd_zbc.c | 8
-rw-r--r--  drivers/scsi/ses.c | 26
-rw-r--r--  drivers/scsi/sg.c | 10
-rw-r--r--  drivers/scsi/sgiwd93.c | 2
-rw-r--r--  drivers/scsi/smartpqi/Kconfig | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 6
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 286
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 34
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 2
-rw-r--r--  drivers/scsi/snic/snic_main.c | 2
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 7
-rw-r--r--  drivers/scsi/sr.c | 7
-rw-r--r--  drivers/scsi/stex.c | 2
-rw-r--r--  drivers/scsi/storvsc_drv.c | 16
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/sun_esp.c | 4
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/scsi/virtio_scsi.c | 2
-rw-r--r--  drivers/scsi/wd719x.c | 2
-rw-r--r--  drivers/scsi/xen-scsifront.c | 2
-rw-r--r--  drivers/scsi/zorro_esp.c | 2
192 files changed, 2737 insertions(+), 2819 deletions(-)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 6cb9cca9565b..38d20a69ee12 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1976,8 +1976,7 @@ static int twa_slave_configure(struct scsi_device *sdev)
return 0;
} /* End twa_slave_configure() */
-/* scsi_host_template initializer */
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "3ware 9000 Storage Controller",
.queuecommand = twa_scsi_queue,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f41c93454f0c..55989eaa2d9f 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1530,8 +1530,7 @@ static int twl_slave_configure(struct scsi_device *sdev)
return 0;
} /* End twl_slave_configure() */
-/* scsi_host_template initializer */
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "3w-sas",
.queuecommand = twl_scsi_queue,
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index ffdecb12d654..36c34ced0cc1 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2229,7 +2229,7 @@ static int tw_slave_configure(struct scsi_device *sdev)
return 0;
} /* End tw_slave_configure() */
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "3ware Storage Controller",
.queuecommand = tw_scsi_queue,
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index f7b7ffda1161..72ceaf650b0d 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -54,7 +54,7 @@
#define FAILURE (-1)
#endif
-static struct scsi_host_template blogic_template;
+static const struct scsi_host_template blogic_template;
/*
blogic_drvr_options_count is a count of the number of BusLogic Driver
@@ -3663,7 +3663,7 @@ static int __init blogic_parseopts(char *options)
Get it all started
*/
-static struct scsi_host_template blogic_template = {
+static const struct scsi_host_template blogic_template = {
.module = THIS_MODULE,
.proc_name = "BusLogic",
.write_info = blogic_write_info,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 03e71e3d5e5b..0704809d9d99 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -971,8 +971,7 @@ config SCSI_SYM53C8XX_MMIO
config SCSI_IPR
tristate "IBM Power Linux RAID adapter support"
- depends on PCI && SCSI && ATA
- select SATA_HOST
+ depends on PCI && SCSI
select FW_LOADER
select IRQ_POLL
select SGL_ALLOC
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index d02eb5b213d0..b95147fb18b0 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1065,7 +1065,7 @@ static irqreturn_t inia100_intr(int irqno, void *devid)
return res;
}
-static struct scsi_host_template inia100_template = {
+static const struct scsi_host_template inia100_template = {
.proc_name = "inia100",
.name = inia100_REVID,
.queuecommand = inia100_queue,
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 74312400468b..204448bfd04b 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -180,7 +180,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
}
}
-static struct scsi_host_template a2091_scsi_template = {
+static const struct scsi_host_template a2091_scsi_template = {
.module = THIS_MODULE,
.name = "Commodore A2091/A590 SCSI",
.show_info = wd33c93_show_info,
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 2c5cb1a02e86..c3028726bbe4 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -197,7 +197,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
}
}
-static struct scsi_host_template amiga_a3000_scsi_template = {
+static const struct scsi_host_template amiga_a3000_scsi_template = {
.module = THIS_MODULE,
.name = "Amiga 3000 built-in SCSI",
.show_info = wd33c93_show_info,
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 5ba5c18b77b4..68f4dbcfff49 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
@@ -1476,7 +1475,7 @@ static const struct file_operations aac_cfg_fops = {
.llseek = noop_llseek,
};
-static struct scsi_host_template aac_driver_template = {
+static const struct scsi_host_template aac_driver_template = {
.module = THIS_MODULE,
.name = "AAC",
.proc_name = AAC_DRIVERNAME,
@@ -1783,7 +1782,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
aac_scan_host(aac);
- pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
return 0;
@@ -1949,7 +1947,6 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
aac_release_resources(aac);
- pci_disable_pcie_error_reporting(pdev);
aac_adapter_ioremap(aac, 0);
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index f301aec044bb..ab066bb27a57 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -10602,7 +10602,7 @@ static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
}
#endif
-static struct scsi_host_template advansys_template = {
+static const struct scsi_host_template advansys_template = {
.proc_name = DRV_NAME,
#ifdef CONFIG_PROC_FS
.show_info = advansys_show_info,
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index caeebfb67149..055adb349b0e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -400,7 +400,7 @@ MODULE_DEVICE_TABLE(isapnp, id_table);
#endif /* !AHA152X_PCMCIA */
-static struct scsi_host_template aha152x_driver_template;
+static const struct scsi_host_template aha152x_driver_template;
/*
* internal states of the host
@@ -2946,7 +2946,7 @@ static int aha152x_adjust_queue(struct scsi_device *device)
return 0;
}
-static struct scsi_host_template aha152x_driver_template = {
+static const struct scsi_host_template aha152x_driver_template = {
.module = THIS_MODULE,
.name = AHA152X_REVID,
.proc_name = "aha152x",
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 552ca95157da..9503996c6325 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -737,7 +737,8 @@ fail:
}
/* return non-zero on detection */
-static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct device *pdev, int indx)
+static struct Scsi_Host *aha1542_hw_init(const struct scsi_host_template *tpnt,
+ struct device *pdev, int indx)
{
unsigned int base_io = io[indx];
struct Scsi_Host *sh;
@@ -1031,7 +1032,7 @@ static int aha1542_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
return 0;
}
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.proc_name = "aha1542",
.name = "Adaptec 1542",
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 134255751819..3d18945abaf7 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -543,7 +543,7 @@ static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy)
return SUCCESS;
}
-static struct scsi_host_template aha1740_template = {
+static const struct scsi_host_template aha1740_template = {
.module = THIS_MODULE,
.proc_name = "aha1740",
.show_info = aha1740_show_info,
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 954d0c5ae2e2..f7f81f6c3fbf 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -35,7 +35,7 @@ static struct scsi_transport_template *aic94xx_transport_template;
static int asd_scan_finished(struct Scsi_Host *, unsigned long);
static void asd_scan_start(struct Scsi_Host *);
-static struct scsi_host_template aic94xx_sht = {
+static const struct scsi_host_template aic94xx_sht = {
.module = THIS_MODULE,
/* .name is initialized */
.name = "aic94xx",
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index b69edb473295..fbb29dbb1e50 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -371,7 +371,7 @@ static void dc390_check_eeprom(struct esp *esp)
static int pci_esp_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct scsi_host_template *hostt = &scsi_esp_template;
+ const struct scsi_host_template *hostt = &scsi_esp_template;
int err = -ENODEV;
struct Scsi_Host *shost;
struct esp *esp;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 07df255c4b1b..ed8d9319862a 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,11 +45,12 @@
#include <linux/interrupt.h>
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
+#define ARCMSR_NAME "arcmsr"
#define ARCMSR_MAX_FREECCB_NUM 1024
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.50.00.05-20210429"
+#define ARCMSR_DRIVER_VERSION "v1.50.00.13-20230206"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index d3fb8a9c1c39..2cd12c7f06c6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -57,7 +57,6 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/circ_buf.h>
#include <asm/dma.h>
#include <asm/io.h>
@@ -152,8 +151,9 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_de
return scsi_change_queue_depth(sdev, queue_depth);
}
-static struct scsi_host_template arcmsr_scsi_host_template = {
+static const struct scsi_host_template arcmsr_scsi_host_template = {
.module = THIS_MODULE,
+ .proc_name = ARCMSR_NAME,
.name = "Areca SAS/SATA RAID driver",
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
@@ -997,6 +997,8 @@ static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
goto dma32;
+ if (acb->adapter_type <= ACB_ADAPTER_TYPE_B)
+ return 0;
if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
printk("arcmsr: set DMA 64 mask failed\n");
@@ -1300,20 +1302,13 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
return rtnval;
}
-static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
-{
- struct scsi_cmnd *pcmd = ccb->pcmd;
-
- scsi_dma_unmap(pcmd);
-}
-
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
struct AdapterControlBlock *acb = ccb->acb;
struct scsi_cmnd *pcmd = ccb->pcmd;
unsigned long flags;
atomic_dec(&acb->ccboutstandingcount);
- arcmsr_pci_unmap_dma(ccb);
+ scsi_dma_unmap(ccb->pcmd);
ccb->startdone = ARCMSR_CCB_DONE;
spin_lock_irqsave(&acb->ccblist_lock, flags);
list_add_tail(&ccb->list, &acb->ccb_free_list);
@@ -1597,7 +1592,7 @@ static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->pcmd->result = DID_NO_CONNECT << 16;
- arcmsr_pci_unmap_dma(ccb);
+ scsi_dma_unmap(ccb->pcmd);
scsi_done(ccb->pcmd);
}
}
@@ -2260,8 +2255,11 @@ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
spin_lock_irqsave(&acb->rqbuffer_lock, flags);
prbuffer = arcmsr_get_iop_rqbuffer(acb);
- buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
- (ARCMSR_MAX_QBUFFER - 1);
+ if (acb->rqbuf_putIndex >= acb->rqbuf_getIndex) {
+ buf_empty_len = (ARCMSR_MAX_QBUFFER - 1) -
+ (acb->rqbuf_putIndex - acb->rqbuf_getIndex);
+ } else
+ buf_empty_len = acb->rqbuf_getIndex - acb->rqbuf_putIndex - 1;
if (buf_empty_len >= readl(&prbuffer->data_len)) {
if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 7602639da9b3..0b046e4b395c 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2780,7 +2780,7 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
return 0;
}
-static struct scsi_host_template acornscsi_template = {
+static const struct scsi_host_template acornscsi_template = {
.module = THIS_MODULE,
.show_info = acornscsi_show_info,
.name = "AcornSCSI",
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 2527b542bcdd..925d0bd68aa5 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -238,7 +238,7 @@ arxescsi_show_info(struct seq_file *m, struct Scsi_Host *host)
return 0;
}
-static struct scsi_host_template arxescsi_template = {
+static const struct scsi_host_template arxescsi_template = {
.show_info = arxescsi_show_info,
.name = "ARXE SCSI card",
.info = arxescsi_info,
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 5d4f67ba74c0..d1a2a22ffe8c 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -211,7 +211,7 @@ static void cumanascsi_write(struct NCR5380_hostdata *hostdata,
#include "../NCR5380.c"
-static struct scsi_host_template cumanascsi_template = {
+static const struct scsi_host_template cumanascsi_template = {
.module = THIS_MODULE,
.name = "Cumana 16-bit SCSI",
.info = cumanascsi_info,
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index d15053f02472..c5d8f4313b31 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -356,7 +356,7 @@ static int cumanascsi_2_show_info(struct seq_file *m, struct Scsi_Host *host)
return 0;
}
-static struct scsi_host_template cumanascsi2_template = {
+static const struct scsi_host_template cumanascsi2_template = {
.module = THIS_MODULE,
.show_info = cumanascsi_2_show_info,
.write_info = cumanascsi_2_set_proc_info,
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 6f374af9f45f..b3ec7635bc72 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -473,7 +473,7 @@ static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute
static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR,
eesoxscsi_show_term, eesoxscsi_store_term);
-static struct scsi_host_template eesox_template = {
+static const struct scsi_host_template eesox_template = {
.module = THIS_MODULE,
.show_info = eesoxscsi_show_info,
.write_info = eesoxscsi_set_proc_info,
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index f18a0620c808..d69245007096 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -100,7 +100,7 @@ printk("reading %p len %d\n", addr, len);
#include "../NCR5380.c"
-static struct scsi_host_template oakscsi_template = {
+static const struct scsi_host_template oakscsi_template = {
.module = THIS_MODULE,
.name = "Oak 16-bit SCSI",
.info = oakscsi_info,
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 7586d2a03812..3b5991427886 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -279,7 +279,7 @@ powertecscsi_store_term(struct device *dev, struct device_attribute *attr, const
static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR,
powertecscsi_show_term, powertecscsi_store_term);
-static struct scsi_host_template powertecscsi_template = {
+static const struct scsi_host_template powertecscsi_template = {
.module = THIS_MODULE,
.show_info = powertecscsi_show_info,
.write_info = powertecscsi_set_proc_info,
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 7143418d690f..2a748af269c2 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -40,7 +40,7 @@
#include "atp870u.h"
-static struct scsi_host_template atp870u_template;
+static const struct scsi_host_template atp870u_template;
static void send_s870(struct atp_unit *dev,unsigned char c);
static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip,
unsigned char lvdmode);
@@ -1726,7 +1726,7 @@ static void atp870u_remove (struct pci_dev *pdev)
}
MODULE_LICENSE("GPL");
-static struct scsi_host_template atp870u_template = {
+static const struct scsi_host_template atp870u_template = {
.module = THIS_MODULE,
.name = "atp870u" /* name */,
.proc_name = "atp870u",
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 69b1a80e3687..0b59b63bce79 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1173,7 +1173,6 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
unsigned int curr_pages;
- u32 internal_page_offset = 0;
u32 temp_num_pages = num_pages;
if (num_pages == 0xff)
@@ -1192,7 +1191,6 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
req->page_offset = page_offset;
be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
- internal_page_offset += req->num_pages;
page_offset += req->num_pages;
num_pages -= req->num_pages;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 50a577ac3bb4..e48f14ad6dfd 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -139,7 +139,7 @@ beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
-DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
beiscsi_##_name##_disp, beiscsi_##_name##_store)
/*
@@ -155,14 +155,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
"\t\t\t\tConfiguration Path : 0x20\n"
"\t\t\t\tiSCSI Protocol : 0x40\n");
-DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
-DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
-DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
-DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
-DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
- beiscsi_active_session_disp, NULL);
-DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
- beiscsi_free_session_disp, NULL);
+static DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
+static DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
+static DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
+static DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
+static DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
+ beiscsi_active_session_disp, NULL);
+static DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
+ beiscsi_free_session_disp, NULL);
static struct attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable.attr,
@@ -398,7 +398,7 @@ static const struct pci_device_id beiscsi_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
-static struct scsi_host_template beiscsi_sht = {
+static const struct scsi_host_template beiscsi_sht = {
.module = THIS_MODULE,
.name = "Emulex 10Gbe open-iscsi Initiator Driver",
.proc_name = DRV_NAME,
@@ -5545,13 +5545,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
goto disable_pci;
}
- /* Enable EEH reporting */
- ret = pci_enable_pcie_error_reporting(pcidev);
- if (ret)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : PCIe Error Reporting "
- "Enabling Failed\n");
-
pci_save_state(pcidev);
/* Initialize Driver configuration Paramters */
@@ -5736,7 +5729,6 @@ free_hba:
pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
- pci_disable_pcie_error_reporting(pcidev);
pci_set_drvdata(pcidev, NULL);
disable_pci:
pci_release_regions(pcidev);
@@ -5779,7 +5771,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
- pci_disable_pcie_error_reporting(pcidev);
pci_set_drvdata(pcidev, NULL);
pci_release_regions(pcidev);
pci_disable_device(pcidev);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 98977c0700f1..71c95d144560 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -16,7 +16,6 @@
#include <linux/in.h>
#include <linux/ctype.h>
#include <linux/module.h>
-#include <linux/aer.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index df18d9d2af53..773c84af784c 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1134,7 +1134,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ strscpy(rspnid->spn, name, sizeof(rspnid->spn));
rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
@@ -1155,7 +1155,7 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ strscpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index d2d396ca0e9a..5023c0ab4277 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -761,7 +761,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strlcpy(port_cfg->sym_name.symname, model,
+ strscpy(port_cfg->sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);
@@ -822,7 +822,7 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strlcpy(port_cfg->node_sym_name.symname, model,
+ strscpy(port_cfg->node_sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index b12afcc4b189..008afd817087 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strlcpy(hba_attr->driver_version, (char *)driver_info->version,
+ strscpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strlcpy(hba_attr->os_name, driver_info->host_os_name,
+ strscpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2663,13 +2663,13 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strlcpy(hba_attr->node_sym_name.symname,
+ strscpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strscpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2736,19 +2736,19 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strlcpy(port_attr->os_device_name, driver_info->os_device_name,
+ strscpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strlcpy(port_attr->host_name, driver_info->host_machine_name,
+ strscpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strlcpy(port_attr->port_sym_name.symname,
+ strscpy(port_attr->port_sym_name.symname,
bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strscpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,7 +4667,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strlcpy(symbl,
+ strscpy(symbl,
(char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
sizeof(symbl));
@@ -5194,7 +5194,7 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strscpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
sizeof(symbl));
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 5740302d83ac..e1ed1424fddb 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -2788,7 +2788,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 4e3cef02f10f..c9745c0b4eee 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -330,7 +330,7 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strlcpy(lp.log_entry.string_log, log_str,
+ strscpy(lp.log_entry.string_log, log_str,
BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index e5aa982ffedc..62cb7a864fd5 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -738,9 +738,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
goto out_release_region;
}
- /* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
- pci_enable_pcie_error_reporting(pdev);
-
bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
@@ -801,8 +798,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
pci_iounmap(pdev, bfad->pci_bar0_kva);
pci_iounmap(pdev, bfad->pci_bar2_kva);
pci_release_regions(pdev);
- /* Disable PCIE Advanced Error Recovery (AER) */
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -970,19 +965,19 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ strscpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version));
if (host_name)
- strlcpy(driver_info.host_machine_name, host_name,
+ strscpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name));
if (os_name)
- strlcpy(driver_info.host_os_name, os_name,
+ strscpy(driver_info.host_os_name, os_name,
sizeof(driver_info.host_os_name));
if (os_patch)
- strlcpy(driver_info.host_os_patch, os_patch,
+ strscpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch));
- strlcpy(driver_info.os_device_name, bfad->pci_name,
+ strscpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name));
/* FCS driver info init */
@@ -1562,7 +1557,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
if (restart_bfa(bfad) == -1)
goto out_disable_device;
- pci_enable_pcie_error_reporting(pdev);
dev_printk(KERN_WARNING, &pdev->dev,
"slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 5a85401e9e2d..e96e4b6df265 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -834,7 +834,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strlcpy(symname, port_attr.port_cfg.sym_name.symname,
+ strscpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return sysfs_emit(buf, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 79d4f7ee5bcb..520f9152f3bf 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -119,7 +119,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strscpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -307,7 +307,7 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strlcpy(iocmd->attr.port_symname.symname,
+ strscpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
sizeof(iocmd->attr.port_symname.symname));
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index eaee7c8bc2d2..7682cfa34265 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -30,7 +30,6 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
-#include <linux/aer.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c335f7a188d2..a9d3d8562d3c 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1046,7 +1046,7 @@ bfad_fc_host_init(struct bfad_im_port_s *im_port)
/* For fibre channel services type 0x20 */
fc_host_supported_fc4s(host)[7] = 1;
- strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
+ strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
sprintf(fc_host_symbolic_name(host), "%s", symname);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index a3c800e04a2e..9971f32a663c 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -22,7 +22,7 @@
struct scsi_transport_template *bnx2i_scsi_xport_template;
struct iscsi_transport bnx2i_iscsi_transport;
-static struct scsi_host_template bnx2i_host_template;
+static const struct scsi_host_template bnx2i_host_template;
/*
* Global endpoint resource info
@@ -2250,7 +2250,7 @@ static umode_t bnx2i_attr_is_visible(int param_type, int param)
* 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template
* used while registering with the scsi host and iSCSI transport module.
*/
-static struct scsi_host_template bnx2i_host_template = {
+static const struct scsi_host_template bnx2i_host_template = {
.module = THIS_MODULE,
.name = "QLogic Offload iSCSI Initiator",
.proc_name = "bnx2i",
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 72fe6df78bc5..ac648bb8f7e7 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -995,7 +995,7 @@ static int __init init_ch_module(void)
int rc;
printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
- ch_sysfs_class = class_create(THIS_MODULE, "scsi_changer");
+ ch_sysfs_class = class_create("scsi_changer");
if (IS_ERR(ch_sysfs_class)) {
rc = PTR_ERR(ch_sysfs_class);
return rc;
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ccbded3353bd..0c32faefad7c 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -38,7 +38,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index ff9d4287937a..ec6530240707 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -80,7 +80,7 @@ static struct cxgb3_client t3_client = {
.event_handler = cxgb3i_dev_event_handler,
};
-static struct scsi_host_template cxgb3i_host_template = {
+static const struct scsi_host_template cxgb3i_host_template = {
.module = THIS_MODULE,
.name = DRV_MODULE_NAME,
.proc_name = DRV_MODULE_NAME,
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index af281e271f88..abde60a50cf7 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -337,7 +337,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
- unsigned int max_conns, struct scsi_host_template *sht,
+ unsigned int max_conns, const struct scsi_host_template *sht,
struct scsi_transport_template *stt)
{
struct cxgbi_hba *chba;
@@ -2314,9 +2314,9 @@ static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
frags++;
if (frags >= SKB_WR_LIST_SIZE) {
- pr_err("csk 0x%p, frags %u, %u,%u >%lu.\n",
+ pr_err("csk 0x%p, frags %u, %u,%u >%u.\n",
csk, skb_shinfo(skb)->nr_frags, skb->len,
- skb->data_len, SKB_WR_LIST_SIZE);
+ skb->data_len, (unsigned int)SKB_WR_LIST_SIZE);
return -EINVAL;
}
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index d8fc7beafa20..d92cf1dccc2f 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -591,7 +591,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
int *);
int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
- struct scsi_host_template *,
+ const struct scsi_host_template *,
struct scsi_transport_template *);
void cxgbi_hbas_remove(struct cxgbi_device *);
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 395b00b942f7..debd36974119 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3880,7 +3880,7 @@ static int cxlflash_class_init(void)
cxlflash_major = MAJOR(devno);
- cxlflash_class = class_create(THIS_MODULE, "cxlflash");
+ cxlflash_class = class_create("cxlflash");
if (IS_ERR(cxlflash_class)) {
rc = PTR_ERR(cxlflash_class);
pr_err("%s: class_create failed rc=%d\n", __func__, rc);
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 22cfc2e1dfb9..e1b55b03e812 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -358,7 +358,7 @@ retry:
dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
retry_cnt ? "re" : "", scsi_cmd[0]);
- /* Drop the ioctl read semahpore across lengthy call */
+ /* Drop the ioctl read semaphore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 9caabf550436..cbd5a648a131 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -448,7 +448,7 @@ static int write_same16(struct scsi_device *sdev,
put_unaligned_be32(ws_limit < left ? ws_limit : left,
&scsi_cmd[10]);
- /* Drop the ioctl read semahpore across lengthy call */
+ /* Drop the ioctl read semaphore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT,
cmd_buf, CMD_BUFSIZE, to,
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 670a836a6ba1..c8e86f8a631e 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4541,7 +4541,7 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
}
-static struct scsi_host_template dc395x_driver_template = {
+static const struct scsi_host_template dc395x_driver_template = {
.module = THIS_MODULE,
.proc_name = DC395X_NAME,
.show_info = dc395x_show_info,
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 362fa631f39b..a226dc1b65d7 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1145,10 +1145,12 @@ static int alua_activate(struct scsi_device *sdev,
rcu_read_unlock();
mutex_unlock(&h->init_mutex);
- if (alua_rtpg_queue(pg, sdev, qdata, true))
+ if (alua_rtpg_queue(pg, sdev, qdata, true)) {
fn = NULL;
- else
+ } else {
+ kfree(qdata);
err = SCSI_DH_DEV_OFFLINED;
+ }
kref_put(&pg->kref, release_port_group);
out:
if (fn)
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index a171ce6b70b2..dfb091d34363 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -39,7 +39,7 @@
#define DMX3191D_REGION_LEN 8
-static struct scsi_host_template dmx3191d_driver_template = {
+static const struct scsi_host_template dmx3191d_driver_template = {
.module = THIS_MODULE,
.proc_name = DMX3191D_DRIVER_NAME,
.name = "Domex DMX3191D",
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index be4b5c1ee32d..a982b9cf9870 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -285,11 +285,6 @@ efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
return tpg->tpg_attrib.prod_mode_write_protect;
}
-static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
{
struct efct_scsi_tgt_io *ocp =
@@ -355,15 +350,6 @@ static void efct_lio_close_session(struct se_session *se_sess)
efc_node_post_shutdown(node, NULL);
}
-static u32 efct_lio_sess_get_index(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
-{
-}
-
static int efct_lio_get_cmd_state(struct se_cmd *cmd)
{
struct efct_scsi_tgt_io *ocp =
@@ -1607,14 +1593,11 @@ static const struct target_core_fabric_ops efct_lio_ops = {
.tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
.tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
.tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
- .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
.check_stop_free = efct_lio_check_stop_free,
.aborted_task = efct_lio_aborted_task,
.release_cmd = efct_lio_release_cmd,
.close_session = efct_lio_close_session,
- .sess_get_index = efct_lio_sess_get_index,
.write_pending = efct_lio_write_pending,
- .set_default_node_attributes = efct_lio_set_default_node_attrs,
.get_cmd_state = efct_lio_get_cmd_state,
.queue_data_in = efct_lio_queue_data_in,
.queue_status = efct_lio_queue_status,
@@ -1644,14 +1627,11 @@ static const struct target_core_fabric_ops efct_lio_npiv_ops = {
efct_lio_npiv_check_demo_write_protect,
.tpg_check_prod_mode_write_protect =
efct_lio_npiv_check_prod_write_protect,
- .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
.check_stop_free = efct_lio_check_stop_free,
.aborted_task = efct_lio_aborted_task,
.release_cmd = efct_lio_release_cmd,
.close_session = efct_lio_close_session,
- .sess_get_index = efct_lio_sess_get_index,
.write_pending = efct_lio_write_pending,
- .set_default_node_attributes = efct_lio_set_default_node_attrs,
.get_cmd_state = efct_lio_get_cmd_state,
.queue_data_in = efct_lio_queue_data_in,
.queue_status = efct_lio_queue_status,
diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c
index 9495cedcc0b9..cf4dced20b8b 100644
--- a/drivers/scsi/elx/efct/efct_xport.c
+++ b/drivers/scsi/elx/efct/efct_xport.c
@@ -10,7 +10,7 @@
static struct dentry *efct_debugfs_root;
static atomic_t efct_debugfs_count;
-static struct scsi_host_template efct_template = {
+static const struct scsi_host_template efct_template = {
.module = THIS_MODULE,
.name = EFCT_DRIVER_NAME,
.supported_mode = MODE_TARGET,
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index e003d923acbf..055d2e87a2c8 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -56,7 +56,7 @@ dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;
-static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
+static DEFINE_SEMAPHORE(buffered_ioctl_semaphore, 1);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
struct esas2r_request *,
struct esas2r_sg_context *,
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index d7a2c49ff5ee..f700a16cd885 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -231,7 +231,7 @@ struct bin_attribute bin_attr_default_nvram = {
.write = NULL
};
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.show_info = esas2r_show_info,
.name = ESAS2R_LONGNAME,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 64ec6bb84550..97816a0e6240 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2660,7 +2660,7 @@ static const char *esp_info(struct Scsi_Host *host)
return "esp";
}
-struct scsi_host_template scsi_esp_template = {
+const struct scsi_host_template scsi_esp_template = {
.module = THIS_MODULE,
.name = "esp",
.info = esp_info,
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index c73760d3cf83..00cd7c0ccc76 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -572,7 +572,7 @@ struct esp {
* 13) Check scsi_esp_register() return value, release all resources
* if an error was returned.
*/
-extern struct scsi_host_template scsi_esp_template;
+extern const struct scsi_host_template scsi_esp_template;
extern int scsi_esp_register(struct esp *);
extern void scsi_esp_unregister(struct esp *);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 38774a272e62..f1429f270170 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -260,7 +260,7 @@ static struct fc_function_template fcoe_vport_fc_functions = {
.bsg_request = fc_lport_bsg_request,
};
-static struct scsi_host_template fcoe_shost_template = {
+static const struct scsi_host_template fcoe_shost_template = {
.module = THIS_MODULE,
.name = "FCoE Driver",
.proc_name = FCOE_NAME,
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 6260aa5ea6af..e17957f8085c 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -659,17 +659,17 @@ static const struct device_type fcoe_fcf_device_type = {
.release = fcoe_fcf_device_release,
};
-static ssize_t ctlr_create_store(struct bus_type *bus, const char *buf,
+static ssize_t ctlr_create_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- return fcoe_ctlr_create_store(bus, buf, count);
+ return fcoe_ctlr_create_store(buf, count);
}
static BUS_ATTR_WO(ctlr_create);
-static ssize_t ctlr_destroy_store(struct bus_type *bus, const char *buf,
+static ssize_t ctlr_destroy_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- return fcoe_ctlr_destroy_store(bus, buf, count);
+ return fcoe_ctlr_destroy_store(buf, count);
}
static BUS_ATTR_WO(ctlr_destroy);
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 62341c6353a7..46b0bf237be1 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -745,8 +745,7 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
return NOTIFY_OK;
}
-ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
- const char *buf, size_t count)
+ssize_t fcoe_ctlr_create_store(const char *buf, size_t count)
{
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
@@ -808,8 +807,7 @@ out_nodev:
return count;
}
-ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
- const char *buf, size_t count)
+ssize_t fcoe_ctlr_destroy_store(const char *buf, size_t count)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 444eac9b2466..504c4e0c5d17 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -496,7 +496,7 @@ static int fdomain_biosparam(struct scsi_device *sdev,
return 0;
}
-static struct scsi_host_template fdomain_template = {
+static const struct scsi_host_template fdomain_template = {
.module = THIS_MODULE,
.name = "Future Domain TMC-16x0",
.proc_name = "fdomain",
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 1077110ab273..984bc5fc55e2 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -95,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static struct scsi_host_template fnic_host_template = {
+static const struct scsi_host_template fnic_host_template = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = fnic_queuecommand,
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index e03967463561..f3c3a26a1384 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -781,28 +781,21 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
u8 rdata_flag)
{
- struct tm tm;
int j, i = 1, len;
- char *fc_trace, *fmt;
int ethhdr_len = sizeof(struct ethhdr) - 1;
int fcoehdr_len = sizeof(struct fcoe_hdr);
int fchdr_len = sizeof(struct fc_frame_header);
int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
+ char *fc_trace;
tdata->frame_type = tdata->frame_type & 0x7F;
len = *orig_len;
- time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
-
- fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
- len += scnprintf(fnic_dbgfs_prt->buffer + len,
- max_size - len,
- fmt,
- tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
- tm.tm_hour, tm.tm_min, tm.tm_sec,
- tdata->time_stamp.tv_nsec, tdata->host_no,
- tdata->frame_type, tdata->frame_len);
+ len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len,
+ "%ptTs.%09lu ns%8x %c%8x\t",
+ &tdata->time_stamp.tv_sec, tdata->time_stamp.tv_nsec,
+ tdata->host_no, tdata->frame_type, tdata->frame_len);
fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 0c768e7d06b9..f6305e3e60f4 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -219,7 +219,7 @@ static int hp_c2502_irqs[] = {
9, 5, 7, 3, 4, -1
};
-static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
+static int generic_NCR5380_init_one(const struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
{
bool is_pmio = base <= 0xffff;
@@ -689,7 +689,7 @@ static int generic_NCR5380_dma_residual(struct NCR5380_hostdata *hostdata)
#include "NCR5380.c"
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.proc_name = DRV_MODULE_NAME,
.name = "Generic NCR5380/NCR53C400 SCSI",
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 7d56a236a011..d2eddad099a2 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -222,7 +222,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
}
}
-static struct scsi_host_template gvp11_scsi_template = {
+static const struct scsi_host_template gvp11_scsi_template = {
.module = THIS_MODULE,
.name = "GVP Series II SCSI",
.show_info = wd33c93_show_info,
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6f8a52a1b808..fb7c52c119df 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -207,6 +207,7 @@ struct hisi_sas_cq {
int rd_point;
int id;
int irq_no;
+ spinlock_t poll_lock;
};
struct hisi_sas_dq {
@@ -344,7 +345,7 @@ struct hisi_sas_hw {
int delay_ms, int timeout_ms);
void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
int complete_hdr_size;
- struct scsi_host_template *sht;
+ const struct scsi_host_template *sht;
};
#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
@@ -484,6 +485,8 @@ struct hisi_hba {
struct dentry *debugfs_dump_dentry;
struct dentry *debugfs_bist_dentry;
struct dentry *debugfs_fifo_dentry;
+
+ int iopoll_q_cnt;
};
/* Generic HW DMA host memory structures */
@@ -653,16 +656,18 @@ extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
extern void hisi_sas_phy_bcast(struct hisi_sas_phy *phy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
- struct hisi_sas_slot *slot);
+ struct hisi_sas_slot *slot,
+ bool need_lock);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
-extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba);
extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
+extern void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba);
+extern void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba);
extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
#endif
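
The new poll_lock in struct hisi_sas_cq, together with hisi_sas_sync_cqs() and hisi_sas_sync_poll_cqs(), gives polled completion queues an equivalent of synchronize_irq(): the completion path holds poll_lock while reaping entries, so taking and immediately releasing the same lock is enough to wait out any poller that is still running. A generic sketch of the idiom, with made-up names:

#include <linux/spinlock.h>

struct my_cq {
	spinlock_t poll_lock;
	/* ...completion ring state... */
};

/* Completion side: called from the blk-mq ->mq_poll path. */
static int my_cq_poll(struct my_cq *cq)
{
	int completed = 0;

	spin_lock(&cq->poll_lock);
	/* reap completion entries here */
	spin_unlock(&cq->poll_lock);
	return completed;
}

/* Teardown side: returns only after any in-flight poll has finished. */
static void my_cq_sync(struct my_cq *cq)
{
	spin_lock(&cq->poll_lock);
	spin_unlock(&cq->poll_lock);
}
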
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 8c038ccf1c09..412431c901a7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -205,7 +205,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
}
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
- struct hisi_sas_slot *slot)
+ struct hisi_sas_slot *slot, bool need_lock)
{
int device_id = slot->device_id;
struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
@@ -239,9 +239,13 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
}
}
- spin_lock(&sas_dev->lock);
- list_del_init(&slot->entry);
- spin_unlock(&sas_dev->lock);
+ if (need_lock) {
+ spin_lock(&sas_dev->lock);
+ list_del_init(&slot->entry);
+ spin_unlock(&sas_dev->lock);
+ } else {
+ list_del_init(&slot->entry);
+ }
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
@@ -529,10 +533,21 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
dq = &hisi_hba->dq[dq_index];
} else {
- struct Scsi_Host *shost = hisi_hba->shost;
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- int queue = qmap->mq_map[raw_smp_processor_id()];
+ int queue;
+
+ if (hisi_hba->iopoll_q_cnt) {
+ /*
+ * Use interrupt queue (queue 0) to deliver and complete
+ * internal IOs of libsas or libata when there is at least
+ * one iopoll queue
+ */
+ queue = 0;
+ } else {
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ queue = qmap->mq_map[raw_smp_processor_id()];
+ }
dq = &hisi_hba->dq[queue];
}
break;
@@ -672,6 +687,55 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
return sas_dev;
}
+static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq)
+{
+ /* make sure CQ entries being processed are processed to completion */
+ spin_lock(&cq->poll_lock);
+ spin_unlock(&cq->poll_lock);
+}
+
+static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq)
+{
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+
+ if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt)
+ return false;
+ return true;
+}
+
+static void hisi_sas_sync_cq(struct hisi_sas_cq *cq)
+{
+ if (hisi_sas_queue_is_poll(cq))
+ hisi_sas_sync_poll_cq(cq);
+ else
+ synchronize_irq(cq->irq_no);
+}
+
+void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+ if (hisi_sas_queue_is_poll(cq))
+ hisi_sas_sync_poll_cq(cq);
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs);
+
+void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+ hisi_sas_sync_cq(cq);
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs);
+
static void hisi_sas_tmf_aborted(struct sas_task *task)
{
struct hisi_sas_slot *slot = task->lldd_task;
@@ -683,10 +747,10 @@ static void hisi_sas_tmf_aborted(struct sas_task *task)
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
/*
- * sync irq to avoid free'ing task
+ * sync irq or poll queue to avoid free'ing task
* before using task in IO completion
*/
- synchronize_irq(cq->irq_no);
+ hisi_sas_sync_cq(cq);
slot->task = NULL;
}
}
@@ -1021,7 +1085,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
- struct hisi_sas_slot *slot)
+ struct hisi_sas_slot *slot, bool need_lock)
{
if (task) {
unsigned long flags;
@@ -1038,7 +1102,7 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
spin_unlock_irqrestore(&task->task_state_lock, flags);
}
- hisi_sas_slot_task_free(hisi_hba, task, slot);
+ hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock);
}
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
@@ -1047,8 +1111,11 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, *slot2;
struct hisi_sas_device *sas_dev = device->lldd_dev;
+ spin_lock(&sas_dev->lock);
list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
- hisi_sas_do_release_task(hisi_hba, slot->task, slot);
+ hisi_sas_do_release_task(hisi_hba, slot->task, slot, false);
+
+ spin_unlock(&sas_dev->lock);
}
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
@@ -1453,13 +1520,41 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
+static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie)
+{
+ struct hisi_sas_phy *phy = data;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct device *dev = hisi_hba->dev;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int phy_no = phy->sas_phy.id;
+
+ phy->reset_completion = &completion;
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
+ if (!wait_for_completion_timeout(&completion,
+ HISI_SAS_WAIT_PHYUP_TIMEOUT))
+ dev_warn(dev, "phy%d wait phyup timed out\n", phy_no);
+
+ phy->reset_completion = NULL;
+}
+
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
+ ASYNC_DOMAIN_EXCLUSIVE(async);
+ int phy_no;
/* Init and wait for PHYs to come up and all libsas event finished. */
- hisi_hba->hw->phys_init(hisi_hba);
- msleep(1000);
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+
+ if (!(hisi_hba->phy_state & BIT(phy_no)))
+ continue;
+
+ async_schedule_domain(hisi_sas_async_init_wait_phyup,
+ phy, &async);
+ }
+
+ async_synchronize_full_domain(&async);
hisi_sas_refresh_port_id(hisi_hba);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@@ -1540,11 +1635,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (slot) {
/*
- * sync irq to avoid free'ing task
+ * sync irq or poll queue to avoid free'ing task
* before using task in IO completion
*/
cq = &hisi_hba->cq[slot->dlvry_queue];
- synchronize_irq(cq->irq_no);
+ hisi_sas_sync_cq(cq);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
@@ -1574,7 +1669,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
*/
if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
if (task->lldd_task)
- hisi_sas_do_release_task(hisi_hba, task, slot);
+ hisi_sas_do_release_task(hisi_hba, task, slot, true);
}
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
@@ -1594,7 +1689,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
*/
if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) &&
qc && qc->scsicmd) {
- hisi_sas_do_release_task(hisi_hba, task, slot);
+ hisi_sas_do_release_task(hisi_hba, task, slot, true);
rc = TMF_RESP_FUNC_COMPLETE;
} else {
rc = hisi_sas_softreset_ata_disk(device);
@@ -1611,10 +1706,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
- * sync irq to avoid free'ing task
+ * sync irq or poll queue to avoid free'ing task
* before using task in IO completion
*/
- synchronize_irq(cq->irq_no);
+ hisi_sas_sync_cq(cq);
slot->task = NULL;
}
}
@@ -1885,10 +1980,10 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
/*
- * sync irq to avoid free'ing task
+ * sync irq or poll queue to avoid free'ing task
* before using task in IO completion
*/
- synchronize_irq(cq->irq_no);
+ hisi_sas_sync_cq(cq);
slot->task = NULL;
}
@@ -1992,18 +2087,6 @@ void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
-void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->cq_nvecs; i++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[i];
-
- synchronize_irq(cq->irq_no);
- }
-}
-EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);
-
int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
@@ -2101,6 +2184,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
/* Completion queue structure */
cq->id = i;
cq->hisi_hba = hisi_hba;
+ spin_lock_init(&cq->poll_lock);
/* Delivery queue structure */
spin_lock_init(&dq->lock);
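
hisi_sas_controller_reset_done() above replaces the fixed msleep(1000) with one async worker per enabled phy, each waiting on its own completion with a timeout, followed by a single wait on the async domain. The same pattern in isolation (hypothetical names, not driver code):

#include <linux/async.h>

static void bring_up_one(void *data, async_cookie_t cookie)
{
	/* enable the object and wait_for_completion_timeout() on its event */
}

static void bring_up_all(void *objs[], int count)
{
	ASYNC_DOMAIN_EXCLUSIVE(domain);
	int i;

	for (i = 0; i < count; i++)
		async_schedule_domain(bring_up_one, objs[i], &domain);

	/* returns once every scheduled worker has completed */
	async_synchronize_full_domain(&domain);
}
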
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index d643c5a49aa9..0aa8c9c88535 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1258,7 +1258,11 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
slot_err_v1_hw(hisi_hba, task, slot);
if (unlikely(slot->abort)) {
- sas_task_abort(task);
+ if (dev_is_sata(device) && task->ata_task.use_ncq)
+ sas_ata_device_link_abort(device, true);
+ else
+ sas_task_abort(task);
+
return;
}
goto out;
@@ -1306,7 +1310,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
}
out:
- hisi_sas_slot_task_free(hisi_hba, task, slot);
+ hisi_sas_slot_task_free(hisi_hba, task, slot, true);
if (task->task_done)
task->task_done(task);
@@ -1735,7 +1739,7 @@ static struct attribute *host_v1_hw_attrs[] = {
ATTRIBUTE_GROUPS(host_v1_hw);
-static struct scsi_host_template sht_v1_hw = {
+static const struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
.module = THIS_MODULE,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index cded42f4ca44..cd78e4c983aa 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2404,7 +2404,11 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
error_info[2], error_info[3]);
if (unlikely(slot->abort)) {
- sas_task_abort(task);
+ if (dev_is_sata(device) && task->ata_task.use_ncq)
+ sas_ata_device_link_abort(device, true);
+ else
+ sas_task_abort(task);
+
return;
}
goto out;
@@ -2462,7 +2466,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- hisi_sas_slot_task_free(hisi_hba, task, slot);
+ hisi_sas_slot_task_free(hisi_hba, task, slot, true);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -3551,7 +3555,7 @@ static void map_queues_v2_hw(struct Scsi_Host *shost)
}
}
-static struct scsi_host_template sht_v2_hw = {
+static const struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
.module = THIS_MODULE,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 0c3fcb807806..20e1607c6282 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -30,6 +30,7 @@
#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
#define CFG_MAX_TAG 0x68
+#define TRANS_LOCK_ICT_TIME 0X70
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
#define HGC_GET_ITV_TIME 0x90
@@ -552,6 +553,11 @@ static int prot_mask;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
+/* the index of iopoll queues are bigger than interrupt queues' */
+static int experimental_iopoll_q_cnt;
+module_param(experimental_iopoll_q_cnt, int, 0444);
+MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0");
+
static void debugfs_work_handler_v3_hw(struct work_struct *work);
static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
@@ -599,15 +605,38 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
})
+static void interrupt_enable_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
+ }
+}
+
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
+ struct pci_dev *pdev = hisi_hba->pci_dev;
int i, j;
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
- hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
+ /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */
+ hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
@@ -619,20 +648,17 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
- hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
- for (i = 0; i < hisi_hba->queue_count; i++)
- hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
-
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
+ if (pdev->revision < 0x30)
+ hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
+
+ interrupt_enable_v3_hw(hisi_hba);
for (i = 0; i < hisi_hba->n_phy; i++) {
enum sas_linkrate max;
struct hisi_sas_phy *phy = &hisi_hba->phy[i];
@@ -649,29 +675,28 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
- hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
- hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
- hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
- hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
- hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
0x30f4240);
- /* used for 12G negotiate */
- hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
+ /* set value through firmware for 920B and later version */
+ if (pdev->revision < 0x30) {
+ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
+ hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
+ /* used for 12G negotiate */
+ hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+ }
+
/* get default FFE configuration for BIST */
for (j = 0; j < FFE_CFG_MAX; j++) {
u32 val = hisi_sas_phy_read32(hisi_hba, i,
@@ -883,6 +908,7 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
CFG_ABT_SET_QUERY_IPTT);
+ spin_lock(&sas_dev->lock);
list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
@@ -890,6 +916,7 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
cfg_abt_set_query_iptt);
}
+ spin_unlock(&sas_dev->lock);
cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
cfg_abt_set_query_iptt);
@@ -2189,6 +2216,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);
+ u32 dw0 = le32_to_cpu(complete_hdr->dw0);
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
@@ -2198,8 +2226,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
* but I/O information has been written to the host memory, we examine
* response IU.
*/
- if (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
- (complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
+ if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
+ (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
return false;
ts->residual = trans_tx_fail_type;
@@ -2215,7 +2243,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
(sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
ts->stat = SAS_PROTO_RESPONSE;
} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
@@ -2320,7 +2348,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort)) {
- sas_task_abort(task);
+ if (dev_is_sata(device) && task->ata_task.use_ncq)
+ sas_ata_device_link_abort(device, true);
+ else
+ sas_task_abort(task);
+
return;
}
goto out;
@@ -2374,7 +2406,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- hisi_sas_slot_task_free(hisi_hba, task, slot);
+ hisi_sas_slot_task_free(hisi_hba, task, slot, true);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -2391,23 +2423,25 @@ out:
task->task_done(task);
}
-static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
+static int complete_v3_hw(struct hisi_sas_cq *cq)
{
- struct hisi_sas_cq *cq = p;
- struct hisi_hba *hisi_hba = cq->hisi_hba;
- struct hisi_sas_slot *slot;
struct hisi_sas_complete_v3_hdr *complete_queue;
- u32 rd_point = cq->rd_point, wr_point;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ u32 rd_point, wr_point;
int queue = cq->id;
+ int completed;
+ rd_point = cq->rd_point;
complete_queue = hisi_hba->complete_hdr[queue];
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
+ completed = (wr_point + HISI_SAS_QUEUE_SLOTS - rd_point) % HISI_SAS_QUEUE_SLOTS;
while (rd_point != wr_point) {
struct hisi_sas_complete_v3_hdr *complete_hdr;
struct device *dev = hisi_hba->dev;
+ struct hisi_sas_slot *slot;
u32 dw0, dw1, dw3;
int iptt;
@@ -2451,6 +2485,28 @@ static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ return completed;
+}
+
+static int queue_complete_v3_hw(struct Scsi_Host *shost, unsigned int queue)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
+ int completed;
+
+ spin_lock(&cq->poll_lock);
+ completed = complete_v3_hw(cq);
+ spin_unlock(&cq->poll_lock);
+
+ return completed;
+}
+
+static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
+{
+ struct hisi_sas_cq *cq = p;
+
+ complete_v3_hw(cq);
+
return IRQ_HANDLED;
}
@@ -2474,8 +2530,9 @@ static void hisi_sas_v3_free_vectors(void *data)
static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
- int vectors;
- int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
+ /* Allocate all MSI vectors to avoid re-insertion issue */
+ int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
+ int vectors, min_msi;
struct Scsi_Host *shost = hisi_hba->shost;
struct pci_dev *pdev = hisi_hba->pci_dev;
struct irq_affinity desc = {
@@ -2492,11 +2549,10 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
return -ENOENT;
- hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
- shost->nr_hw_queues = hisi_hba->cq_nvecs;
+ hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt;
+ shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt;
- devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
- return 0;
+ return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
}
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
@@ -2626,7 +2682,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
u32 status, reg_val;
int rc;
- interrupt_disable_v3_hw(hisi_hba);
+ hisi_sas_sync_poll_cqs(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
hisi_sas_stop_phys(hisi_hba);
@@ -2656,6 +2712,7 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
struct device *dev = hisi_hba->dev;
int rc;
+ interrupt_disable_v3_hw(hisi_hba);
rc = disable_host_v3_hw(hisi_hba);
if (rc) {
dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
@@ -2824,6 +2881,18 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
}
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
+static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ hisi_hba->iopoll_q_cnt);
+}
+static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw);
+
static int slave_configure_v3_hw(struct scsi_device *sdev)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
@@ -2853,6 +2922,7 @@ static struct attribute *host_v3_hw_attrs[] = {
&dev_attr_intr_conv_v3_hw.attr,
&dev_attr_intr_coal_ticks_v3_hw.attr,
&dev_attr_intr_coal_count_v3_hw.attr,
+ &dev_attr_iopoll_q_cnt_v3_hw.attr,
NULL
};
@@ -2940,6 +3010,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
+ HISI_SAS_DEBUGFS_REG(TRANS_LOCK_ICT_TIME),
HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
@@ -3039,7 +3110,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
- hisi_sas_sync_irqs(hisi_hba);
+ hisi_sas_sync_cqs(hisi_hba);
}
static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
@@ -3211,12 +3282,34 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
static void hisi_sas_map_queues(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ struct blk_mq_queue_map *qmap;
+ int i, qoff;
+
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ qmap = &shost->tag_set.map[i];
+ if (i == HCTX_TYPE_DEFAULT) {
+ qmap->nr_queues = hisi_hba->cq_nvecs;
+ } else if (i == HCTX_TYPE_POLL) {
+ qmap->nr_queues = hisi_hba->iopoll_q_cnt;
+ } else {
+ qmap->nr_queues = 0;
+ continue;
+ }
- blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
+ /* At least one interrupt hardware queue */
+ if (!qmap->nr_queues)
+ WARN_ON(i == HCTX_TYPE_DEFAULT);
+ qmap->queue_offset = qoff;
+ if (i == HCTX_TYPE_POLL)
+ blk_mq_map_queues(qmap);
+ else
+ blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
+ BASE_VECTORS_V3_HW);
+ qoff += qmap->nr_queues;
+ }
}
-static struct scsi_host_template sht_v3_hw = {
+static const struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
.module = THIS_MODULE,
@@ -3245,6 +3338,7 @@ static struct scsi_host_template sht_v3_hw = {
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
+ .mq_poll = queue_complete_v3_hw,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -3304,6 +3398,13 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
+ if (experimental_iopoll_q_cnt < 0 ||
+ experimental_iopoll_q_cnt >= hisi_hba->queue_count)
+ dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n",
+ experimental_iopoll_q_cnt);
+ else
+ hisi_hba->iopoll_q_cnt = experimental_iopoll_q_cnt;
+
if (hisi_sas_alloc(hisi_hba)) {
hisi_sas_free(hisi_hba);
goto err_out;
@@ -4859,6 +4960,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_cmd_len = 16;
shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
+ if (hisi_hba->iopoll_q_cnt)
+ shost->nr_maps = 3;
+ else
+ shost->nr_maps = 1;
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
@@ -4977,6 +5082,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
+ interrupt_disable_v3_hw(hisi_hba);
rc = disable_host_v3_hw(hisi_hba);
if (rc)
dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
@@ -5006,6 +5112,21 @@ enum {
hip08,
};
+static void enable_host_v3_hw(struct hisi_hba *hisi_hba)
+{
+ u32 reg_val;
+
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+ (u32)((1ULL << hisi_hba->queue_count) - 1));
+
+ phys_init_v3_hw(hisi_hba);
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val &= ~AM_CTRL_SHUTDOWN_REQ_MSK;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
+}
+
static int _suspend_v3_hw(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
@@ -5028,14 +5149,20 @@ static int _suspend_v3_hw(struct device *device)
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
+ interrupt_disable_v3_hw(hisi_hba);
+
+#ifdef CONFIG_PM
+ if (atomic_read(&device->power.usage_count)) {
+ dev_err(dev, "PM suspend: host status cannot be suspended\n");
+ rc = -EBUSY;
+ goto err_out;
+ }
+#endif
rc = disable_host_v3_hw(hisi_hba);
if (rc) {
dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
- clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
- scsi_unblock_requests(shost);
- return rc;
+ goto err_out_recover_host;
}
hisi_sas_init_mem(hisi_hba);
@@ -5046,6 +5173,17 @@ static int _suspend_v3_hw(struct device *device)
dev_warn(dev, "end of suspending controller\n");
return 0;
+
+err_out_recover_host:
+ enable_host_v3_hw(hisi_hba);
+#ifdef CONFIG_PM
+err_out:
+#endif
+ interrupt_enable_v3_hw(hisi_hba);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ return rc;
}
static int _resume_v3_hw(struct device *device)
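
Taken together, the v3 hw changes above wire poll queues into blk-mq: shost->nr_maps is raised to cover HCTX_TYPE_POLL, hisi_sas_map_queues() sizes the default and poll maps separately, and queue_complete_v3_hw() is exposed through the new .mq_poll template hook. A condensed sketch of that wiring for a hypothetical LLD (queue counts and names below are assumptions, not driver values):

#include <scsi/scsi_host.h>
#include <linux/blk-mq.h>

#define MY_IRQ_QUEUES	14
#define MY_POLL_QUEUES	2

static void my_map_queues(struct Scsi_Host *shost)
{
	int i, qoff = 0;

	for (i = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *qmap = &shost->tag_set.map[i];

		if (i == HCTX_TYPE_DEFAULT)
			qmap->nr_queues = MY_IRQ_QUEUES;
		else if (i == HCTX_TYPE_POLL)
			qmap->nr_queues = MY_POLL_QUEUES;
		else
			qmap->nr_queues = 0;

		if (!qmap->nr_queues)
			continue;

		qmap->queue_offset = qoff;
		blk_mq_map_queues(qmap);	/* simple CPU-to-queue spread */
		qoff += qmap->nr_queues;
	}
}

static int my_mq_poll(struct Scsi_Host *shost, unsigned int queue)
{
	/* reap completions on hardware queue 'queue' without an interrupt */
	return 0;
}

The driver itself only sets nr_maps to 3 (and thus exposes a poll map) when experimental_iopoll_q_cnt is non-zero; otherwise it stays at 1 and all completions remain interrupt driven.
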
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f7f62e56afca..f0bc8bbb3938 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(scsi_remove_host);
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
struct device *dma_dev)
{
- struct scsi_host_template *sht = shost->hostt;
+ const struct scsi_host_template *sht = shost->hostt;
int error = -EINVAL;
shost_printk(KERN_INFO, shost, "%s\n",
@@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
- /* In case scsi_remove_host() has not been called. */
- scsi_proc_hostdir_rm(shost->hostt);
-
/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
@@ -392,7 +389,7 @@ static struct device_type scsi_host_type = {
* Return value:
* Pointer to a new Scsi_Host
**/
-struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
int index;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index f6da34850af9..af18d20f3079 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -967,7 +967,7 @@ ATTRIBUTE_GROUPS(hpsa_shost);
#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
HPSA_MAX_CONCURRENT_PASSTHRUS)
-static struct scsi_host_template hpsa_driver_template = {
+static const struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
.name = HPSA,
.proc_name = HPSA,
@@ -9108,7 +9108,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
free_percpu(h->lockup_detected); /* init_one 2 */
h->lockup_detected = NULL; /* init_one 2 */
- /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
hpda_free_ctlr_info(h); /* init_one 1 */
}
@@ -9476,8 +9475,6 @@ static void hpsa_free_performant_mode(struct ctlr_info *h)
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
- unsigned long transMethod = CFGTBL_Trans_Performant |
- CFGTBL_Trans_use_short_tags;
int i, rc;
if (hpsa_simple_mode)
@@ -9489,14 +9486,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
/* Check for I/O accelerator mode support */
if (trans_support & CFGTBL_Trans_io_accel1) {
- transMethod |= CFGTBL_Trans_io_accel1 |
- CFGTBL_Trans_enable_directed_msix;
rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
if (rc)
return rc;
} else if (trans_support & CFGTBL_Trans_io_accel2) {
- transMethod |= CFGTBL_Trans_io_accel2 |
- CFGTBL_Trans_enable_directed_msix;
rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
if (rc)
return rc;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 7e8903718245..06ccb51bf6a9 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1159,7 +1159,7 @@ static int hptiop_slave_config(struct scsi_device *sdev)
return 0;
}
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = driver_name,
.queuecommand = hptiop_queuecommand,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 1a0c0b7289d2..ce9eb00e2ca0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3611,7 +3611,7 @@ static struct attribute *ibmvfc_host_attrs[] = {
ATTRIBUTE_GROUPS(ibmvfc_host);
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IBM POWER Virtual FC Adapter",
.proc_name = IBMVFC_NAME,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index e8770310a64b..385f812b8793 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3698,16 +3698,6 @@ static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
return 1;
}
-static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd);
@@ -3726,11 +3716,6 @@ static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
spin_unlock_bh(&vscsi->intr_lock);
}
-static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
-{
- return 0;
-}
-
static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
{
struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
@@ -3765,15 +3750,6 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
-{
-}
-
-static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
{
struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
@@ -3982,15 +3958,9 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
.tpg_get_default_depth = ibmvscsis_get_default_depth,
.tpg_check_demo_mode = ibmvscsis_check_true,
.tpg_check_demo_mode_cache = ibmvscsis_check_true,
- .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
- .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
- .tpg_get_inst_index = ibmvscsis_tpg_get_inst_index,
.check_stop_free = ibmvscsis_check_stop_free,
.release_cmd = ibmvscsis_release_cmd,
- .sess_get_index = ibmvscsis_sess_get_index,
.write_pending = ibmvscsis_write_pending,
- .set_default_node_attributes = ibmvscsis_set_default_node_attrs,
- .get_cmd_state = ibmvscsis_get_cmd_state,
.queue_data_in = ibmvscsis_queue_data_in,
.queue_status = ibmvscsis_queue_status,
.queue_tm_rsp = ibmvscsis_queue_tm_rsp,
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 7a499d621c25..07db98161a03 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1096,7 +1096,7 @@ static int imm_adjust_queue(struct scsi_device *device)
return 0;
}
-static struct scsi_host_template imm_template = {
+static const struct scsi_host_template imm_template = {
.module = THIS_MODULE,
.proc_name = "imm",
.show_info = imm_show_info,
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 375261d67619..2a50fda3a628 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2788,7 +2788,7 @@ static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
initio_release_scb(host, cblk); /* Release SCB for current channel */
}
-static struct scsi_host_template initio_template = {
+static const struct scsi_host_template initio_template = {
.proc_name = "INI9100U",
.name = "Initio INI-9X00U/UW SCSI device driver",
.queuecommand = i91u_queuecommand,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index c74053f0b72f..4e13797b2a4a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -58,7 +58,6 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
@@ -595,10 +594,6 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
- if (ipr_cmd->ioa_cfg->sis64)
- trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
- else
- trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
@@ -636,7 +631,6 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
- struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
int hrrq_id;
@@ -651,18 +645,15 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
if (ipr_cmd->ioa_cfg->sis64) {
ioarcb->u.sis64_addr_data.data_ioadl_addr =
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
- ioasa64->u.gata.status = 0;
} else {
ioarcb->write_ioadl_addr =
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- ioasa->u.gata.status = 0;
}
ioasa->hdr.ioasc = 0;
ioasa->hdr.residual_data_len = 0;
ipr_cmd->scsi_cmd = NULL;
- ipr_cmd->qc = NULL;
ipr_cmd->sense_buffer[0] = 0;
ipr_cmd->dma_use_sg = 0;
}
@@ -806,48 +797,6 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
return 0;
}
-/**
- * __ipr_sata_eh_done - done function for aborted SATA commands
- * @ipr_cmd: ipr command struct
- *
- * This function is invoked for ops generated to SATA
- * devices which are being aborted.
- *
- * Return value:
- * none
- **/
-static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
-{
- struct ata_queued_cmd *qc = ipr_cmd->qc;
- struct ipr_sata_port *sata_port = qc->ap->private_data;
-
- qc->err_mask |= AC_ERR_OTHER;
- sata_port->ioasa.status |= ATA_BUSY;
- ata_qc_complete(qc);
- if (ipr_cmd->eh_comp)
- complete(ipr_cmd->eh_comp);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
-}
-
-/**
- * ipr_sata_eh_done - done function for aborted SATA commands
- * @ipr_cmd: ipr command struct
- *
- * This function is invoked for ops generated to SATA
- * devices which are being aborted.
- *
- * Return value:
- * none
- **/
-static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
-{
- struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
- unsigned long hrrq_flags;
-
- spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
- __ipr_sata_eh_done(ipr_cmd);
- spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
-}
/**
* __ipr_scsi_eh_done - mid-layer done function for aborted ops
@@ -920,8 +869,6 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
if (ipr_cmd->scsi_cmd)
ipr_cmd->done = __ipr_scsi_eh_done;
- else if (ipr_cmd->qc)
- ipr_cmd->done = __ipr_sata_eh_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
IPR_IOASC_IOA_WAS_RESET);
@@ -1143,31 +1090,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
}
/**
- * ipr_update_ata_class - Update the ata class in the resource entry
- * @res: resource entry struct
- * @proto: cfgte device bus protocol value
- *
- * Return value:
- * none
- **/
-static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
-{
- switch (proto) {
- case IPR_PROTO_SATA:
- case IPR_PROTO_SAS_STP:
- res->ata_class = ATA_DEV_ATA;
- break;
- case IPR_PROTO_SATA_ATAPI:
- case IPR_PROTO_SAS_STP_ATAPI:
- res->ata_class = ATA_DEV_ATAPI;
- break;
- default:
- res->ata_class = ATA_DEV_UNKNOWN;
- break;
- }
-}
-
-/**
* ipr_init_res_entry - Initialize a resource entry struct.
* @res: resource entry struct
* @cfgtew: config table entry wrapper struct
@@ -1179,7 +1101,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
struct ipr_config_table_entry_wrapper *cfgtew)
{
int found = 0;
- unsigned int proto;
struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
struct ipr_resource_entry *gscsi_res = NULL;
@@ -1190,10 +1111,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->resetting_device = 0;
res->reset_occurred = 0;
res->sdev = NULL;
- res->sata_port = NULL;
if (ioa_cfg->sis64) {
- proto = cfgtew->u.cfgte64->proto;
res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
res->qmodel = IPR_QUEUEING_MODEL64(res);
@@ -1239,7 +1158,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
set_bit(res->target, ioa_cfg->target_ids);
}
} else {
- proto = cfgtew->u.cfgte->proto;
res->qmodel = IPR_QUEUEING_MODEL(res);
res->flags = cfgtew->u.cfgte->flags;
if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1252,8 +1170,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->lun = cfgtew->u.cfgte->res_addr.lun;
res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
}
-
- ipr_update_ata_class(res, proto);
}
/**
@@ -1339,7 +1255,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
struct ipr_config_table_entry_wrapper *cfgtew)
{
char buffer[IPR_MAX_RES_PATH_LENGTH];
- unsigned int proto;
int new_path = 0;
if (res->ioa_cfg->sis64) {
@@ -1351,7 +1266,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
sizeof(struct ipr_std_inq_data));
res->qmodel = IPR_QUEUEING_MODEL64(res);
- proto = cfgtew->u.cfgte64->proto;
res->res_handle = cfgtew->u.cfgte64->res_handle;
res->dev_id = cfgtew->u.cfgte64->dev_id;
@@ -1380,11 +1294,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
sizeof(struct ipr_std_inq_data));
res->qmodel = IPR_QUEUEING_MODEL(res);
- proto = cfgtew->u.cfgte->proto;
res->res_handle = cfgtew->u.cfgte->res_handle;
}
-
- ipr_update_ata_class(res, proto);
}
/**
@@ -4496,17 +4407,6 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
**/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
- struct ipr_resource_entry *res;
- unsigned long lock_flags = 0;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- res = (struct ipr_resource_entry *)sdev->hostdata;
-
- if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
- qdepth = IPR_MAX_CMD_PER_ATA_LUN;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
scsi_change_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
@@ -4799,68 +4699,13 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
return NULL;
}
-static struct ata_port_info sata_port_info;
-
-/**
- * ipr_target_alloc - Prepare for commands to a SCSI target
- * @starget: scsi target struct
- *
- * If the device is a SATA device, this function allocates an
- * ATA port with libata, else it does nothing.
- *
- * Return value:
- * 0 on success / non-0 on failure
- **/
-static int ipr_target_alloc(struct scsi_target *starget)
-{
- struct Scsi_Host *shost = dev_to_shost(&starget->dev);
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
- struct ipr_sata_port *sata_port;
- struct ata_port *ap;
- struct ipr_resource_entry *res;
- unsigned long lock_flags;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- res = ipr_find_starget(starget);
- starget->hostdata = NULL;
-
- if (res && ipr_is_gata(res)) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
- if (!sata_port)
- return -ENOMEM;
-
- ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
- if (ap) {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- sata_port->ioa_cfg = ioa_cfg;
- sata_port->ap = ap;
- sata_port->res = res;
-
- res->sata_port = sata_port;
- ap->private_data = sata_port;
- starget->hostdata = sata_port;
- } else {
- kfree(sata_port);
- return -ENOMEM;
- }
- }
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- return 0;
-}
-
/**
* ipr_target_destroy - Destroy a SCSI target
* @starget: scsi target struct
*
- * If the device was a SATA device, this function frees the libata
- * ATA port, else it does nothing.
- *
**/
static void ipr_target_destroy(struct scsi_target *starget)
{
- struct ipr_sata_port *sata_port = starget->hostdata;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
@@ -4874,12 +4719,6 @@ static void ipr_target_destroy(struct scsi_target *starget)
clear_bit(starget->id, ioa_cfg->target_ids);
}
}
-
- if (sata_port) {
- starget->hostdata = NULL;
- ata_sas_port_destroy(sata_port->ap);
- kfree(sata_port);
- }
}
/**
@@ -4922,11 +4761,8 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *) sdev->hostdata;
if (res) {
- if (res->sata_port)
- res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
sdev->hostdata = NULL;
res->sdev = NULL;
- res->sata_port = NULL;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
@@ -4944,7 +4780,6 @@ static int ipr_slave_configure(struct scsi_device *sdev)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
- struct ata_port *ap = NULL;
unsigned long lock_flags = 0;
char buffer[IPR_MAX_RES_PATH_LENGTH];
@@ -4964,15 +4799,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
IPR_VSET_RW_TIMEOUT);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
- if (ipr_is_gata(res) && res->sata_port)
- ap = res->sata_port->ap;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ap) {
- scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
- ata_sas_slave_configure(sdev, ap);
- }
-
if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
ipr_format_res_path(ioa_cfg,
@@ -4984,37 +4812,6 @@ static int ipr_slave_configure(struct scsi_device *sdev)
}
/**
- * ipr_ata_slave_alloc - Prepare for commands to a SATA device
- * @sdev: scsi device struct
- *
- * This function initializes an ATA port so that future commands
- * sent through queuecommand will work.
- *
- * Return value:
- * 0 on success
- **/
-static int ipr_ata_slave_alloc(struct scsi_device *sdev)
-{
- struct ipr_sata_port *sata_port = NULL;
- int rc = -ENXIO;
-
- ENTER;
- if (sdev->sdev_target)
- sata_port = sdev->sdev_target->hostdata;
- if (sata_port) {
- rc = ata_sas_port_init(sata_port->ap);
- if (rc == 0)
- rc = ata_sas_sync_probe(sata_port->ap);
- }
-
- if (rc)
- ipr_slave_destroy(sdev);
-
- LEAVE;
- return rc;
-}
-
-/**
* ipr_slave_alloc - Prepare for commands to a device.
* @sdev: scsi device struct
*
@@ -5047,8 +4844,10 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
res->needs_sync_complete = 1;
rc = 0;
if (ipr_is_gata(res)) {
+ sdev_printk(KERN_ERR, sdev, "SATA devices are no longer "
+ "supported by this driver. Skipping device.\n");
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return ipr_ata_slave_alloc(sdev);
+ return -ENXIO;
}
}
@@ -5092,23 +4891,6 @@ static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_match_res - Match function for specified resource entry
- * @ipr_cmd: ipr command struct
- * @resource: resource entry to match
- *
- * Returns:
- * 1 if command matches sdev / 0 if command does not match sdev
- **/
-static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
-{
- struct ipr_resource_entry *res = resource;
-
- if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
- return 1;
- return 0;
-}
-
-/**
* ipr_wait_for_ops - Wait for matching commands to complete
* @ioa_cfg: ioa config struct
* @device: device to match (sdev)
@@ -5220,8 +5002,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
* This function issues a device reset to the affected device.
* If the device is a SCSI device, a LUN reset will be sent
* to the device first. If that does not work, a target reset
- * will be sent. If the device is a SATA device, a PHY reset will
- * be sent.
+ * will be sent.
*
* Return value:
* 0 on success / non-zero on failure
@@ -5232,7 +5013,6 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
struct ipr_cmd_pkt *cmd_pkt;
- struct ipr_ioarcb_ata_regs *regs;
u32 ioasc;
ENTER;
@@ -5240,87 +5020,22 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ioarcb = &ipr_cmd->ioarcb;
cmd_pkt = &ioarcb->cmd_pkt;
- if (ipr_cmd->ioa_cfg->sis64) {
- regs = &ipr_cmd->i.ata_ioadl.regs;
+ if (ipr_cmd->ioa_cfg->sis64)
ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
- } else
- regs = &ioarcb->u.add_data.u.regs;
ioarcb->res_handle = res->res_handle;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
- if (ipr_is_gata(res)) {
- cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
- ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
- regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
- }
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
- if (ipr_cmd->ioa_cfg->sis64)
- memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
- sizeof(struct ipr_ioasa_gata));
- else
- memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
- sizeof(struct ipr_ioasa_gata));
- }
LEAVE;
return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
- * ipr_sata_reset - Reset the SATA port
- * @link: SATA link to reset
- * @classes: class of the attached device
- * @deadline: unused
- *
- * This function issues a SATA phy reset to the affected ATA link.
- *
- * Return value:
- * 0 on success / non-zero on failure
- **/
-static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
- unsigned long deadline)
-{
- struct ipr_sata_port *sata_port = link->ap->private_data;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_resource_entry *res;
- unsigned long lock_flags = 0;
- int rc = -ENXIO, ret;
-
- ENTER;
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while (ioa_cfg->in_reset_reload) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- }
-
- res = sata_port->res;
- if (res) {
- rc = ipr_device_reset(ioa_cfg, res);
- *classes = res->ata_class;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
- if (ret != SUCCESS) {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- }
- } else
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-
- LEAVE;
- return rc;
-}
-
-/**
* __ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct
*
@@ -5333,12 +5048,9 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
**/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
- struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
- struct ata_port *ap;
- int rc = 0, i;
- struct ipr_hrr_queue *hrrq;
+ int rc = 0;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -5354,36 +5066,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
- for_each_hrrq(hrrq, ioa_cfg) {
- spin_lock(&hrrq->_lock);
- for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
- ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
-
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (!ipr_cmd->qc)
- continue;
- if (ipr_cmnd_is_free(ipr_cmd))
- continue;
-
- ipr_cmd->done = ipr_sata_eh_done;
- if (!(ipr_cmd->qc->flags & ATA_QCFLAG_EH)) {
- ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
- ipr_cmd->qc->flags |= ATA_QCFLAG_EH;
- }
- }
- }
- spin_unlock(&hrrq->_lock);
- }
res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
- if (ipr_is_gata(res) && res->sata_port) {
- ap = res->sata_port->ap;
- spin_unlock_irq(scsi_cmd->device->host->host_lock);
- ata_std_error_handler(ap);
- spin_lock_irq(scsi_cmd->device->host->host_lock);
- } else
- rc = ipr_device_reset(ioa_cfg, res);
+ rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
res->reset_occurred = 1;
@@ -5407,12 +5093,8 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
- if (rc == SUCCESS) {
- if (ipr_is_gata(res) && res->sata_port)
- rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
- else
- rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
- }
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
return rc;
}
@@ -6564,7 +6246,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
- unsigned long hrrq_flags, lock_flags;
+ unsigned long hrrq_flags;
int rc;
struct ipr_hrr_queue *hrrq;
int hrrq_id;
@@ -6574,13 +6256,6 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata;
- if (ipr_is_gata(res) && res->sata_port) {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return rc;
- }
-
hrrq_id = ipr_get_hrrq_index(ioa_cfg);
hrrq = &ioa_cfg->hrrq[hrrq_id];
@@ -6691,30 +6366,6 @@ err_nodev:
}
/**
- * ipr_ioctl - IOCTL handler
- * @sdev: scsi device struct
- * @cmd: IOCTL cmd
- * @arg: IOCTL arg
- *
- * Return value:
- * 0 on success / other on failure
- **/
-static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg)
-{
- struct ipr_resource_entry *res;
-
- res = (struct ipr_resource_entry *)sdev->hostdata;
- if (res && ipr_is_gata(res)) {
- if (cmd == HDIO_GET_IDENTITY)
- return -ENOTTY;
- return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
- }
-
- return -EINVAL;
-}
-
-/**
* ipr_ioa_info - Get information about the card/driver
* @host: scsi host struct
*
@@ -6736,16 +6387,11 @@ static const char *ipr_ioa_info(struct Scsi_Host *host)
return buffer;
}
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IPR",
.info = ipr_ioa_info,
- .ioctl = ipr_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ipr_ioctl,
-#endif
.queuecommand = ipr_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
@@ -6753,7 +6399,6 @@ static struct scsi_host_template driver_template = {
.slave_configure = ipr_slave_configure,
.slave_destroy = ipr_slave_destroy,
.scan_finished = ipr_scan_finished,
- .target_alloc = ipr_target_alloc,
.target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth,
.bios_param = ipr_biosparam,
@@ -6767,418 +6412,6 @@ static struct scsi_host_template driver_template = {
.proc_name = IPR_NAME,
};
-/**
- * ipr_ata_phy_reset - libata phy_reset handler
- * @ap: ata port to reset
- *
- **/
-static void ipr_ata_phy_reset(struct ata_port *ap)
-{
- unsigned long flags;
- struct ipr_sata_port *sata_port = ap->private_data;
- struct ipr_resource_entry *res = sata_port->res;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- int rc;
-
- ENTER;
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while (ioa_cfg->in_reset_reload) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- }
-
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
- goto out_unlock;
-
- rc = ipr_device_reset(ioa_cfg, res);
-
- if (rc) {
- ap->link.device[0].class = ATA_DEV_NONE;
- goto out_unlock;
- }
-
- ap->link.device[0].class = res->ata_class;
- if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
- ap->link.device[0].class = ATA_DEV_NONE;
-
-out_unlock:
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- LEAVE;
-}
-
-/**
- * ipr_ata_post_internal - Cleanup after an internal command
- * @qc: ATA queued command
- *
- * Return value:
- * none
- **/
-static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
-{
- struct ipr_sata_port *sata_port = qc->ap->private_data;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_cmnd *ipr_cmd;
- struct ipr_hrr_queue *hrrq;
- unsigned long flags;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while (ioa_cfg->in_reset_reload) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- }
-
- for_each_hrrq(hrrq, ioa_cfg) {
- spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (ipr_cmd->qc == qc) {
- ipr_device_reset(ioa_cfg, sata_port->res);
- break;
- }
- }
- spin_unlock(&hrrq->_lock);
- }
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
-}
-
-/**
- * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
- * @regs: destination
- * @tf: source ATA taskfile
- *
- * Return value:
- * none
- **/
-static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
- struct ata_taskfile *tf)
-{
- regs->feature = tf->feature;
- regs->nsect = tf->nsect;
- regs->lbal = tf->lbal;
- regs->lbam = tf->lbam;
- regs->lbah = tf->lbah;
- regs->device = tf->device;
- regs->command = tf->command;
- regs->hob_feature = tf->hob_feature;
- regs->hob_nsect = tf->hob_nsect;
- regs->hob_lbal = tf->hob_lbal;
- regs->hob_lbam = tf->hob_lbam;
- regs->hob_lbah = tf->hob_lbah;
- regs->ctl = tf->ctl;
-}
-
-/**
- * ipr_sata_done - done function for SATA commands
- * @ipr_cmd: ipr command struct
- *
- * This function is invoked by the interrupt handler for
- * ops generated by the SCSI mid-layer to SATA devices
- *
- * Return value:
- * none
- **/
-static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
-{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- struct ata_queued_cmd *qc = ipr_cmd->qc;
- struct ipr_sata_port *sata_port = qc->ap->private_data;
- struct ipr_resource_entry *res = sata_port->res;
- u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
- spin_lock(&ipr_cmd->hrrq->_lock);
- if (ipr_cmd->ioa_cfg->sis64)
- memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
- sizeof(struct ipr_ioasa_gata));
- else
- memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
- sizeof(struct ipr_ioasa_gata));
- ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
-
- if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
- scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
-
- if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
- qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
- else
- qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- spin_unlock(&ipr_cmd->hrrq->_lock);
- ata_qc_complete(qc);
-}
-
-/**
- * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
- * @ipr_cmd: ipr command struct
- * @qc: ATA queued command
- *
- **/
-static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
- struct ata_queued_cmd *qc)
-{
- u32 ioadl_flags = 0;
- struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
- struct ipr_ioadl64_desc *last_ioadl64 = NULL;
- int len = qc->nbytes;
- struct scatterlist *sg;
- unsigned int si;
- dma_addr_t dma_addr = ipr_cmd->dma_addr;
-
- if (len == 0)
- return;
-
- if (qc->dma_dir == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- } else if (qc->dma_dir == DMA_FROM_DEVICE)
- ioadl_flags = IPR_IOADL_FLAGS_READ;
-
- ioarcb->data_transfer_length = cpu_to_be32(len);
- ioarcb->ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
- ioarcb->u.sis64_addr_data.data_ioadl_addr =
- cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
-
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- ioadl64->flags = cpu_to_be32(ioadl_flags);
- ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
- ioadl64->address = cpu_to_be64(sg_dma_address(sg));
-
- last_ioadl64 = ioadl64;
- ioadl64++;
- }
-
- if (likely(last_ioadl64))
- last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-}
-
-/**
- * ipr_build_ata_ioadl - Build an ATA scatter/gather list
- * @ipr_cmd: ipr command struct
- * @qc: ATA queued command
- *
- **/
-static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
- struct ata_queued_cmd *qc)
-{
- u32 ioadl_flags = 0;
- struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
- struct ipr_ioadl_desc *last_ioadl = NULL;
- int len = qc->nbytes;
- struct scatterlist *sg;
- unsigned int si;
-
- if (len == 0)
- return;
-
- if (qc->dma_dir == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->data_transfer_length = cpu_to_be32(len);
- ioarcb->ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- } else if (qc->dma_dir == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(len);
- ioarcb->read_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- }
-
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
- ioadl->address = cpu_to_be32(sg_dma_address(sg));
-
- last_ioadl = ioadl;
- ioadl++;
- }
-
- if (likely(last_ioadl))
- last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-}
-
-/**
- * ipr_qc_defer - Get a free ipr_cmd
- * @qc: queued command
- *
- * Return value:
- * 0 if success
- **/
-static int ipr_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ipr_sata_port *sata_port = ap->private_data;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_cmnd *ipr_cmd;
- struct ipr_hrr_queue *hrrq;
- int hrrq_id;
-
- hrrq_id = ipr_get_hrrq_index(ioa_cfg);
- hrrq = &ioa_cfg->hrrq[hrrq_id];
-
- qc->lldd_task = NULL;
- spin_lock(&hrrq->_lock);
- if (unlikely(hrrq->ioa_is_dead)) {
- spin_unlock(&hrrq->_lock);
- return 0;
- }
-
- if (unlikely(!hrrq->allow_cmds)) {
- spin_unlock(&hrrq->_lock);
- return ATA_DEFER_LINK;
- }
-
- ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
- if (ipr_cmd == NULL) {
- spin_unlock(&hrrq->_lock);
- return ATA_DEFER_LINK;
- }
-
- qc->lldd_task = ipr_cmd;
- spin_unlock(&hrrq->_lock);
- return 0;
-}
-
-/**
- * ipr_qc_issue - Issue a SATA qc to a device
- * @qc: queued command
- *
- * Return value:
- * 0 if success
- **/
-static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ipr_sata_port *sata_port = ap->private_data;
- struct ipr_resource_entry *res = sata_port->res;
- struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
- struct ipr_cmnd *ipr_cmd;
- struct ipr_ioarcb *ioarcb;
- struct ipr_ioarcb_ata_regs *regs;
-
- if (qc->lldd_task == NULL)
- ipr_qc_defer(qc);
-
- ipr_cmd = qc->lldd_task;
- if (ipr_cmd == NULL)
- return AC_ERR_SYSTEM;
-
- qc->lldd_task = NULL;
- spin_lock(&ipr_cmd->hrrq->_lock);
- if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
- ipr_cmd->hrrq->ioa_is_dead)) {
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- spin_unlock(&ipr_cmd->hrrq->_lock);
- return AC_ERR_SYSTEM;
- }
-
- ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
- ioarcb = &ipr_cmd->ioarcb;
-
- if (ioa_cfg->sis64) {
- regs = &ipr_cmd->i.ata_ioadl.regs;
- ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
- } else
- regs = &ioarcb->u.add_data.u.regs;
-
- memset(regs, 0, sizeof(*regs));
- ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
-
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
- ipr_cmd->qc = qc;
- ipr_cmd->done = ipr_sata_done;
- ipr_cmd->ioarcb.res_handle = res->res_handle;
- ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
- ipr_cmd->dma_use_sg = qc->n_elem;
-
- if (ioa_cfg->sis64)
- ipr_build_ata_ioadl64(ipr_cmd, qc);
- else
- ipr_build_ata_ioadl(ipr_cmd, qc);
-
- regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
- ipr_copy_sata_tf(regs, &qc->tf);
- memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
- ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
-
- switch (qc->tf.protocol) {
- case ATA_PROT_NODATA:
- case ATA_PROT_PIO:
- break;
-
- case ATA_PROT_DMA:
- regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
- break;
-
- case ATAPI_PROT_PIO:
- case ATAPI_PROT_NODATA:
- regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
- break;
-
- case ATAPI_PROT_DMA:
- regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
- regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
- break;
-
- default:
- WARN_ON(1);
- spin_unlock(&ipr_cmd->hrrq->_lock);
- return AC_ERR_INVALID;
- }
-
- ipr_send_command(ipr_cmd);
- spin_unlock(&ipr_cmd->hrrq->_lock);
-
- return 0;
-}
-
-/**
- * ipr_qc_fill_rtf - Read result TF
- * @qc: ATA queued command
- **/
-static void ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
-{
- struct ipr_sata_port *sata_port = qc->ap->private_data;
- struct ipr_ioasa_gata *g = &sata_port->ioasa;
- struct ata_taskfile *tf = &qc->result_tf;
-
- tf->feature = g->error;
- tf->nsect = g->nsect;
- tf->lbal = g->lbal;
- tf->lbam = g->lbam;
- tf->lbah = g->lbah;
- tf->device = g->device;
- tf->command = g->status;
- tf->hob_nsect = g->hob_nsect;
- tf->hob_lbal = g->hob_lbal;
- tf->hob_lbam = g->hob_lbam;
- tf->hob_lbah = g->hob_lbah;
-}
-
-static struct ata_port_operations ipr_sata_ops = {
- .phy_reset = ipr_ata_phy_reset,
- .hardreset = ipr_sata_reset,
- .post_internal_cmd = ipr_ata_post_internal,
- .qc_prep = ata_noop_qc_prep,
- .qc_defer = ipr_qc_defer,
- .qc_issue = ipr_qc_issue,
- .qc_fill_rtf = ipr_qc_fill_rtf,
- .port_start = ata_sas_port_start,
- .port_stop = ata_sas_port_stop
-};
-
-static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
- ATA_FLAG_SAS_HOST,
- .pio_mask = ATA_PIO4_ONLY,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ipr_sata_ops
-};
-
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
PVR_NORTHSTAR,
@@ -10181,7 +9414,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
- ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 69444d21fca1..c77d6ca1a210 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -16,7 +16,6 @@
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/completion.h>
-#include <linux/libata.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/irq_poll.h>
@@ -35,7 +34,6 @@
* This can be adjusted at runtime through sysfs device attributes.
*/
#define IPR_MAX_CMD_PER_LUN 6
-#define IPR_MAX_CMD_PER_ATA_LUN 1
/*
* IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
@@ -197,7 +195,6 @@
#define IPR_LUN_RESET 0x40
#define IPR_TARGET_RESET 0x20
#define IPR_BUS_RESET 0x10
-#define IPR_ATA_PHY_RESET 0x80
#define IPR_ID_HOST_RR_Q 0xC4
#define IPR_QUERY_IOA_CONFIG 0xC5
#define IPR_CANCEL_ALL_REQUESTS 0xCE
@@ -521,7 +518,6 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_SCSICDB 0x00
#define IPR_RQTYPE_IOACMD 0x01
#define IPR_RQTYPE_HCAM 0x02
-#define IPR_RQTYPE_ATA_PASSTHRU 0x04
#define IPR_RQTYPE_PIPE 0x05
u8 reserved2;
@@ -546,30 +542,6 @@ struct ipr_cmd_pkt {
__be16 timeout;
}__attribute__ ((packed, aligned(4)));
-struct ipr_ioarcb_ata_regs { /* 22 bytes */
- u8 flags;
-#define IPR_ATA_FLAG_PACKET_CMD 0x80
-#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
-#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
- u8 reserved[3];
-
- __be16 data;
- u8 feature;
- u8 nsect;
- u8 lbal;
- u8 lbam;
- u8 lbah;
- u8 device;
- u8 command;
- u8 reserved2[3];
- u8 hob_feature;
- u8 hob_nsect;
- u8 hob_lbal;
- u8 hob_lbam;
- u8 hob_lbah;
- u8 ctl;
-}__attribute__ ((packed, aligned(2)));
-
struct ipr_ioadl_desc {
__be32 flags_and_data_len;
#define IPR_IOADL_FLAGS_MASK 0xff000000
@@ -591,15 +563,8 @@ struct ipr_ioadl64_desc {
__be64 address;
}__attribute__((packed, aligned (16)));
-struct ipr_ata64_ioadl {
- struct ipr_ioarcb_ata_regs regs;
- u16 reserved[5];
- struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
-}__attribute__((packed, aligned (16)));
-
struct ipr_ioarcb_add_data {
union {
- struct ipr_ioarcb_ata_regs regs;
struct ipr_ioadl_desc ioadl[5];
__be32 add_cmd_parms[10];
} u;
@@ -665,21 +630,6 @@ struct ipr_ioasa_gpdd {
__be32 ioa_data[2];
}__attribute__((packed, aligned (4)));
-struct ipr_ioasa_gata {
- u8 error;
- u8 nsect; /* Interrupt reason */
- u8 lbal;
- u8 lbam;
- u8 lbah;
- u8 device;
- u8 status;
- u8 alt_status; /* ATA CTL */
- u8 hob_nsect;
- u8 hob_lbal;
- u8 hob_lbam;
- u8 hob_lbah;
-}__attribute__((packed, aligned (4)));
-
struct ipr_auto_sense {
__be16 auto_sense_len;
__be16 ioa_data_len;
@@ -713,7 +663,6 @@ struct ipr_ioasa_hdr {
__be32 ioasc_specific; /* status code specific field */
#define IPR_ADDITIONAL_STATUS_FMT 0x80000000
#define IPR_AUTOSENSE_VALID 0x40000000
-#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
#define IPR_FIELD_POINTER_MASK 0x0000ffff
@@ -727,7 +676,6 @@ struct ipr_ioasa {
struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd;
- struct ipr_ioasa_gata gata;
} u;
struct ipr_auto_sense auto_sense;
@@ -741,7 +689,6 @@ struct ipr_ioasa64 {
struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd;
- struct ipr_ioasa_gata gata;
} u;
struct ipr_auto_sense auto_sense;
@@ -1279,13 +1226,6 @@ struct ipr_bus_attributes {
u32 max_xfer_rate;
};
-struct ipr_sata_port {
- struct ipr_ioa_cfg *ioa_cfg;
- struct ata_port *ap;
- struct ipr_resource_entry *res;
- struct ipr_ioasa_gata ioasa;
-};
-
struct ipr_resource_entry {
u8 needs_sync_complete:1;
u8 in_erp:1;
@@ -1323,7 +1263,6 @@ struct ipr_resource_entry {
struct ipr_ioa_cfg *ioa_cfg;
struct scsi_device *sdev;
- struct ipr_sata_port *sata_port;
struct list_head queue;
}; /* struct ipr_resource_entry */
@@ -1582,7 +1521,6 @@ struct ipr_ioa_cfg {
struct ipr_cmnd *reset_cmd;
int (*reset) (struct ipr_cmnd *);
- struct ata_host ata_host;
char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmd"
u32 max_cmds;
@@ -1604,7 +1542,6 @@ struct ipr_cmnd {
union {
struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
- struct ipr_ata64_ioadl ata_ioadl;
} i;
union {
struct ipr_ioasa ioasa;
@@ -1612,7 +1549,6 @@ struct ipr_cmnd {
} s;
struct list_head queue;
struct scsi_cmnd *scsi_cmd;
- struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index e294d5d961eb..ac1e04b86d8f 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -149,7 +149,7 @@ static struct attribute *isci_host_attrs[] = {
ATTRIBUTE_GROUPS(isci_host);
-static struct scsi_host_template isci_sht = {
+static const struct scsi_host_template isci_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c76f82fb8b63..9637d4bc2bc9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -47,7 +47,7 @@ MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
-static struct scsi_host_template iscsi_sw_tcp_sht;
+static const struct scsi_host_template iscsi_sw_tcp_sht;
static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = ~0;
@@ -771,13 +771,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
iscsi_set_param(cls_conn, param, buf, buflen);
break;
case ISCSI_PARAM_DATADGST_EN:
- iscsi_set_param(cls_conn, param, buf, buflen);
-
mutex_lock(&tcp_sw_conn->sock_lock);
if (!tcp_sw_conn->sock) {
mutex_unlock(&tcp_sw_conn->sock_lock);
return -ENOTCONN;
}
+ iscsi_set_param(cls_conn, param, buf, buflen);
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
mutex_unlock(&tcp_sw_conn->sock_lock);
@@ -1072,7 +1071,7 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
return 0;
}
-static struct scsi_host_template iscsi_sw_tcp_sht = {
+static const struct scsi_host_template iscsi_sw_tcp_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over TCP/IP",
.queuecommand = iscsi_queuecommand,
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 60a88a95a8e2..0c842fb29aa0 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -104,7 +104,7 @@ static const struct esp_driver_ops jazz_esp_ops = {
static int esp_jazz_probe(struct platform_device *dev)
{
- struct scsi_host_template *tpnt = &scsi_esp_template;
+ const struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
struct esp *esp;
struct resource *res;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 127f3d7f19dc..0fda8905eabd 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2895,7 +2895,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_add);
* This should be called by partial offload and software iscsi drivers.
* To access the driver specific memory use the iscsi_host_priv() macro.
*/
-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht,
int dd_data_size, bool xmit_can_sleep)
{
struct Scsi_Host *shost;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 72fdb2e5d047..8c6afe724944 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -360,6 +360,33 @@ static void sas_destruct_ports(struct asd_sas_port *port)
}
}
+static bool sas_abort_cmd(struct request *req, void *data)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct domain_device *dev = data;
+
+ if (dev == cmd_to_domain_dev(cmd))
+ blk_abort_request(req);
+ return true;
+}
+
+static void sas_abort_device_scsi_cmds(struct domain_device *dev)
+{
+ struct sas_ha_struct *sas_ha = dev->port->ha;
+ struct Scsi_Host *shost = sas_ha->core.shost;
+
+ if (dev_is_expander(dev->dev_type))
+ return;
+
+ /*
+	 * For a removed device with active IOs, user space applications would
+	 * otherwise wait a very long time for the command timeout. That wait is
+	 * unnecessary because a removed device will never complete the IOs.
+	 * Abort the inflight IOs here so that EH can kick in quickly.
+ */
+ blk_mq_tagset_busy_iter(&shost->tag_set, sas_abort_cmd, dev);
+}
+
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -372,6 +399,8 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
}
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
+ if (test_bit(SAS_DEV_GONE, &dev->state))
+ sas_abort_device_scsi_cmds(dev);
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index dc670304f181..adcac57aaee6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1198,37 +1198,37 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
sas_route_char(child, child_phy));
}
+static bool sas_eeds_valid(struct domain_device *parent,
+ struct domain_device *child)
+{
+ struct sas_discovery *disc = &parent->port->disc;
+
+ return (SAS_ADDR(disc->eeds_a) == SAS_ADDR(parent->sas_addr) ||
+ SAS_ADDR(disc->eeds_a) == SAS_ADDR(child->sas_addr)) &&
+ (SAS_ADDR(disc->eeds_b) == SAS_ADDR(parent->sas_addr) ||
+ SAS_ADDR(disc->eeds_b) == SAS_ADDR(child->sas_addr));
+}
+
static int sas_check_eeds(struct domain_device *child,
- struct ex_phy *parent_phy,
- struct ex_phy *child_phy)
+ struct ex_phy *parent_phy,
+ struct ex_phy *child_phy)
{
int res = 0;
struct domain_device *parent = child->parent;
+ struct sas_discovery *disc = &parent->port->disc;
- if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
+ if (SAS_ADDR(disc->fanout_sas_addr) != 0) {
res = -ENODEV;
pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n",
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
SAS_ADDR(child->sas_addr),
child_phy->phy_id,
- SAS_ADDR(parent->port->disc.fanout_sas_addr));
- } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
- memcpy(parent->port->disc.eeds_a, parent->sas_addr,
- SAS_ADDR_SIZE);
- memcpy(parent->port->disc.eeds_b, child->sas_addr,
- SAS_ADDR_SIZE);
- } else if (((SAS_ADDR(parent->port->disc.eeds_a) ==
- SAS_ADDR(parent->sas_addr)) ||
- (SAS_ADDR(parent->port->disc.eeds_a) ==
- SAS_ADDR(child->sas_addr)))
- &&
- ((SAS_ADDR(parent->port->disc.eeds_b) ==
- SAS_ADDR(parent->sas_addr)) ||
- (SAS_ADDR(parent->port->disc.eeds_b) ==
- SAS_ADDR(child->sas_addr))))
- ;
- else {
+ SAS_ADDR(disc->fanout_sas_addr));
+ } else if (SAS_ADDR(disc->eeds_a) == 0) {
+ memcpy(disc->eeds_a, parent->sas_addr, SAS_ADDR_SIZE);
+ memcpy(disc->eeds_b, child->sas_addr, SAS_ADDR_SIZE);
+ } else if (!sas_eeds_valid(parent, child)) {
res = -ENODEV;
pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n",
SAS_ADDR(parent->sas_addr),
@@ -1240,11 +1240,56 @@ static int sas_check_eeds(struct domain_device *child,
return res;
}
-/* Here we spill over 80 columns. It is intentional.
- */
-static int sas_check_parent_topology(struct domain_device *child)
+static int sas_check_edge_expander_topo(struct domain_device *child,
+ struct ex_phy *parent_phy)
+{
+ struct expander_device *child_ex = &child->ex_dev;
+ struct expander_device *parent_ex = &child->parent->ex_dev;
+ struct ex_phy *child_phy;
+
+ child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
+
+ if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
+ child_phy->routing_attr != TABLE_ROUTING)
+ goto error;
+ } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
+ if (child_phy->routing_attr == SUBTRACTIVE_ROUTING)
+ return sas_check_eeds(child, parent_phy, child_phy);
+ else if (child_phy->routing_attr != TABLE_ROUTING)
+ goto error;
+ } else if (parent_phy->routing_attr == TABLE_ROUTING) {
+ if (child_phy->routing_attr != SUBTRACTIVE_ROUTING &&
+ (child_phy->routing_attr != TABLE_ROUTING ||
+ !child_ex->t2t_supp || !parent_ex->t2t_supp))
+ goto error;
+ }
+
+ return 0;
+error:
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ return -ENODEV;
+}
+
+static int sas_check_fanout_expander_topo(struct domain_device *child,
+ struct ex_phy *parent_phy)
{
struct expander_device *child_ex = &child->ex_dev;
+ struct ex_phy *child_phy;
+
+ child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
+
+ if (parent_phy->routing_attr == TABLE_ROUTING &&
+ child_phy->routing_attr == SUBTRACTIVE_ROUTING)
+ return 0;
+
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+
+ return -ENODEV;
+}
+
+static int sas_check_parent_topology(struct domain_device *child)
+{
struct expander_device *parent_ex;
int i;
int res = 0;
@@ -1259,7 +1304,6 @@ static int sas_check_parent_topology(struct domain_device *child)
for (i = 0; i < parent_ex->num_phys; i++) {
struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
- struct ex_phy *child_phy;
if (parent_phy->phy_state == PHY_VACANT ||
parent_phy->phy_state == PHY_NOT_PRESENT)
@@ -1268,40 +1312,14 @@ static int sas_check_parent_topology(struct domain_device *child)
if (!sas_phy_match_dev_addr(child, parent_phy))
continue;
- child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
-
switch (child->parent->dev_type) {
case SAS_EDGE_EXPANDER_DEVICE:
- if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
- if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
- child_phy->routing_attr != TABLE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
- if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) {
- res = sas_check_eeds(child, parent_phy, child_phy);
- } else if (child_phy->routing_attr != TABLE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- } else if (parent_phy->routing_attr == TABLE_ROUTING) {
- if (child_phy->routing_attr == SUBTRACTIVE_ROUTING ||
- (child_phy->routing_attr == TABLE_ROUTING &&
- child_ex->t2t_supp && parent_ex->t2t_supp)) {
- /* All good */;
- } else {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- }
+ if (sas_check_edge_expander_topo(child, parent_phy))
+ res = -ENODEV;
break;
case SAS_FANOUT_EXPANDER_DEVICE:
- if (parent_phy->routing_attr != TABLE_ROUTING ||
- child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ if (sas_check_fanout_expander_topo(child, parent_phy))
res = -ENODEV;
- }
break;
default:
break;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index cf55f8e3bd9f..5e3a93d13a91 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1036,7 +1036,6 @@ struct lpfc_hba {
#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */
#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
-#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
@@ -1190,7 +1189,6 @@ struct lpfc_hba {
#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
#endif
- uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_suppress_link_up;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 22f2e046e8eb..21c7ecd3ede5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1644,6 +1644,12 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
!bf_get(lpfc_sliport_status_err, &portstat_reg))
return -EPERM;
+	/* There is no point in waiting if the port is in an unrecoverable
+ * state.
+ */
+ if (lpfc_sli4_unrecoverable_port(&portstat_reg))
+ return -EIO;
+
/* wait for the SLI port firmware ready after firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
@@ -4365,13 +4371,22 @@ static DEVICE_ATTR_RW(lpfc_link_speed);
/*
# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
-# 0 = aer disabled or not supported
# 1 = aer supported and enabled (default)
-# Value range is [0,1]. Default value is 1.
+# PCIe error reporting is always enabled by the PCI core, so this always
+# shows 1.
+#
+# N.B. Parts of the LPFC_ATTR machinery are open-coded here because some of
+# the underlying infrastructure (phba->cfg_aer_support) is gone.
*/
-LPFC_ATTR(aer_support, 1, 0, 1,
- "Enable PCIe device AER support");
-lpfc_param_show(aer_support)
+static uint lpfc_aer_support = 1;
+module_param(lpfc_aer_support, uint, S_IRUGO);
+MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
+static ssize_t
+lpfc_aer_support_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", lpfc_aer_support);
+}
/**
* lpfc_aer_support_store - Set the adapter for aer support
@@ -4382,76 +4397,27 @@ lpfc_param_show(aer_support)
* @count: unused variable.
*
* Description:
- * If the val is 1 and currently the device's AER capability was not
- * enabled, invoke the kernel's enable AER helper routine, trying to
- * enable the device's AER capability. If the helper routine enabling
- * AER returns success, update the device's cfg_aer_support flag to
- * indicate AER is supported by the device; otherwise, if the device
- * AER capability is already enabled to support AER, then do nothing.
- *
- * If the val is 0 and currently the device's AER support was enabled,
- * invoke the kernel's disable AER helper routine. After that, update
- * the device's cfg_aer_support flag to indicate AER is not supported
- * by the device; otherwise, if the device AER capability is already
- * disabled from supporting AER, then do nothing.
+ * PCIe error reporting is enabled by the PCI core, so drivers don't need
+ * to do anything. Retain this interface for backwards compatibility,
+ * but do nothing.
*
* Returns:
- * length of the buf on success if val is in range the intended mode
- * is supported.
- * -EINVAL if val out of range or intended mode is not supported.
+ * length of the buf on success
+ * -EINVAL if buf does not contain a number
**/
static ssize_t
lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int val = 0, rc = -EINVAL;
+ int val = 0;
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
- switch (val) {
- case 0:
- if (phba->hba_flag & HBA_AER_ENABLED) {
- rc = pci_disable_pcie_error_reporting(phba->pcidev);
- if (!rc) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~HBA_AER_ENABLED;
- spin_unlock_irq(&phba->hbalock);
- phba->cfg_aer_support = 0;
- rc = strlen(buf);
- } else
- rc = -EPERM;
- } else {
- phba->cfg_aer_support = 0;
- rc = strlen(buf);
- }
- break;
- case 1:
- if (!(phba->hba_flag & HBA_AER_ENABLED)) {
- rc = pci_enable_pcie_error_reporting(phba->pcidev);
- if (!rc) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_AER_ENABLED;
- spin_unlock_irq(&phba->hbalock);
- phba->cfg_aer_support = 1;
- rc = strlen(buf);
- } else
- rc = -EPERM;
- } else {
- phba->cfg_aer_support = 1;
- rc = strlen(buf);
- }
- break;
- default:
- rc = -EINVAL;
- break;
- }
- return rc;
+ dev_info_once(dev, "PCIe error reporting automatically enabled by the PCI core; sysfs write ignored\n");
+ return strlen(buf);
}
static DEVICE_ATTR_RW(lpfc_aer_support);
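A minimal sketch of the compatibility-only store pattern used above, with hypothetical names and kstrtoint() standing in for the isdigit()/sscanf() pair (not part of the patch):

static ssize_t compat_only_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int val;

	/* Still reject malformed input so existing scripts see an error... */
	if (kstrtoint(buf, 0, &val) || val < 0 || val > 1)
		return -EINVAL;

	/* ...but take no action: the PCI core already owns AER enablement. */
	return count;
}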
@@ -4464,16 +4430,16 @@ static DEVICE_ATTR_RW(lpfc_aer_support);
* @count: unused variable.
*
* Description:
- * If the @buf contains 1 and the device currently has the AER support
- * enabled, then invokes the kernel AER helper routine
+ * If the @buf contains 1, invokes the kernel AER helper routine
* pci_aer_clear_nonfatal_status() to clean up the uncorrectable
* error status register.
*
* Notes:
*
* Returns:
- * -EINVAL if the buf does not contain the 1 or the device is not currently
- * enabled with the AER support.
+ * -EINVAL if the buf does not contain 1
+ * -EPERM if the OS cannot clear AER error status, i.e., when platform
+ * firmware owns the AER Capability
**/
static ssize_t
lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
@@ -4491,8 +4457,7 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
if (val != 1)
return -EINVAL;
- if (phba->hba_flag & HBA_AER_ENABLED)
- rc = pci_aer_clear_nonfatal_status(phba->pcidev);
+ rc = pci_aer_clear_nonfatal_status(phba->pcidev);
if (rc == 0)
return strlen(buf);
@@ -5893,8 +5858,8 @@ int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
module_param(lpfc_fabric_cgn_frequency, int, 0444);
MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
-int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
-module_param(lpfc_acqe_cgn_frequency, int, 0444);
+unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, byte, 0444);
MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
@@ -7277,7 +7242,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
- lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 852b025e2fec..9a322a3a2150 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -134,8 +134,8 @@ lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
if (mlist) {
list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
list) {
- lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
list_del(&mlast->list);
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
kfree(mlast);
}
lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 976fd5ee7f7e..d4e46a08f94d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -134,7 +134,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb);
-int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
@@ -248,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id);
int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
uint32_t len);
@@ -458,6 +458,8 @@ void lpfc_get_cfgparam(struct lpfc_hba *);
void lpfc_get_vport_cfgparam(struct lpfc_vport *);
int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
void lpfc_free_sysfs_attr(struct lpfc_vport *);
+bool lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status,
+ u32 ulp_word4);
extern const struct attribute_group *lpfc_hba_groups[];
extern const struct attribute_group *lpfc_vport_groups[];
extern struct scsi_host_template lpfc_template;
@@ -662,7 +664,7 @@ extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
-extern int lpfc_acqe_cgn_frequency;
+extern unsigned char lpfc_acqe_cgn_frequency;
extern int lpfc_fabric_cgn_frequency;
extern int lpfc_use_cgn_signal;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index e941a99aa965..f3bdcebe67f5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -476,8 +476,8 @@ lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
struct lpfc_dmabuf *mlast, *next_mlast;
list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
- lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
list_del(&mlast->list);
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
kfree(mlast);
}
lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
@@ -958,7 +958,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
+ if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event: "
"ulp_status x%x ulp_word4 x%x fc_flag x%x "
@@ -1181,7 +1181,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
+ if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4166 NS query failed due to link event: "
"ulp_status x%x ulp_word4 x%x fc_flag x%x "
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index f5252e45a48a..bdf34af4ef36 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2157,10 +2157,13 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
int i;
+ size_t bsize;
memset(mybuf, 0, sizeof(mybuf));
- if (copy_from_user(mybuf, buf, nbytes))
+ bsize = min(nbytes, (sizeof(mybuf) - 1));
+
+ if (copy_from_user(mybuf, buf, bsize))
return -EFAULT;
pbuf = &mybuf[0];
@@ -2181,7 +2184,7 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
qp->lock_conflict.wq_access = 0;
}
}
- return nbytes;
+ return bsize;
}
#endif
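The lockstat_write fix above bounds the user copy by the size of the on-stack buffer and reports back only the bytes it consumed. A generic sketch of the same pattern, with hypothetical names:

static ssize_t bounded_debugfs_write(struct file *file, const char __user *buf,
				     size_t nbytes, loff_t *ppos)
{
	char kbuf[64];
	size_t bsize = min(nbytes, sizeof(kbuf) - 1);

	memset(kbuf, 0, sizeof(kbuf));
	if (copy_from_user(kbuf, buf, bsize))
		return -EFAULT;
	/* kbuf is NUL-terminated no matter how large the user write was */

	/* ... parse kbuf with sscanf()/strncmp() ... */

	return bsize;	/* report only what was actually consumed */
}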
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 35b252f1ef73..a3c8550e9985 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1088,7 +1088,7 @@ stop_rr_fcf_flogi:
}
/* Do not register VFI if the driver aborted FLOGI */
- if (!lpfc_error_lost_link(ulp_status, ulp_word4))
+ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
@@ -1207,7 +1207,7 @@ flogifail:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
+ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -2087,7 +2087,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_word4);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(ulp_status, ulp_word4))
+ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
@@ -2208,14 +2208,15 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
* outstanding UNREG_RPI mbox command completes, unless we
* are going offline. This logic does not apply for Fabric DIDs
*/
- if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+ if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) &&
((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
!(vport->fc_flag & FC_OFFLINE_MODE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4110 Issue PLOGI x%x deferred "
- "on NPort x%x rpi x%x Data: x%px\n",
+ "on NPort x%x rpi x%x flg x%x Data:"
+ " x%px\n",
ndlp->nlp_defer_did, ndlp->nlp_DID,
- ndlp->nlp_rpi, ndlp);
+ ndlp->nlp_rpi, ndlp->nlp_flag, ndlp);
/* We can only defer 1st PLOGI */
if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
@@ -2382,7 +2383,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(ulp_status, ulp_word4))
+ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
@@ -3037,15 +3038,16 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ulp_status,
ulp_word4);
- if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
+ if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
skip_recovery = 1;
- goto out;
- }
}
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+ if (skip_recovery)
+ goto out;
+
/* The driver sets this flag for an NPIV instance that doesn't want to
* log into the remote port.
*/
@@ -4928,7 +4930,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
- !lpfc_error_lost_link(ulp_status, ulp_word4)) {
+ !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
@@ -4942,7 +4944,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else if (cmdiocb->retry >= 32)
delay = 1000;
} else if ((cmd == ELS_CMD_FDISC) &&
- !lpfc_error_lost_link(ulp_status, ulp_word4)) {
+ !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
/* retry FDISCs every second up to devloss */
retry = 1;
maxretry = vport->cfg_devloss_tmo;
@@ -5203,14 +5205,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
*
* This routine is the completion callback function to the Logout (LOGO)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
- * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
- * release the ndlp if it has the last reference remaining (reference count
- * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp
- * field to NULL to inform the following lpfc_els_free_iocb() routine no
- * ndlp reference count needs to be decremented. Otherwise, the ndlp
- * reference use-count shall be decremented by the lpfc_els_free_iocb()
- * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
- * IOCB data structure.
+ * the completion of the LOGO process. If the node has transitioned to NPR,
+ * this routine unregisters the RPI if it is still registered. Finally,
+ * lpfc_els_free_iocb() is invoked to release the IOCB data structure.
**/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -5251,19 +5248,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
goto out;
- /* NPort Recovery mode or node is just allocated */
- if (!lpfc_nlp_not_used(ndlp)) {
- /* A LOGO is completing and the node is in NPR state.
- * Just unregister the RPI because the node is still
- * required.
- */
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
lpfc_unreg_rpi(vport, ndlp);
- } else {
- /* Indicate the node has already released, should
- * not reference to it from within lpfc_els_free_iocb.
- */
- cmdiocb->ndlp = NULL;
- }
+
}
out:
/*
@@ -5283,9 +5270,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* RPI (Remote Port Index) mailbox command to the @phba. It simply releases
* the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
* decrements the ndlp reference count held for this completion callback
- * function. After that, it invokes the lpfc_nlp_not_used() to check
- * whether there is only one reference left on the ndlp. If so, it will
- * perform one more decrement and trigger the release of the ndlp.
+ * function. After that, it invokes lpfc_drop_node() to check
+ * whether it is appropriate to release the node.
**/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
@@ -5455,18 +5441,20 @@ out:
* these conditions and release the RPI.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- (vport && vport->port_type == LPFC_NPIV_PORT) &&
- !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
- ndlp->nlp_flag & NLP_RELEASE_RPI) {
- if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
- ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
- spin_unlock_irq(&ndlp->lock);
- lpfc_drop_node(vport, ndlp);
+ vport && vport->port_type == LPFC_NPIV_PORT &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ }
}
+
+ lpfc_drop_node(vport, ndlp);
}
/* Release the originating I/O reference. */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 66cd0b1dbbd0..67bfdddb897c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4333,13 +4333,14 @@ out:
/* If the node is not registered with the scsi or nvme
* transport, remove the fabric node. The failed reg_login
- * is terminal.
+ * is terminal and forces the removal of the last node
+ * reference.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
- lpfc_nlp_not_used(ndlp);
+ lpfc_nlp_put(ndlp);
}
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@@ -5755,8 +5756,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
(NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -6704,25 +6705,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
-/* This routine free's the specified nodelist if it is not in use
- * by any other discovery thread. This routine returns 1 if the
- * ndlp has been freed. A return value of 0 indicates the ndlp is
- * not yet been released.
- */
-int
-lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
-{
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node not used: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
-
- if (kref_read(&ndlp->kref) == 1)
- if (lpfc_nlp_put(ndlp))
- return 1;
- return 0;
-}
-
/**
* lpfc_fcf_inuse - Check if FCF can be unregistered.
* @phba: Pointer to hba context object.
@@ -7269,3 +7251,38 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
+
+/*
+ * lpfc_error_lost_link - Check for IO failure from a link event or FW reset.
+ *
+ * @vport: Pointer to lpfc_vport data structure.
+ * @ulp_status: IO completion status.
+ * @ulp_word4: Reason code for the ulp_status.
+ *
+ * This function checks the ulp_status and ulp_word4 values for error
+ * codes that indicate an internal link fault or a firmware reset event
+ * on the completing IO. Callers use this common check to decide the
+ * next steps for the IO.
+ *
+ * Return:
+ * false - No link or reset error occurred.
+ * true - A link or reset error occurred.
+ */
+bool
+lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4)
+{
+ /* Mask off the extra port data to get just the reason code. */
+ u32 rsn_code = IOERR_PARAM_MASK & ulp_word4;
+
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (rsn_code == IOERR_SLI_ABORTED ||
+ rsn_code == IOERR_LINK_DOWN ||
+ rsn_code == IOERR_SLI_DOWN)) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS,
+ "0408 Report link error true: <x%x:x%x>\n",
+ ulp_status, ulp_word4);
+ return true;
+ }
+
+ return false;
+}
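One behavioral detail worth noting: unlike the removed inline in lpfc_hw.h, the new helper masks ulp_word4 with IOERR_PARAM_MASK before comparing. Assuming the mask strips the extra port data mentioned in the comment, the difference shows up whenever the upper bits are populated (illustrative values only; ulp_status is IOSTAT_LOCAL_REJECT in both cases):

	u32 ulp_word4 = 0x01000000 | IOERR_LINK_DOWN;	/* port data in the upper byte */

	/* Old inline: whole-word compare never matches, lost link is missed */
	bool old_hit = (ulp_word4 == IOERR_LINK_DOWN);				/* false */

	/* New helper: reason code isolated first, lost link is detected */
	bool new_hit = ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_LINK_DOWN);	/* true */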
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5c283936ff08..19b2d2754f32 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -4435,16 +4435,4 @@ lpfc_is_LC_HBA(unsigned short device)
return 0;
}
-/*
- * Determine if failed because of a link event or firmware reset.
- */
-static inline int
-lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
-{
- return (ulp_status == IOSTAT_LOCAL_REJECT &&
- (ulp_word4 == IOERR_SLI_ABORTED ||
- ulp_word4 == IOERR_LINK_DOWN ||
- ulp_word4 == IOERR_SLI_DOWN));
-}
-
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 58fa39c403a0..082f8a109e55 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -536,9 +536,9 @@ struct sli4_wcqe_xri_aborted {
/* completion queue entry structure for rqe completion */
struct lpfc_rcqe {
uint32_t word0;
-#define lpfc_rcqe_bindex_SHIFT 16
-#define lpfc_rcqe_bindex_MASK 0x0000FFF
-#define lpfc_rcqe_bindex_WORD word0
+#define lpfc_rcqe_iv_SHIFT 31
+#define lpfc_rcqe_iv_MASK 0x00000001
+#define lpfc_rcqe_iv_WORD word0
#define lpfc_rcqe_status_SHIFT 8
#define lpfc_rcqe_status_MASK 0x000000FF
#define lpfc_rcqe_status_WORD word0
@@ -546,6 +546,7 @@ struct lpfc_rcqe {
#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
+#define FC_STATUS_RQ_DMA_FAILURE 0x14 /* DMA failure */
uint32_t word1;
#define lpfc_rcqe_fcf_id_v1_SHIFT 0
#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
@@ -4813,8 +4814,8 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
-#define cmf_sync_period_SHIFT 16
-#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_SHIFT 24
+#define cmf_sync_period_MASK 0x000000ff
#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 61958a24a43d..088bd75fb5d7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -32,7 +32,6 @@
#include <linux/spinlock.h>
#include <linux/sched/clock.h>
#include <linux/ctype.h>
-#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
@@ -1280,7 +1279,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
/*
* lpfc_idle_stat_delay_work - idle_stat tracking
*
- * This routine tracks per-cq idle_stat and determines polling decisions.
+ * This routine tracks per-eq idle_stat and determines polling decisions.
*
* Return codes:
* None
@@ -1291,7 +1290,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
struct lpfc_hba *phba = container_of(to_delayed_work(work),
struct lpfc_hba,
idle_stat_delay_work);
- struct lpfc_queue *cq;
+ struct lpfc_queue *eq;
struct lpfc_sli4_hdw_queue *hdwq;
struct lpfc_idle_stat *idle_stat;
u32 i, idle_percent;
@@ -1307,10 +1306,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
- cq = hdwq->io_cq;
+ eq = hdwq->hba_eq;
- /* Skip if we've already handled this cq's primary CPU */
- if (cq->chann != i)
+ /* Skip if we've already handled this eq's primary CPU */
+ if (eq->chann != i)
continue;
idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -1334,9 +1333,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
idle_percent = 100 - idle_percent;
if (idle_percent < 15)
- cq->poll_mode = LPFC_QUEUE_WORK;
+ eq->poll_mode = LPFC_QUEUE_WORK;
else
- cq->poll_mode = LPFC_IRQ_POLL;
+ eq->poll_mode = LPFC_THREADED_IRQ;
idle_stat->prev_idle = wall_idle;
idle_stat->prev_wall = wall;
@@ -2148,7 +2147,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
/* fall through for not able to recover */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3152 Unrecoverable error\n");
- phba->link_state = LPFC_HBA_ERROR;
+ lpfc_sli4_offline_eratt(phba);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -4358,6 +4357,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_io_buf *lpfc_cmd;
int idx, cnt;
+ unsigned long iflags;
qp = phba->sli4_hba.hdwq;
cnt = 0;
@@ -4372,12 +4372,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
- spin_lock(&qp->io_buf_list_put_lock);
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
qp->put_io_bufs++;
qp->total_io_bufs++;
- spin_unlock(&qp->io_buf_list_put_lock);
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
+ iflags);
}
}
return cnt;
@@ -7291,6 +7292,8 @@ lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
/* Find out if the FW has a new set of congestion parameters. */
len = sizeof(struct lpfc_cgn_param);
pdata = kzalloc(len, GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
pdata, len);
@@ -9567,8 +9570,7 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
/* Final checks. The port status should be clean. */
if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&reg_data.word0) ||
- (bf_get(lpfc_sliport_status_err, &reg_data) &&
- !bf_get(lpfc_sliport_status_rn, &reg_data))) {
+ lpfc_sli4_unrecoverable_port(&reg_data)) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -12024,7 +12026,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
goto out_iounmap_all;
} else {
error = -ENOMEM;
- goto out_iounmap_all;
+ goto out_iounmap_ctrl;
}
}
@@ -12042,7 +12044,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
dev_err(&pdev->dev,
"ioremap failed for SLI4 HBA dpp registers.\n");
error = -ENOMEM;
- goto out_iounmap_ctrl;
+ goto out_iounmap_all;
}
phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
}
@@ -12067,9 +12069,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
return 0;
out_iounmap_all:
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ if (phba->sli4_hba.drbl_regs_memmap_p)
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ if (phba->sli4_hba.ctrl_regs_memmap_p)
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
iounmap(phba->sli4_hba.conf_regs_memmap_p);
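The two memmap hunks above swap crossed error labels and add NULL guards, so each failure path now releases only what was actually mapped. A generic sketch of that unwind shape, under hypothetical names:

static int iomem_setup_sketch(phys_addr_t conf_pa, phys_addr_t ctrl_pa,
			      phys_addr_t drbl_pa, size_t len)
{
	void __iomem *conf, *ctrl, *drbl;

	conf = ioremap(conf_pa, len);
	if (!conf)
		return -ENOMEM;

	ctrl = ioremap(ctrl_pa, len);
	if (!ctrl)
		goto out_unmap_conf;	/* only conf is mapped so far */

	drbl = ioremap(drbl_pa, len);
	if (!drbl)
		goto out_unmap_ctrl;	/* conf and ctrl are mapped, drbl is not */

	/* ... hand the mappings off to the driver ... */
	return 0;

out_unmap_ctrl:
	iounmap(ctrl);
out_unmap_conf:
	iounmap(conf);
	return -ENOMEM;
}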
@@ -12105,6 +12109,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
iounmap(phba->sli4_hba.dpp_regs_memmap_p);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
+ break;
default:
dev_printk(KERN_ERR, &phba->pcidev->dev,
"FATAL - unsupported SLI4 interface type - %d\n",
@@ -12563,7 +12568,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
goto found_same;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
/* At this point, we leave the CPU as unassigned */
@@ -12577,7 +12582,7 @@ found_same:
* selecting the same IRQ.
*/
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12613,7 +12618,7 @@ found_same:
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
/* We should never leave an entry unassigned */
@@ -12631,7 +12636,7 @@ found_any:
* selecting the same IRQ.
*/
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12704,7 +12709,7 @@ found_any:
goto found_hdwq;
}
new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
@@ -12719,7 +12724,7 @@ found_any:
goto found_hdwq;
new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
@@ -12730,7 +12735,7 @@ found_any:
found_hdwq:
/* We found an available entry, copy the IRQ info */
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
cpup->hdwq = new_cpup->hdwq;
logit:
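The repeated == nr_cpumask_bits fixes in this function rely on the documented cpumask guarantee: the iterators only promise a return value of at least nr_cpu_ids when they run past the end of the mask, so an equality test can miss the wrap. A minimal sketch of the corrected wrap-around walk (hypothetical loop body):

	unsigned int cpu, start_cpu, first_cpu;

	first_cpu = cpumask_first(cpu_present_mask);
	start_cpu = first_cpu;
	cpu = start_cpu;

	do {
		/* ... examine per-CPU affinity state for 'cpu' ... */

		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu >= nr_cpu_ids)	/* >=, not ==: anything past the mask */
			cpu = first_cpu;
	} while (cpu != start_cpu);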
@@ -13114,8 +13119,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
}
eqhdl->irq = rc;
- rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = request_threaded_irq(eqhdl->irq,
+ &lpfc_sli4_hba_intr_handler,
+ &lpfc_sli4_hba_intr_handler_th,
+ IRQF_ONESHOT, name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 152245f7cacc..82730a89ecb5 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1893,38 +1893,38 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
pnvme_rport->port_id,
pnvme_fcreq);
- /* If the hba is getting reset, this flag is set. It is
- * cleared when the reset is complete and rings reestablished.
- */
- spin_lock_irqsave(&phba->hbalock, flags);
- /* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "6139 Driver in reset cleanup - flushing "
- "NVME Req now. hba_flag x%x\n",
- phba->hba_flag);
- return;
- }
-
lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
}
- nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/* Guard against IO completion being called at same time */
- spin_lock(&lpfc_nbuf->buf_lock);
+ spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
+
+ /* If the hba is getting reset, this flag is set. It is
+ * cleared when the reset is complete and rings reestablished.
+ */
+ spin_lock(&phba->hbalock);
+ /* driver queued commands are in process of being flushed */
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6139 Driver in reset cleanup - flushing "
+ "NVME Req now. hba_flag x%x\n",
+ phba->hba_flag);
+ return;
+ }
+
+ nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/*
* The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@@ -1971,8 +1971,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
lpfc_nvme_abort_fcreq_cmpl);
- spin_unlock(&lpfc_nbuf->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
@@ -1998,8 +1998,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
out_unlock:
- spin_unlock(&lpfc_nbuf->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
return;
}
@@ -2265,6 +2265,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
}
if (!vport->localport ||
test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+ phba->link_state == LPFC_HBA_ERROR ||
vport->load_flag & FC_UNLOADING)
return;
@@ -2630,7 +2631,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ unlikely(vport->phba->link_state == LPFC_HBA_ERROR))
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
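[Annotation, not part of the patch] The abort-path hunks above invert the lock nesting: lpfc_nbuf->buf_lock is now taken first, with interrupts disabled, and phba->hbalock is taken inside it. A generic sketch of that outer/inner ordering is shown below; the lock names here are placeholders, not lpfc fields.

    /* Illustration of the outer/inner spinlock ordering adopted above
     * (placeholder names). The outer lock disables interrupts once, so the
     * inner lock can use plain spin_lock()/spin_unlock(). Unlocking happens
     * in reverse order of locking.
     */
    #include <linux/spinlock.h>

    static void ordered_locking(spinlock_t *buf_lock, spinlock_t *hba_lock)
    {
            unsigned long flags;

            spin_lock_irqsave(buf_lock, flags);     /* outer lock */
            spin_lock(hba_lock);                    /* inner lock */

            /* ... critical section ... */

            spin_unlock(hba_lock);
            spin_unlock_irqrestore(buf_lock, flags);
    }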
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index e989f130434e..49aa86c477c6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4273,7 +4273,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
"x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3),
- (u64)scsi_get_lba(cmd),
+ (cmd->device->sector_size) ?
+ (u64)scsi_get_lba(cmd) : 0,
cmd->retries, scsi_get_resid(cmd));
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c5b69f313af3..22708f66be64 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -34,7 +34,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -83,7 +82,8 @@ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_queue *eq,
- struct lpfc_eqe *eqe);
+ struct lpfc_eqe *eqe,
+ enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
@@ -630,7 +630,7 @@ lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
- uint8_t rearm)
+ u8 rearm, enum lpfc_poll_mode poll_mode)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -640,7 +640,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
- lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
+ lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
__lpfc_sli4_consume_eqe(phba, eq, eqe);
consumed++;
@@ -1932,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
- u16 warn_sync_period = 0;
+ u8 warn_sync_period = 0;
/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -5204,13 +5204,9 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
volatile struct MAILBOX_word0 mb;
struct lpfc_sli *psli;
void __iomem *to_slim;
- uint32_t hba_aer_enabled;
spin_lock_irq(&phba->hbalock);
- /* Take PCIe device Advanced Error Reporting (AER) state */
- hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
-
psli = &phba->sli;
/* Restart HBA */
@@ -5251,10 +5247,6 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
/* Give the INITFF and Post time to settle. */
mdelay(100);
- /* Reset HBA AER if it was enabled, note hba_flag was reset above */
- if (hba_aer_enabled)
- pci_disable_pcie_error_reporting(phba->pcidev);
-
lpfc_hba_down_post(phba);
return 0;
@@ -5273,7 +5265,6 @@ static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
- uint32_t hba_aer_enabled;
int rc;
/* Restart HBA */
@@ -5281,9 +5272,6 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
"0296 Restart HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
- /* Take PCIe device Advanced Error Reporting (AER) state */
- hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
-
rc = lpfc_sli4_brdreset(phba);
if (rc) {
phba->link_state = LPFC_HBA_ERROR;
@@ -5301,10 +5289,6 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = ktime_get_seconds();
- /* Reset HBA AER if it was enabled, note hba_flag was reset above */
- if (hba_aer_enabled)
- pci_disable_pcie_error_reporting(phba->pcidev);
-
hba_down_queue:
lpfc_hba_down_post(phba);
lpfc_sli4_queue_destroy(phba);
@@ -5725,25 +5709,6 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
}
phba->fcp_embed_io = 0; /* SLI4 FC support only */
- /* Enable PCIe device Advanced Error Reporting (AER) if configured */
- if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
- rc = pci_enable_pcie_error_reporting(phba->pcidev);
- if (!rc) {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2709 This device supports "
- "Advanced Error Reporting (AER)\n");
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_AER_ENABLED;
- spin_unlock_irq(&phba->hbalock);
- } else {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2708 This device does not support "
- "Advanced Error Reporting (AER): %d\n",
- rc);
- phba->cfg_aer_support = 0;
- }
- }
-
if (phba->sli_rev == 3) {
phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
@@ -7993,7 +7958,7 @@ out_rdf:
* lpfc_init_idle_stat_hb - Initialize idle_stat tracking
* @phba: pointer to lpfc hba data structure.
*
- * This routine initializes the per-cq idle_stat to dynamically dictate
+ * This routine initializes the per-eq idle_stat to dynamically dictate
* polling decisions.
*
* Return codes:
@@ -8003,16 +7968,16 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
int i;
struct lpfc_sli4_hdw_queue *hdwq;
- struct lpfc_queue *cq;
+ struct lpfc_queue *eq;
struct lpfc_idle_stat *idle_stat;
u64 wall;
for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
- cq = hdwq->io_cq;
+ eq = hdwq->hba_eq;
- /* Skip if we've already handled this cq's primary CPU */
- if (cq->chann != i)
+ /* Skip if we've already handled this eq's primary CPU */
+ if (eq->chann != i)
continue;
idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -8021,13 +7986,14 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
idle_stat->prev_wall = wall;
if (phba->nvmet_support ||
- phba->cmf_active_mode != LPFC_CFG_OFF)
- cq->poll_mode = LPFC_QUEUE_WORK;
+ phba->cmf_active_mode != LPFC_CFG_OFF ||
+ phba->intr_type != MSIX)
+ eq->poll_mode = LPFC_QUEUE_WORK;
else
- cq->poll_mode = LPFC_IRQ_POLL;
+ eq->poll_mode = LPFC_THREADED_IRQ;
}
- if (!phba->nvmet_support)
+ if (!phba->nvmet_support && phba->intr_type == MSIX)
schedule_delayed_work(&phba->idle_stat_delay_work,
msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
@@ -8080,16 +8046,16 @@ int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
/**
* lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
* @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ *
+ * Called after cancellation of cmf_timer.
**/
void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
{
- spin_lock(&rx_monitor->lock);
kfree(rx_monitor->ring);
rx_monitor->ring = NULL;
rx_monitor->entries = 0;
rx_monitor->head_idx = 0;
rx_monitor->tail_idx = 0;
- spin_unlock(&rx_monitor->lock);
}
/**
@@ -9053,25 +9019,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
mod_timer(&phba->eratt_poll,
jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
- /* Enable PCIe device Advanced Error Reporting (AER) if configured */
- if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
- rc = pci_enable_pcie_error_reporting(phba->pcidev);
- if (!rc) {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2829 This device supports "
- "Advanced Error Reporting (AER)\n");
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_AER_ENABLED;
- spin_unlock_irq(&phba->hbalock);
- } else {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2830 This device does not support "
- "Advanced Error Reporting (AER)\n");
- phba->cfg_aer_support = 0;
- }
- rc = 0;
- }
-
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
@@ -9273,7 +9220,8 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_QUEUE_WORK);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -9895,7 +9843,8 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
* port for twice the regular mailbox command timeout value.
*
* 0 - no timeout on waiting for bootstrap mailbox register ready.
- * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
+ * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
+ * is in an unrecoverable state.
**/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
@@ -9903,6 +9852,23 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t db_ready;
unsigned long timeout;
struct lpfc_register bmbx_reg;
+ struct lpfc_register portstat_reg = {-1};
+
+ /* Sanity check - there is no point to wait if the port is in an
+ * unrecoverable state.
+ */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0) ||
+ lpfc_sli4_unrecoverable_port(&portstat_reg)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3858 Skipping bmbx ready because "
+ "Port Status x%x\n",
+ portstat_reg.word0);
+ return MBXERR_ERROR;
+ }
+ }
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
* 1000) + jiffies;
@@ -11291,7 +11257,8 @@ inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
* will be handled through a sched from polling timer
* function which is currently triggered every 1msec.
*/
- lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+ lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
+ LPFC_QUEUE_WORK);
}
/**
@@ -14719,6 +14686,38 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case FC_STATUS_RQ_DMA_FAILURE:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2564 RQE DMA Error x%x, x%08x x%08x x%08x "
+ "x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+
+ /* If IV set, no further recovery */
+ if (bf_get(lpfc_rcqe_iv, rcqe))
+ break;
+
+ /* recycle consumed resource */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
+ dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ }
+ hrq->RQ_rcv_buf++;
+ hrq->RQ_buf_posted--;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_in_buf_free(phba, &dma_buf->dbuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2565 Unexpected RQE Status x%x, w0-3 x%08x "
+ "x%08x x%08x x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+ break;
}
out:
return workposted;
@@ -14840,7 +14839,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
* @cq: Pointer to CQ to be processed
* @handler: Routine to process each cqe
* @delay: Pointer to usdelay to set in case of rescheduling of the handler
- * @poll_mode: Polling mode we were called from
*
* This routine processes completion queue entries in a CQ. While a valid
* queue element is found, the handler is called. During processing checks
@@ -14858,8 +14856,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_cqe *), unsigned long *delay,
- enum lpfc_poll_mode poll_mode)
+ struct lpfc_cqe *), unsigned long *delay)
{
struct lpfc_cqe *cqe;
bool workposted = false;
@@ -14900,10 +14897,6 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
arm = false;
}
- /* Note: complete the irq_poll softirq before rearming CQ */
- if (poll_mode == LPFC_IRQ_POLL)
- irq_poll_complete(&cq->iop);
-
/* Track the max number of CQEs processed in 1 EQ */
if (count > cq->CQ_max_cqe)
cq->CQ_max_cqe = count;
@@ -14953,17 +14946,17 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
case LPFC_MCQ:
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_mcqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
break;
case LPFC_WCQ:
if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
else
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_cqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15240,6 +15233,38 @@ drop:
hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
break;
+ case FC_STATUS_RQ_DMA_FAILURE:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2575 RQE DMA Error x%x, x%08x x%08x x%08x "
+ "x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+
+ /* If IV set, no further recovery */
+ if (bf_get(lpfc_rcqe_iv, rcqe))
+ break;
+
+ /* recycle consumed resource */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
+ dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ }
+ hrq->RQ_rcv_buf++;
+ hrq->RQ_buf_posted--;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_rq_buf_free(phba, &dma_buf->hbuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2576 Unexpected RQE Status x%x, w0-3 x%08x "
+ "x%08x x%08x x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+ break;
}
out:
return workposted;
@@ -15308,45 +15333,64 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}
/**
- * lpfc_sli4_sched_cq_work - Schedules cq work
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to CQ
- * @cqid: CQ ID
- *
- * This routine checks the poll mode of the CQ corresponding to
- * cq->chann, then either schedules a softirq or queue_work to complete
- * cq work.
+ * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
+ * @cq: Pointer to CQ to be processed
*
- * queue_work path is taken if in NVMET mode, or if poll_mode is in
- * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
+ * This routine calls the cq processing routine with the handler for
+ * fast path CQEs.
*
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wakeup the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and yet another interrupt, the CQ handler should be queued so
+ * that it is processed in a subsequent polling action. The value of
+ * the delay indicates when to reschedule it.
**/
-static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
- struct lpfc_queue *cq, uint16_t cqid)
+static void
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
- int ret = 0;
+ struct lpfc_hba *phba = cq->phba;
+ unsigned long delay;
+ bool workposted = false;
+ int ret;
- switch (cq->poll_mode) {
- case LPFC_IRQ_POLL:
- /* CGN mgmt is mutually exclusive from softirq processing */
- if (phba->cmf_active_mode == LPFC_CFG_OFF) {
- irq_poll_sched(&cq->iop);
- break;
- }
- fallthrough;
- case LPFC_QUEUE_WORK:
- default:
+ /* process and rearm the CQ */
+ workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
+ &delay);
+
+ if (delay) {
if (is_kdump_kernel())
- ret = queue_work(phba->wq, &cq->irqwork);
+ ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
+ delay);
else
- ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
+ ret = queue_delayed_work_on(cq->chann, phba->wq,
+ &cq->sched_irqwork, delay);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0383 Cannot schedule queue work "
- "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id,
- raw_smp_processor_id());
+ "0367 Cannot schedule queue work "
+ "for cqid=%d on CPU %d\n",
+ cq->queue_id, cq->chann);
}
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_hba_process_cq - fast-path work handler when started by
+ * interrupt
+ * @work: pointer to work element
+ *
+ * translates from the work handler and calls the fast-path handler.
+ **/
+static void
+lpfc_sli4_hba_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
+
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
@@ -15354,6 +15398,7 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @eq: Pointer to the queue structure.
* @eqe: Pointer to fast-path event queue entry.
+ * @poll_mode: poll_mode to execute processing the cq.
*
* This routine process a event queue entry from the fast-path event queue.
* It will check the MajorCode and MinorCode to determine this is for a
@@ -15364,11 +15409,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
**/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
- struct lpfc_eqe *eqe)
+ struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
{
struct lpfc_queue *cq = NULL;
uint32_t qidx = eq->hdwq;
uint16_t cqid, id;
+ int ret;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15428,70 +15474,25 @@ work_cq:
else
cq->isr_timestamp = 0;
#endif
- lpfc_sli4_sched_cq_work(phba, cq, cqid);
-}
-/**
- * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
- * @cq: Pointer to CQ to be processed
- * @poll_mode: Enum lpfc_poll_state to determine poll mode
- *
- * This routine calls the cq processing routine with the handler for
- * fast path CQEs.
- *
- * The CQ routine returns two values: the first is the calling status,
- * which indicates whether work was queued to the background discovery
- * thread. If true, the routine should wakeup the discovery thread;
- * the second is the delay parameter. If non-zero, rather than rearming
- * the CQ and yet another interrupt, the CQ handler should be queued so
- * that it is processed in a subsequent polling action. The value of
- * the delay indicates when to reschedule it.
- **/
-static void
-__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
- enum lpfc_poll_mode poll_mode)
-{
- struct lpfc_hba *phba = cq->phba;
- unsigned long delay;
- bool workposted = false;
- int ret = 0;
-
- /* process and rearm the CQ */
- workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
- &delay, poll_mode);
-
- if (delay) {
+ switch (poll_mode) {
+ case LPFC_THREADED_IRQ:
+ __lpfc_sli4_hba_process_cq(cq);
+ break;
+ case LPFC_QUEUE_WORK:
+ default:
if (is_kdump_kernel())
- ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
- delay);
+ ret = queue_work(phba->wq, &cq->irqwork);
else
- ret = queue_delayed_work_on(cq->chann, phba->wq,
- &cq->sched_irqwork, delay);
+ ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0367 Cannot schedule queue work "
- "for cqid=%d on CPU %d\n",
- cq->queue_id, cq->chann);
+ "0383 Cannot schedule queue work "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id,
+ raw_smp_processor_id());
+ break;
}
-
- /* wake up worker thread if there are works to be done */
- if (workposted)
- lpfc_worker_wake_up(phba);
-}
-
-/**
- * lpfc_sli4_hba_process_cq - fast-path work handler when started by
- * interrupt
- * @work: pointer to work element
- *
- * translates from the work handler and calls the fast-path handler.
- **/
-static void
-lpfc_sli4_hba_process_cq(struct work_struct *work)
-{
- struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
-
- __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}
/**
@@ -15506,7 +15507,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
struct lpfc_queue *cq = container_of(to_delayed_work(work),
struct lpfc_queue, sched_irqwork);
- __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
@@ -15532,8 +15533,9 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
* and returns for these events. This function is called without any lock
* held. It gets the hbalock to access and update SLI data structures.
*
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
+ * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
+ * when interrupt is scheduled to be handled from a threaded irq context, or
+ * else returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
@@ -15542,8 +15544,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *fpeq;
unsigned long iflag;
- int ecount = 0;
int hba_eqidx;
+ int ecount = 0;
struct lpfc_eq_intr_info *eqi;
/* Get the driver's phba structure from the dev_id */
@@ -15572,30 +15574,41 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
- eqi->icnt++;
-
- fpeq->last_cpu = raw_smp_processor_id();
+ switch (fpeq->poll_mode) {
+ case LPFC_THREADED_IRQ:
+ /* CGN mgmt is mutually exclusive from irq processing */
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ return IRQ_WAKE_THREAD;
+ fallthrough;
+ case LPFC_QUEUE_WORK:
+ default:
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+ eqi->icnt++;
- if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
- fpeq->q_flag & HBA_EQ_DELAY_CHK &&
- phba->cfg_auto_imax &&
- fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
- phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
- lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+ fpeq->last_cpu = raw_smp_processor_id();
- /* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+ phba->cfg_auto_imax &&
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
+ LPFC_MAX_AUTO_EQ_DELAY);
- if (unlikely(ecount == 0)) {
- fpeq->EQ_no_entry++;
- if (phba->intr_type == MSIX)
- /* MSI-X treated interrupt served as no EQ share INT */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0358 MSI-X interrupt with no EQE\n");
- else
- /* Non MSI-X treated on interrupt as EQ share INT */
- return IRQ_NONE;
+ /* process and rearm the EQ */
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_QUEUE_WORK);
+
+ if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0358 MSI-X interrupt with no EQE\n");
+ else
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
}
return IRQ_HANDLED;
@@ -16152,13 +16165,69 @@ out:
return status;
}
-static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
+/**
+ * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
+ * threaded irq context.
+ *
+ * Returns
+ * IRQ_HANDLED - interrupt is handled
+ * IRQ_NONE - otherwise
+ **/
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
{
- struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
+ struct lpfc_hba *phba;
+ struct lpfc_hba_eq_hdl *hba_eq_hdl;
+ struct lpfc_queue *fpeq;
+ int ecount = 0;
+ int hba_eqidx;
+ struct lpfc_eq_intr_info *eqi;
- __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
+ /* Get the driver's phba structure from the dev_id */
+ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+ phba = hba_eq_hdl->phba;
+ hba_eqidx = hba_eq_hdl->idx;
- return 1;
+ if (unlikely(!phba))
+ return IRQ_NONE;
+ if (unlikely(!phba->sli4_hba.hdwq))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
+ if (unlikely(!fpeq))
+ return IRQ_NONE;
+
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
+ eqi->icnt++;
+
+ fpeq->last_cpu = raw_smp_processor_id();
+
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+ phba->cfg_auto_imax &&
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+
+ /* process and rearm the EQ */
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_THREADED_IRQ);
+
+ if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3358 MSI-X interrupt with no EQE\n");
+ else
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
+ return IRQ_HANDLED;
}
/**
@@ -16302,8 +16371,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (cq->queue_id > phba->sli4_hba.cq_max)
phba->sli4_hba.cq_max = cq->queue_id;
-
- irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
@@ -21899,20 +21966,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
- struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
struct lpfc_io_buf *lpfc_ncmd_next;
unsigned long iflag;
struct lpfc_epd_pool *epd_pool;
epd_pool = &phba->epd_pool;
- lpfc_ncmd = NULL;
spin_lock_irqsave(&epd_pool->lock, iflag);
if (epd_pool->count > 0) {
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ list_for_each_entry_safe(iter, lpfc_ncmd_next,
&epd_pool->list, list) {
- list_del(&lpfc_ncmd->list);
+ list_del(&iter->list);
epd_pool->count--;
+ lpfc_ncmd = iter;
break;
}
}
@@ -22109,10 +22176,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
struct lpfc_dmabuf *pcmd;
u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
- /* sanity check on queue memory */
- if (!datap)
- return -ENODEV;
-
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -22329,10 +22392,10 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
/* Free sgl pool */
list_for_each_entry_safe(list_entry, tmp,
buf_list, list_node) {
+ list_del(&list_entry->list_node);
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
list_entry->dma_sgl,
list_entry->dma_phys_sgl);
- list_del(&list_entry->list_node);
kfree(list_entry);
}
@@ -22479,10 +22542,10 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
list_for_each_entry_safe(list_entry, tmp,
buf_list,
list_node) {
+ list_del(&list_entry->list_node);
dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
list_entry->fcp_cmnd,
list_entry->fcp_cmd_rsp_dma_handle);
- list_del(&list_entry->list_node);
kfree(list_entry);
}
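[Annotation, not part of the patch] The lpfc_sli.c hunks above route EQ handling through the queue's poll_mode: with LPFC_THREADED_IRQ and congestion management off, the hard-irq handler returns IRQ_WAKE_THREAD and the new lpfc_sli4_hba_intr_handler_th() drains the EQ from a kernel thread; otherwise the EQ is processed inline as before. A condensed sketch of that dispatch follows; it is simplified (no statistics or EQ-delay tuning) and assumes the lpfc headers for the identifiers it reuses.

    /* Condensed sketch of the hard-irq dispatch added above (simplified, not
     * the full handler). Returning IRQ_WAKE_THREAD hands the EQ to the
     * threaded handler registered with request_threaded_irq() in lpfc_init.c.
     */
    static irqreturn_t sketch_hba_intr(struct lpfc_hba *phba,
                                       struct lpfc_queue *fpeq)
    {
            switch (fpeq->poll_mode) {
            case LPFC_THREADED_IRQ:
                    /* CGN management is mutually exclusive with threaded irq */
                    if (phba->cmf_active_mode == LPFC_CFG_OFF)
                            return IRQ_WAKE_THREAD;
                    fallthrough;
            case LPFC_QUEUE_WORK:
            default:
                    /* process and rearm the EQ inline */
                    if (!lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
                                              LPFC_QUEUE_WORK))
                            return IRQ_NONE;
            }
            return IRQ_HANDLED;
    }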
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3b62c4032c31..2541a8fba093 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -140,7 +140,7 @@ struct lpfc_rqb {
enum lpfc_poll_mode {
LPFC_QUEUE_WORK,
- LPFC_IRQ_POLL
+ LPFC_THREADED_IRQ,
};
struct lpfc_idle_stat {
@@ -279,8 +279,6 @@ struct lpfc_queue {
struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
-#define LPFC_IRQ_POLL_WEIGHT 256
- struct irq_poll iop;
enum lpfc_poll_mode poll_mode;
};
@@ -1180,3 +1178,22 @@ static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
return q->q_pgs[idx / q->entry_cnt_per_pg] +
(q->entry_size * (idx % q->entry_cnt_per_pg));
}
+
+/**
+ * lpfc_sli4_unrecoverable_port - Check ERR and RN bits in portstat_reg
+ * @portstat_reg: portstat_reg pointer containing portstat_reg contents
+ *
+ * Description:
+ * Use only for SLI4 interface type-2 or later. If ERR is set && RN is 0, then
+ * port is deemed unrecoverable.
+ *
+ * Returns:
+ * true - ERR && !RN
+ * false - otherwise
+ */
+static inline bool
+lpfc_sli4_unrecoverable_port(struct lpfc_register *portstat_reg)
+{
+ return bf_get(lpfc_sliport_status_err, portstat_reg) &&
+ !bf_get(lpfc_sliport_status_rn, portstat_reg);
+}
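[Annotation, not part of the patch] The new lpfc_sli4_unrecoverable_port() helper above encodes the "ERR set and RN clear" rule for SLI4 if_type 2 and later. The lpfc_sli4_wait_bmbx_ready() hunk earlier in this patch shows the intended call pattern; the sketch below condenses it into a standalone check (error handling reduced to a return code) and assumes the lpfc headers for the identifiers it reuses.

    /* Condensed sketch of the caller pattern for
     * lpfc_sli4_unrecoverable_port(), based on the lpfc_sli4_wait_bmbx_ready()
     * hunk above. Only if_type >= 2 exposes the STATUS register, so the check
     * is gated on the interface type.
     */
    static int check_port_recoverable(struct lpfc_hba *phba)
    {
            struct lpfc_register portstat_reg = {-1};

            if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
                LPFC_SLI_INTF_IF_TYPE_2)
                    return 0;       /* no STATUS register to consult */

            if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                           &portstat_reg.word0) ||
                lpfc_sli4_unrecoverable_port(&portstat_reg))
                    return -EIO;    /* port is dead; skip further waiting */

            return 0;
    }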
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0238208cdd11..5fda8ac6b883 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.10"
+#define LPFC_DRIVER_VERSION "14.2.0.12"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index f75928f7773e..6a019132109c 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -392,7 +392,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
mac53c94_priv(cmd)->this_residual = total;
}
-static struct scsi_host_template mac53c94_template = {
+static const struct scsi_host_template mac53c94_template = {
.proc_name = "53c94",
.name = "53C94",
.queuecommand = mac53c94_queue,
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 6d23ab5aee56..3f0061b00494 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -289,7 +289,7 @@ static struct esp_driver_ops mac_esp_ops = {
static int esp_mac_probe(struct platform_device *dev)
{
- struct scsi_host_template *tpnt = &scsi_esp_template;
+ const struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
struct esp *esp;
int err;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index bf491af9f0d6..e92f1a73cc9b 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1441,6 +1441,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
+ cmd = scb->cmd;
list_del_init(&scb->list);
scb->state = SCB_FREE;
@@ -4100,7 +4101,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
return rval;
}
-static struct scsi_host_template megaraid_template = {
+static const struct scsi_host_template megaraid_template = {
.module = THIS_MODULE,
.name = "MegaRAID",
.proc_name = "megaraid_legacy",
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 132de68c14e9..ef2b6380e19a 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -325,7 +325,7 @@ ATTRIBUTE_GROUPS(megaraid_sdev);
/*
* Scsi host template for megaraid unified driver
*/
-static struct scsi_host_template megaraid_template_g = {
+static const struct scsi_host_template megaraid_template_g = {
.module = THIS_MODULE,
.name = "LSI Logic MegaRAID driver",
.proc_name = "megaraid",
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 4919ea54b827..3554f6b07727 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -23,8 +23,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.719.03.00-rc1"
-#define MEGASAS_RELDATE "Sep 29, 2021"
+#define MEGASAS_VERSION "07.725.01.00-rc1"
+#define MEGASAS_RELDATE "Mar 2, 2023"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -1519,6 +1519,8 @@ struct megasas_ctrl_info {
#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_SUPPORTED_LD_IDS 240
+
#define MEGASAS_MAX_SECTORS (2*1024)
#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
@@ -1720,11 +1722,9 @@ struct megasas_sge_skinny {
} __packed;
union megasas_sgl {
-
- struct megasas_sge32 sge32[1];
- struct megasas_sge64 sge64[1];
- struct megasas_sge_skinny sge_skinny[1];
-
+ DECLARE_FLEX_ARRAY(struct megasas_sge32, sge32);
+ DECLARE_FLEX_ARRAY(struct megasas_sge64, sge64);
+ DECLARE_FLEX_ARRAY(struct megasas_sge_skinny, sge_skinny);
} __attribute__ ((packed));
struct megasas_header {
@@ -1758,7 +1758,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:16;
+ u32 reserved:15;
+ u32 support_memdump:1;
u32 support_fw_exposed_dev_list:1;
u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
@@ -1792,7 +1793,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_64bit_mode:1;
u32 support_nvme_passthru:1;
u32 support_fw_exposed_dev_list:1;
- u32 reserved:16;
+ u32 support_memdump:1;
+ u32 reserved:15;
#endif
} mfi_capabilities;
__le32 reg;
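[Annotation, not part of the patch] The union megasas_sgl hunk above replaces one-element arrays with DECLARE_FLEX_ARRAY(), which lets a flexible array member live inside a union without tripping compiler and fortify bounds checks. A minimal illustration with hypothetical element types follows.

    /* Minimal illustration of DECLARE_FLEX_ARRAY() in a union (hypothetical
     * struct names). Each member behaves like a flexible array member even
     * though plain C does not allow one directly inside a union.
     */
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct sge32_example { __le32 addr; __le32 len; } __packed;
    struct sge64_example { __le64 addr; __le32 len; } __packed;

    union sgl_example {
            DECLARE_FLEX_ARRAY(struct sge32_example, sge32);
            DECLARE_FLEX_ARRAY(struct sge64_example, sge64);
    } __packed;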
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3ceece988338..317c944c68e3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3298,7 +3298,7 @@ fw_crash_buffer_show(struct device *cdev,
spin_lock_irqsave(&instance->crashdump_lock, flags);
buff_offset = instance->fw_crash_buffer_offset;
- if (!instance->crash_dump_buf &&
+ if (!instance->crash_dump_buf ||
!((instance->fw_crash_state == AVAILABLE) ||
(instance->fw_crash_state == COPYING))) {
dev_err(&instance->pdev->dev,
@@ -3505,7 +3505,7 @@ ATTRIBUTE_GROUPS(megaraid_host);
/*
* Scsi host template for megaraid_sas driver
*/
-static struct scsi_host_template megasas_template = {
+static const struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
.name = "Avago SAS based MegaRAID driver",
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index da1cad1ee123..4463a538102a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
ld = MR_TargetIdToLdGet(i, drv_map);
/* For non existing VDs, iterate to next VD*/
- if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
continue;
raid = MR_LdRaidGet(ld, drv_map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 6597e118c805..8a83f3fc2b86 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1201,6 +1201,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_nvme_passthru = 1;
drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
+ if (reset_devices)
+ drv_ops->mfi_capabilities.support_memdump = 1;
+
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -4768,7 +4771,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
devhandle = megasas_get_tm_devhandle(scmd->device);
if (devhandle == (u16)ULONG_MAX) {
- ret = SUCCESS;
+ ret = FAILED;
sdev_printk(KERN_INFO, scmd->device,
"task abort issued for invalid devhandle\n");
mutex_unlock(&instance->reset_mutex);
@@ -4838,7 +4841,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
devhandle = megasas_get_tm_devhandle(scmd->device);
if (devhandle == (u16)ULONG_MAX) {
- ret = SUCCESS;
+ ret = FAILED;
sdev_printk(KERN_INFO, scmd->device,
"target reset issued for invalid devhandle\n");
mutex_unlock(&instance->reset_mutex);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 84b541a57b7b..e276583c590c 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1830,7 +1830,7 @@ static int mesh_shutdown(struct macio_dev *mdev)
return 0;
}
-static struct scsi_host_template mesh_template = {
+static const struct scsi_host_template mesh_template = {
.proc_name = "mesh",
.name = "MESH",
.queuecommand = mesh_queue,
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 0a2af48915a5..2fc196499c89 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2017-2022 Broadcom Inc. All rights reserved.
+ * Copyright 2017-2023 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_CNFG_H
#define MPI30_CNFG_H 1
@@ -63,8 +63,9 @@
#define MPI3_PCIE_LINK_PGAD_LINKNUM_MASK (0x000000ff)
#define MPI3_SECURITY_PGAD_FORM_MASK (0xf0000000)
#define MPI3_SECURITY_PGAD_FORM_GET_NEXT_SLOT (0x00000000)
-#define MPI3_SECURITY_PGAD_FORM_SOT_NUM (0x10000000)
+#define MPI3_SECURITY_PGAD_FORM_SLOT_NUM (0x10000000)
#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
+#define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8)
#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
struct mpi3_config_request {
__le16 host_tag;
@@ -135,7 +136,6 @@ struct mpi3_config_page_header {
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
-#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_MASK (0x04000000)
#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_SHIFT (26)
#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_MASK (0x02000000)
@@ -201,6 +201,11 @@ struct mpi3_config_page_header {
#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
+#define MPI3_MFGPAGE_DEVID_SAS5116_MPI (0x00b3)
+#define MPI3_MFGPAGE_DEVID_SAS5116_NVME (0x00b4)
+#define MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5)
+#define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6)
+#define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
@@ -466,7 +471,7 @@ struct mpi3_man_page9 {
#define MPI3_MAN9_PAGEVERSION (0x00)
struct mpi3_man10_istwi_ctrlr_entry {
- __le16 slave_address;
+ __le16 target_address;
__le16 flags;
u8 scl_low_override;
u8 scl_high_override;
@@ -476,8 +481,8 @@ struct mpi3_man10_istwi_ctrlr_entry {
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004)
-#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
-#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_TARGET_ENABLED (0x0002)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_INITIATOR_ENABLED (0x0001)
#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
#define MPI3_MAN10_ISTWI_CTRLR_MAX (1)
#endif
@@ -1160,7 +1165,7 @@ struct mpi3_io_unit_page12 {
struct mpi3_iounit13_allowed_function {
__le16 sub_function;
u8 function_code;
- u8 fuction_flags;
+ u8 function_flags;
};
#define MPI3_IOUNIT13_FUNCTION_FLAGS_ADMIN_BLOCKED (0x04)
#define MPI3_IOUNIT13_FUNCTION_FLAGS_OOB_BLOCKED (0x02)
@@ -1176,6 +1181,48 @@ struct mpi3_io_unit_page13 {
#define MPI3_IOUNIT13_PAGEVERSION (0x00)
#define MPI3_IOUNIT13_FLAGS_ADMIN_BLOCKED (0x0002)
#define MPI3_IOUNIT13_FLAGS_OOB_BLOCKED (0x0001)
+#ifndef MPI3_IOUNIT14_MD_MAX
+#define MPI3_IOUNIT14_MD_MAX (1)
+#endif
+struct mpi3_iounit14_pagemetadata {
+ u8 page_type;
+ u8 page_number;
+ u8 reserved02;
+ u8 page_flags;
+};
+#define MPI3_IOUNIT14_PAGEMETADATA_PAGEFLAGS_OOBWRITE_ALLOWED (0x02)
+#define MPI3_IOUNIT14_PAGEMETADATA_PAGEFLAGS_HOSTWRITE_ALLOWED (0x01)
+struct mpi3_io_unit_page14 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ u8 num_pages;
+ u8 reserved0d[3];
+ struct mpi3_iounit14_pagemetadata page_metadata[MPI3_IOUNIT14_MD_MAX];
+};
+#define MPI3_IOUNIT14_PAGEVERSION (0x00)
+#define MPI3_IOUNIT14_FLAGS_READONLY (0x01)
+#ifndef MPI3_IOUNIT15_PBD_MAX
+#define MPI3_IOUNIT15_PBD_MAX (1)
+#endif
+struct mpi3_io_unit_page15 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ u8 power_budgeting_capability;
+ u8 reserved11[3];
+ u8 num_power_budget_data;
+ u8 reserved15[3];
+ __le32 power_budget_data[MPI3_IOUNIT15_PBD_MAX];
+};
+#define MPI3_IOUNIT15_PAGEVERSION (0x00)
+#define MPI3_IOUNIT15_FLAGS_EPRINIT_INITREQUIRED (0x04)
+#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_MASK (0x03)
+#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_NOT_SUPPORTED (0x00)
+#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01)
+#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02)
+#define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -1273,6 +1320,7 @@ struct mpi3_driver_page0 {
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002)
struct mpi3_driver_page1 {
struct mpi3_config_page_header header;
__le32 flags;
@@ -1340,7 +1388,7 @@ union mpi3_driver2_trigger_element {
#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01)
struct mpi3_driver_page2 {
struct mpi3_config_page_header header;
- __le64 master_trigger;
+ __le64 global_trigger;
__le32 reserved10[3];
u8 num_triggers;
u8 reserved1d[3];
@@ -1348,11 +1396,13 @@ struct mpi3_driver_page2 {
};
#define MPI3_DRIVER2_PAGEVERSION (0x00)
-#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
-#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
-#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL)
-#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
-#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
+#define MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
+#define MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
+#define MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED (0x2000000000000000ULL)
+#define MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED (0x1000000000000000ULL)
+#define MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED (0x0800000000000000ULL)
+#define MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
+#define MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
struct mpi3_driver_page10 {
struct mpi3_config_page_header header;
__le16 flags;
@@ -1395,6 +1445,12 @@ union mpi3_security_nonce {
u8 byte[64];
};
+union mpi3_security_root_digest {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
union mpi3_security0_cert_chain {
__le32 dword[1024];
__le16 word[2048];
@@ -1467,6 +1523,32 @@ struct mpi3_security_page1 {
};
#define MPI3_SECURITY1_PAGEVERSION (0x00)
+#ifndef MPI3_SECURITY2_TRUSTED_ROOT_MAX
+#define MPI3_SECURITY2_TRUSTED_ROOT_MAX 1
+#endif
+struct mpi3_security2_trusted_root {
+ u8 level;
+ u8 hash_algorithm;
+ __le16 trusted_root_flags;
+ __le32 reserved04[3];
+ union mpi3_security_root_digest root_digest;
+};
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_MASK (0x0006)
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_SHIFT (1)
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_HA_FIELD (0x0000)
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_AKI (0x0002)
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_USERPROVISIONED_YES (0x0001)
+struct mpi3_security_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ __le32 reserved90[3];
+ u8 num_roots;
+ u8 reserved9d[3];
+ struct mpi3_security2_trusted_root trusted_root[MPI3_SECURITY2_TRUSTED_ROOT_MAX];
+};
+#define MPI3_SECURITY2_PAGEVERSION (0x00)
struct mpi3_sas_io_unit0_phy_data {
u8 io_unit_port;
u8 port_flags;
@@ -2351,6 +2433,10 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
+#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK (0xe000)
+#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT (0x0000)
+#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB (0x2000)
+#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB (0x4000)
#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
#define MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED (0x0010)
#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 64c58815988a..47035b811902 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2018-2022 Broadcom Inc. All rights reserved.
+ * Copyright 2018-2023 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IMAGE_H
#define MPI30_IMAGE_H 1
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index 3c03610ecfa6..af86d12c8e49 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ * Copyright 2016-2023 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_INIT_H
#define MPI30_INIT_H 1
@@ -56,6 +56,7 @@ struct mpi3_scsi_io_request {
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
#define MPI3_SCSIIO_METASGL_INDEX (3)
struct mpi3_scsi_io_reply {
@@ -114,4 +115,24 @@ struct mpi3_scsi_io_reply {
#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000)
#define MPI3_SCSI_RSP_ARI0_SHIFT (24)
#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff)
+#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02)
+#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a)
+#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00)
+#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a)
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81)
#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 1c6c6730df5c..f5e9c2309ce6 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ * Copyright 2016-2023 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IOC_H
#define MPI30_IOC_H 1
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
index b7a5df01120d..7c15e5851ce4 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ * Copyright 2016-2023 Broadcom Inc. All rights reserved.
*
*/
#ifndef MPI30_PCI_H
#define MPI30_PCI_H 1
-
+#ifndef MPI3_NVME_ENCAP_CMD_MAX
+#define MPI3_NVME_ENCAP_CMD_MAX (1)
+#endif
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index e587f77ccd68..4a93c67d335f 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ * Copyright 2016-2023 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_SAS_H
#define MPI30_SAS_H 1
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 9b76b9632751..441cfc2c7f09 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ * Copyright 2016-2023 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_TRANSPORT_H
#define MPI30_TRANSPORT_H 1
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (26)
+#define MPI3_VERSION_UNIT (27)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 23de2603e71f..dfe6b87fe288 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2022 Broadcom Inc.
+ * Copyright (C) 2017-2023 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -55,8 +55,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.2.0.3.0"
-#define MPI3MR_DRIVER_RELDATE "08-September-2022"
+#define MPI3MR_DRIVER_VERSION "8.4.1.0.0"
+#define MPI3MR_DRIVER_RELDATE "16-March-2023"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -126,6 +126,7 @@ extern atomic64_t event_counter;
#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
#define MPI3MR_RESET_ACK_TIMEOUT 30
+#define MPI3MR_MUR_TIMEOUT 120
#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */
@@ -652,7 +653,11 @@ union _form_spec_inf {
struct tgt_dev_vd vd_inf;
};
-
+enum mpi3mr_dev_state {
+ MPI3MR_DEV_CREATED = 1,
+ MPI3MR_DEV_REMOVE_HS_STARTED = 2,
+ MPI3MR_DEV_DELETED = 3,
+};
/**
* struct mpi3mr_tgt_dev - target device data structure
@@ -676,6 +681,7 @@ union _form_spec_inf {
* @enclosure_logical_id: Enclosure logical identifier
* @dev_spec: Device type specific information
* @ref_count: Reference count
+ * @state: device state
*/
struct mpi3mr_tgt_dev {
struct list_head list;
@@ -697,6 +703,7 @@ struct mpi3mr_tgt_dev {
u64 enclosure_logical_id;
union _form_spec_inf dev_spec;
struct kref ref_count;
+ enum mpi3mr_dev_state state;
};
/**
@@ -902,6 +909,7 @@ struct scmd_priv {
* @admin_reply_ephase:Admin reply queue expected phase
* @admin_reply_base: Admin reply queue base virtual address
* @admin_reply_dma: Admin reply queue base dma address
+ * @admin_reply_q_in_use: Queue is handled by poll/ISR
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
@@ -1055,6 +1063,7 @@ struct mpi3mr_ioc {
u8 admin_reply_ephase;
void *admin_reply_base;
dma_addr_t admin_reply_dma;
+ atomic_t admin_reply_q_in_use;
u32 ready_timeout;
@@ -1390,4 +1399,7 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
#endif /*MPI3MR_H_INCLUDED*/
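[Annotation, not part of the patch] The mpi3mr.h hunks above add an atomic_t admin_reply_q_in_use counter so that only one context (ISR or poll) drains the admin reply queue at a time; the matching atomic_add_unless()/atomic_dec() guard appears in the mpi3mr_fw.c hunks further down. A generic sketch of that single-entrant pattern, with placeholder names, follows.

    /* Generic single-entrant guard built on atomic_add_unless(), mirroring
     * the admin_reply_q_in_use usage added by this patch (placeholder names).
     * atomic_add_unless(v, 1, 1) increments only when the counter is not
     * already 1, so a concurrent caller simply backs off.
     */
    #include <linux/atomic.h>

    static atomic_t q_in_use = ATOMIC_INIT(0);

    static int process_queue_once(void)
    {
            if (!atomic_add_unless(&q_in_use, 1, 1))
                    return 0;       /* another context is already draining */

            /* ... drain the queue ... */

            atomic_dec(&q_in_use);
            return 1;
    }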
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index bff637702397..08645a99ad6b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2022 Broadcom Inc.
+ * Copyright (C) 2017-2023 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -886,7 +886,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
* each time through the loop.
*/
*prp_entry = cpu_to_le64(dma_addr);
- if (*prp1_entry & sgemod_mask) {
+ if (*prp_entry & sgemod_mask) {
dprint_bsg_err(mrioc,
"%s: PRP address collides with SGE modifier\n",
__func__);
@@ -895,7 +895,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
*prp_entry &= ~sgemod_mask;
*prp_entry |= sgemod_val;
prp_entry++;
- prp_entry_dma++;
+ prp_entry_dma += prp_size;
}
/*
@@ -922,6 +922,7 @@ err_out:
/**
* mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
* @job: BSG job reference
+ * @reply_payload_rcv_len: length of payload recvd
*
* This function is the top level handler for MPI Pass through
* command, this does basic validation of the input data buffers,
@@ -1471,6 +1472,7 @@ static int mpi3mr_bsg_request(struct bsg_job *job)
/**
* mpi3mr_bsg_exit - de-registration from bsg layer
+ * @mrioc: Adapter instance reference
*
* This will be called during driver unload and all
* bsg resources allocated during load will be freed.
@@ -1505,6 +1507,7 @@ static void mpi3mr_bsg_node_release(struct device *dev)
/**
* mpi3mr_bsg_init - registration with bsg layer
+ * @mrioc: Adapter instance reference
*
* This will be called during driver load and it will
* register the driver with the bsg layer
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
index ee6edd8322e6..e94f7520d153 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_debug.h
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2022 Broadcom Inc.
+ * Copyright (C) 2017-2023 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 758f7ca9e0ee..075fa67e95ee 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2022 Broadcom Inc.
+ * Copyright (C) 2017-2023 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -415,7 +415,7 @@ out:
le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
-static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
u32 exp_phase = mrioc->admin_reply_ephase;
u32 admin_reply_ci = mrioc->admin_reply_ci;
@@ -423,12 +423,17 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u64 reply_dma = 0;
struct mpi3_default_reply_descriptor *reply_desc;
+ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+ return 0;
+
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
if ((le16_to_cpu(reply_desc->reply_flags) &
- MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&mrioc->admin_reply_q_in_use);
return 0;
+ }
do {
if (mrioc->unrecoverable)
@@ -454,6 +459,7 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
mrioc->admin_reply_ci = admin_reply_ci;
mrioc->admin_reply_ephase = exp_phase;
+ atomic_dec(&mrioc->admin_reply_q_in_use);
return num_admin_replies;
}
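
The change above lets the new polling entry point share the admin reply queue with the interrupt handler: atomic_add_unless() acts as a 0-to-1 gate so only one context drains the queue at a time, and the loser simply returns. A minimal sketch of that gate pattern, with hypothetical names:

#include <linux/atomic.h>

/* Illustrative only: let exactly one context run the handler at a time;
 * contexts that lose the race return immediately and retry later. */
static atomic_t handler_in_use = ATOMIC_INIT(0);

static int try_run_handler(void (*handler)(void))
{
        /* atomic_add_unless() refuses the 0->1 transition (returns 0)
         * when the counter already holds 1, i.e. another context owns it. */
        if (!atomic_add_unless(&handler_in_use, 1, 1))
                return 0;               /* busy: being processed elsewhere */

        handler();                      /* exclusive section */

        atomic_dec(&handler_in_use);    /* release the gate */
        return 1;
}
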
@@ -1092,7 +1098,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
- timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
+ timeout = MPI3MR_MUR_TIMEOUT * 10;
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
@@ -1192,7 +1198,7 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
*/
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
- u32 ioc_config, ioc_status, timeout;
+ u32 ioc_config, ioc_status, timeout, host_diagnostic;
int retval = 0;
enum mpi3mr_iocstate ioc_state;
u64 base_info;
@@ -1246,6 +1252,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
retval, mpi3mr_iocstate_name(ioc_state));
}
if (ioc_state != MRIOC_STATE_RESET) {
+ if (ioc_state == MRIOC_STATE_FAULT) {
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ mpi3mr_print_fault_info(mrioc);
+ do {
+ host_diagnostic =
+ readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic &
+ MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present at the bringup\n");
+ goto out_device_not_present;
+ }
+ msleep(100);
+ } while (--timeout);
+ }
mpi3mr_print_fault_info(mrioc);
ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
retval = mpi3mr_issue_reset(mrioc,
@@ -2503,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mrioc->unrecoverable = 1;
goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
- return;
+ goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
break;
@@ -2597,14 +2620,13 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
mrioc->num_admin_req = mrioc->admin_req_q_sz /
MPI3MR_ADMIN_REQ_FRAME_SZ;
mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
- mrioc->admin_req_base = NULL;
mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
MPI3MR_ADMIN_REPLY_FRAME_SZ;
mrioc->admin_reply_ci = 0;
mrioc->admin_reply_ephase = 1;
- mrioc->admin_reply_base = NULL;
+ atomic_set(&mrioc->admin_reply_q_in_use, 0);
if (!mrioc->admin_req_base) {
mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
@@ -3813,27 +3835,34 @@ retry_init:
mpi3mr_print_ioc_info(mrioc);
- dprint_init(mrioc, "allocating config page buffers\n");
- mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
- MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
- if (!mrioc->cfg_page)
- goto out_failed_noretry;
-
- mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+ if (!mrioc->cfg_page) {
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ }
- retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc,
- "%s :Failed to allocated reply sense buffers %d\n",
- __func__, retval);
- goto out_failed_noretry;
+ if (!mrioc->init_cmds.reply) {
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
+ }
}
- retval = mpi3mr_alloc_chain_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
- retval);
- goto out_failed_noretry;
+ if (!mrioc->chain_sgl_list) {
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
}
retval = mpi3mr_issue_iocinit(mrioc);
@@ -3879,8 +3908,10 @@ retry_init:
dprint_init(mrioc, "allocating memory for throttle groups\n");
sz = sizeof(struct mpi3mr_throttle_group_info);
mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
- if (!mrioc->throttle_groups)
+ if (!mrioc->throttle_groups) {
+ retval = -1;
goto out_failed_noretry;
+ }
}
retval = mpi3mr_enable_events(mrioc);
@@ -3900,6 +3931,7 @@ out_failed:
mpi3mr_memset_buffers(mrioc);
goto retry_init;
}
+ retval = -1;
out_failed_noretry:
ioc_err(mrioc, "controller initialization failed\n");
mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
@@ -4012,6 +4044,7 @@ retry_init:
ioc_err(mrioc,
"cannot create minimum number of operational queues expected:%d created:%d\n",
mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
+ retval = -1;
goto out_failed_noretry;
}
@@ -4078,6 +4111,7 @@ out_failed:
mpi3mr_memset_buffers(mrioc);
goto retry_init;
}
+ retval = -1;
out_failed_noretry:
ioc_err(mrioc, "controller %s is failed\n",
(is_resume)?"resume":"re-initialization");
@@ -4155,6 +4189,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
if (mrioc->admin_reply_base)
memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+ atomic_set(&mrioc->admin_reply_q_in_use, 0);
if (mrioc->init_cmds.reply) {
memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
@@ -4350,13 +4385,20 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
mrioc->admin_req_base, mrioc->admin_req_dma);
mrioc->admin_req_base = NULL;
}
-
+ if (mrioc->cfg_page) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
+ mrioc->cfg_page, mrioc->cfg_page_dma);
+ mrioc->cfg_page = NULL;
+ }
if (mrioc->pel_seqnum_virt) {
dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
mrioc->pel_seqnum_virt = NULL;
}
+ kfree(mrioc->throttle_groups);
+ mrioc->throttle_groups = NULL;
+
kfree(mrioc->logdata_buf);
mrioc->logdata_buf = NULL;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 6eaeba41072c..d627355303d7 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2022 Broadcom Inc.
+ * Copyright (C) 2017-2023 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -652,6 +652,7 @@ static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
mpi3mr_tgtdev_get(tgtdev);
INIT_LIST_HEAD(&tgtdev->list);
list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
+ tgtdev->state = MPI3MR_DEV_CREATED;
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
@@ -659,20 +660,25 @@ static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
* mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list
* @mrioc: Adapter instance reference
* @tgtdev: Target device
+ * @must_delete: Whether to delete the target device from the list
+ * irrespective of the device state.
*
* Remove the target device from the target device list
*
* Return: Nothing.
*/
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_tgt_dev *tgtdev)
+ struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
unsigned long flags;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- if (!list_empty(&tgtdev->list)) {
- list_del_init(&tgtdev->list);
- mpi3mr_tgtdev_put(tgtdev);
+ if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ tgtdev->state = MPI3MR_DEV_DELETED;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
}
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
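
The new state field makes unlinking a two-sided handshake: the task-management path marks the device when a removal handshake starts (see the mpi3mr_dev_rmhs_send_tm() hunk further down), and the list-removal path only unlinks it once that mark, or an explicit override, is present. A minimal self-contained sketch of the same idea, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative only; every name here is hypothetical. */
enum demo_dev_state {
        DEMO_DEV_CREATED = 1,
        DEMO_DEV_REMOVE_HS_STARTED,
        DEMO_DEV_DELETED,
};

struct demo_dev {
        struct list_head list;
        enum demo_dev_state state;
};

static DEFINE_SPINLOCK(demo_list_lock);

static void demo_mark_remove_started(struct demo_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_list_lock, flags);
        dev->state = DEMO_DEV_REMOVE_HS_STARTED;
        spin_unlock_irqrestore(&demo_list_lock, flags);
}

static void demo_del_from_list(struct demo_dev *dev, bool must_delete)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_list_lock, flags);
        if (must_delete || dev->state == DEMO_DEV_REMOVE_HS_STARTED) {
                if (!list_empty(&dev->list)) {
                        list_del_init(&dev->list);
                        dev->state = DEMO_DEV_DELETED;
                }
        }
        spin_unlock_irqrestore(&demo_list_lock, flags);
}
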
@@ -1036,7 +1042,7 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
tgtdev->perst_id);
if (tgtdev->host_exposed)
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
- mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
mpi3mr_tgtdev_put(tgtdev);
}
}
@@ -1281,12 +1287,12 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
if (!tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
}
- if (tgtdev->starget && tgtdev->starget->hostdata) {
- if (delete)
- mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
- }
+
+ if (delete)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+
if (cleanup) {
- mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
mpi3mr_tgtdev_put(tgtdev);
}
@@ -1604,7 +1610,7 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
if (tgtdev->host_exposed)
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
- mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
mpi3mr_tgtdev_put(tgtdev);
break;
case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
@@ -1762,7 +1768,7 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
if (tgtdev->host_exposed)
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
- mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
mpi3mr_tgtdev_put(tgtdev);
break;
default:
@@ -2016,12 +2022,18 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev = NULL;
u16 perst_id = 0;
+ unsigned long flags;
perst_id = le16_to_cpu(dev_pg0->persistent_id);
if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
return retval;
- tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (tgtdev)
+ tgtdev->state = MPI3MR_DEV_CREATED;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
if (tgtdev) {
mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_put(tgtdev);
@@ -2219,6 +2231,14 @@ static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
u8 retrycount = 5;
struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
+ tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
if (drv_cmd)
goto issue_cmd;
@@ -3311,19 +3331,19 @@ static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
{
u8 retry_count = 5;
int cmd_idx = -1;
+ unsigned long flags;
+ spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
do {
- spin_lock(&mrioc->chain_buf_lock);
cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
mrioc->chain_buf_count);
if (cmd_idx < mrioc->chain_buf_count) {
set_bit(cmd_idx, mrioc->chain_bitmap);
- spin_unlock(&mrioc->chain_buf_lock);
break;
}
- spin_unlock(&mrioc->chain_buf_lock);
cmd_idx = -1;
} while (retry_count--);
+ spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
return cmd_idx;
}
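
The chain-index allocator above now takes chain_buf_lock once with irqsave around the whole lookup instead of re-acquiring a non-irqsave lock on every retry. A minimal sketch of claiming a slot from a bitmap under an irq-safe spinlock (the driver additionally keeps a bounded retry loop), with hypothetical names:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

/* Illustrative only: find and claim the first free slot in a bitmap
 * while holding an irq-safe spinlock; returns -1 when all are in use. */
static int demo_get_idx(unsigned long *bitmap, unsigned int nbits,
                        spinlock_t *lock)
{
        unsigned long flags;
        unsigned int idx;
        int found = -1;

        spin_lock_irqsave(lock, flags);
        idx = find_first_zero_bit(bitmap, nbits);
        if (idx < nbits) {
                set_bit(idx, bitmap);
                found = idx;
        }
        spin_unlock_irqrestore(lock, flags);

        return found;
}
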
@@ -3720,6 +3740,7 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
mpi3mr_poll_pend_io_completions(mrioc);
mpi3mr_ioc_enable_intr(mrioc);
mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_process_admin_reply_q(mrioc);
}
switch (tm_type) {
case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
@@ -3995,10 +4016,14 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
if (stgt_priv_data->dev_removed) {
+ struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
sdev_printk(KERN_INFO, scmd->device,
"%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
mrioc->name, dev_handle);
- retval = FAILED;
+ if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
+ retval = SUCCESS;
+ else
+ retval = FAILED;
goto out;
}
sdev_printk(KERN_INFO, scmd->device,
@@ -4063,10 +4088,14 @@ static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
if (stgt_priv_data->dev_removed) {
+ struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
sdev_printk(KERN_INFO, scmd->device,
"%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
mrioc->name, dev_handle);
- retval = FAILED;
+ if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
+ retval = SUCCESS;
+ else
+ retval = FAILED;
goto out;
}
sdev_printk(KERN_INFO, scmd->device,
@@ -4624,13 +4653,24 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+
+ /* Avoid error handling escalation when device is removed or blocked */
+
+ if (scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY &&
+ (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) {
+ scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
+ scsi_done(scmd);
+ goto out;
+ }
+
if (mrioc->reset_in_progress) {
retval = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
- stgt_priv_data = sdev_priv_data->tgt_priv_data;
-
if (atomic_read(&stgt_priv_data->block_io)) {
if (mrioc->stop_drv_processing) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4641,7 +4681,6 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
- dev_handle = stgt_priv_data->dev_handle;
if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
scsi_done(scmd);
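
The queuecommand hunk above answers TEST_UNIT_READY with a unit attention (ASC/ASCQ 0x29/0x07) while the host is in recovery and the device is removed, instead of bouncing the command and escalating error handling. A minimal sketch of completing a command with that sense data via the in-kernel scsi_build_sense() helper (the surrounding function name is hypothetical):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_proto.h>

/* Illustrative only: finish a command with UNIT ATTENTION,
 * ASC 0x29 / ASCQ 0x07 ("I_T nexus loss occurred"). */
static void demo_complete_with_ua(struct scsi_cmnd *scmd)
{
        /* desc = 0 selects fixed-format sense data */
        scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
        scsi_done(scmd);
}
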
@@ -4757,7 +4796,7 @@ out:
return retval;
}
-static struct scsi_host_template mpi3mr_driver_template = {
+static const struct scsi_host_template mpi3mr_driver_template = {
.module = THIS_MODULE,
.name = "MPI3 Storage Controller",
.proc_name = MPI3MR_DRIVER_NAME,
@@ -5077,6 +5116,8 @@ static void mpi3mr_remove(struct pci_dev *pdev)
struct workqueue_struct *wq;
unsigned long flags;
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_hba_port *port, *hba_port_next;
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
if (!shost)
return;
@@ -5108,7 +5149,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
- mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
mpi3mr_tgtdev_put(tgtdev);
}
mpi3mr_stop_watchdog(mrioc);
@@ -5116,6 +5157,28 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mpi3mr_free_mem(mrioc);
mpi3mr_cleanup_resources(mrioc);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ port, port->port_id);
+ list_del(&port->list);
+ kfree(port);
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->sas_hba.num_phys) {
+ kfree(mrioc->sas_hba.phy);
+ mrioc->sas_hba.phy = NULL;
+ mrioc->sas_hba.num_phys = 0;
+ }
+
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 3b61815979da..82b55e955730 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -2,16 +2,13 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2022 Broadcom Inc.
+ * Copyright (C) 2017-2023 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
#include "mpi3mr.h"
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_sas_node *sas_expander);
-
/**
* mpi3mr_post_transport_req - Issue transport requests and wait
* @mrioc: Adapter instance reference
@@ -1552,7 +1549,8 @@ static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
list_for_each_entry_safe(mr_sas_phy, next_phy,
&mr_sas_port->phy_list, port_siblings) {
- if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ if ((!mrioc->stop_drv_processing) &&
+ (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
dev_info(&mr_sas_port->port->dev,
"remove: sas_address(0x%016llx), phy(%d)\n",
(unsigned long long)
@@ -2060,7 +2058,7 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
GFP_KERNEL);
if (!sas_expander)
- return -1;
+ return -ENOMEM;
sas_expander->handle = handle;
sas_expander->num_phys = expander_pg0.num_phys;
@@ -2163,7 +2161,7 @@ out_fail:
*
* Return nothing.
*/
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
struct mpi3mr_sas_node *sas_expander)
{
struct mpi3mr_sas_port *mr_sas_port, *next;
@@ -2357,15 +2355,16 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
tgtdev->host_exposed = 1;
if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
sas_address_parent, hba_port)) {
- tgtdev->host_exposed = 0;
retval = -1;
- } else if ((!tgtdev->starget)) {
- if (!mrioc->is_driver_loading)
+ } else if ((!tgtdev->starget) && (!mrioc->is_driver_loading)) {
mpi3mr_sas_port_remove(mrioc, sas_address,
sas_address_parent, hba_port);
- tgtdev->host_exposed = 0;
retval = -1;
}
+ if (retval) {
+ tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
+ tgtdev->host_exposed = 0;
+ }
return retval;
}
@@ -2394,6 +2393,7 @@ void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
hba_port);
tgtdev->host_exposed = 0;
+ tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
}
/**
@@ -2450,7 +2450,7 @@ static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *
tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
rphy->identify.sas_address, rphy);
- if (tgtdev) {
+ if (tgtdev && tgtdev->dev_spec.sas_sata_inf.hba_port) {
port_id =
tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
mpi3mr_tgtdev_put(tgtdev);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 2ee9ea57554d..53f5492579cb 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -60,7 +60,6 @@
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h> /* To get host page size per arch */
-#include <linux/aer.h>
#include "mpt3sas_base.h"
@@ -3535,7 +3534,6 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
if (pci_is_enabled(pdev)) {
pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
}
@@ -3615,9 +3613,6 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
-/* AER (Advanced Error Reporting) hooks */
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
@@ -4761,21 +4756,15 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
int i = 0;
char desc[17] = {0};
u32 iounit_pg1_flags;
- u32 bios_version;
- bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
strncpy(desc, ioc->manu_pg0.ChipName, 16);
- ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
desc,
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF,
- ioc->pdev->revision,
- (bios_version & 0xFF000000) >> 24,
- (bios_version & 0x00FF0000) >> 16,
- (bios_version & 0x0000FF00) >> 8,
- bios_version & 0x000000FF);
+ ioc->pdev->revision);
_base_display_OEMs_branding(ioc);
@@ -6616,11 +6605,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
else if (rc == -EAGAIN)
goto try_32bit_dma;
total_sz += sense_sz;
- ioc_info(ioc,
- "sense pool(0x%p)- dma(0x%llx): depth(%d),"
- "element_size(%d), pool_size(%d kB)\n",
- ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz / 1024);
/* reply pool, 4 byte align */
sz = ioc->reply_free_queue_depth * ioc->reply_sz;
rc = _base_allocate_reply_pool(ioc, sz);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8e24ebcebfe5..c3c1f466fe01 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -52,7 +52,6 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>
@@ -11926,7 +11925,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
}
/* shost template for SAS 2.0 HBA devices */
-static struct scsi_host_template mpt2sas_driver_template = {
+static const struct scsi_host_template mpt2sas_driver_template = {
.module = THIS_MODULE,
.name = "Fusion MPT SAS Host",
.proc_name = MPT2SAS_DRIVER_NAME,
@@ -11964,7 +11963,7 @@ static struct raid_function_template mpt2sas_raid_functions = {
};
/* shost template for SAS 3.0 HBA devices */
-static struct scsi_host_template mpt3sas_driver_template = {
+static const struct scsi_host_template mpt3sas_driver_template = {
.module = THIS_MODULE,
.name = "Fusion MPT SAS Host",
.proc_name = MPT3SAS_DRIVER_NAME,
@@ -12930,10 +12929,10 @@ _mpt3sas_exit(void)
pr_info("mpt3sas version %s unloading\n",
MPT3SAS_DRIVER_VERSION);
- mpt3sas_ctl_exit(hbas_to_enumerate);
-
pci_unregister_driver(&mpt3sas_driver);
+ mpt3sas_ctl_exit(hbas_to_enumerate);
+
scsih_exit();
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e5ecd6ada6cd..e8a4750f6ec4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
port = sas_port_alloc_num(sas_node->parent_dev);
- if ((sas_port_add(port))) {
+ if (!port || (sas_port_add(port))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
goto out_fail;
@@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_port->remote_identify.sas_address;
}
+ if (!rphy) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_delete_port;
+ }
+
rphy->identify = mpt3sas_port->remote_identify;
if ((sas_rphy_add(rphy))) {
@@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
__FILE__, __LINE__, __func__);
sas_rphy_free(rphy);
rphy = NULL;
+ goto out_delete_port;
}
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
rphy_to_expander_device(rphy), hba_port->port_id);
return mpt3sas_port;
- out_fail:
+out_delete_port:
+ sas_port_delete(port);
+
+out_fail:
list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
port_siblings)
list_del(&mpt3sas_phy->port_siblings);
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 472fa043094f..98b99c0f5bc7 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -69,7 +69,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
m147_pcc->dma_cntrl = 0;
}
-static struct scsi_host_template mvme147_host_template = {
+static const struct scsi_host_template mvme147_host_template = {
.module = THIS_MODULE,
.proc_name = "MVME147",
.name = "MVME147 built-in SCSI",
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index cfe84473a515..49e2a5e7ce54 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -29,7 +29,7 @@ static const struct attribute_group *mvst_host_groups[];
#define SOC_SAS_NUM 2
-static struct scsi_host_template mvs_sht = {
+static const struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 60c65586f30e..73aa7059b556 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2168,7 +2168,7 @@ mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
-static struct scsi_host_template mvumi_template = {
+static const struct scsi_host_template mvumi_template = {
.module = THIS_MODULE,
.name = "Marvell Storage Controller",
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index e885c1dbf61f..ca2e932dd9b7 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -2203,7 +2203,7 @@ static struct attribute *myrb_shost_attrs[] = {
ATTRIBUTE_GROUPS(myrb_shost);
-static struct scsi_host_template myrb_template = {
+static const struct scsi_host_template myrb_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrb",
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 7eb8c39da366..a1eec65a9713 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1915,7 +1915,7 @@ static void myrs_slave_destroy(struct scsi_device *sdev)
kfree(sdev->hostdata);
}
-static struct scsi_host_template myrs_template = {
+static const struct scsi_host_template myrs_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrs",
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 75bb0028ed74..b7987019686e 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -259,7 +259,7 @@ static void nsp32_dmessage(const char *, int, int, char *, ...);
/*
* max_sectors is currently limited up to 128.
*/
-static struct scsi_host_template nsp32_template = {
+static const struct scsi_host_template nsp32_template = {
.proc_name = "nsp32",
.name = "Workbit NinjaSCSI-32Bi/UDE",
.show_info = nsp32_show_info,
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 5d7dfefd6f6c..278c78d066c4 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -668,7 +668,7 @@ ATTRIBUTE_GROUPS(SYM53C500_shost);
/*
* scsi_host_template initializer
*/
-static struct scsi_host_template sym53c500_driver_template = {
+static const struct scsi_host_template sym53c500_driver_template = {
.module = THIS_MODULE,
.name = "SYM53C500",
.info = SYM53C500_info,
@@ -702,7 +702,7 @@ SYM53C500_config(struct pcmcia_device *link)
int ret;
int irq_level, port_base;
struct Scsi_Host *host;
- struct scsi_host_template *tpnt = &sym53c500_driver_template;
+ const struct scsi_host_template *tpnt = &sym53c500_driver_template;
struct sym53c500_data *data;
dev_dbg(&link->dev, "SYM53C500_config\n");
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index ec1a9ab61814..73cd25f30ca5 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3362,8 +3362,9 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
- pm8001_dbg(pm8001_ha, MSG, " register device is status = %d\n",
- status);
+ pm8001_dbg(pm8001_ha, INIT,
+ "register device status %d phy_id 0x%x device_id %d\n",
+ status, pm8001_dev->attached_phy, device_id);
switch (status) {
case DEVREG_SUCCESS:
pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
@@ -4278,7 +4279,7 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
- pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
+ pm8001_dbg(pm8001_ha, INIT, "unregister device device_id %d\n",
device_id);
return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 7e589fe3e010..2e886c1d867d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -43,7 +43,8 @@
#include "pm8001_chips.h"
#include "pm80xx_hwi.h"
-static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING |
+ PM8001_EVENT_LOGGING | PM8001_INIT_LOGGING;
module_param(logging_level, ulong, 0644);
MODULE_PARM_DESC(logging_level, " bits for enabling logging info.");
@@ -96,7 +97,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
/*
* The main structure which LLDD must register for scsi core.
*/
-static struct scsi_host_template pm8001_sht = {
+static const struct scsi_host_template pm8001_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -666,7 +667,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
* Currently we just set a fixed SAS address for our HBA; in production
* it should be read from the EEPROM.
*/
-static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
u8 i, j;
u8 sas_add[8];
@@ -679,6 +680,12 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
struct pm8001_ioctl_payload payload;
u16 deviceid;
int rc;
+ unsigned long time_remaining;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n");
+ return -EIO;
+ }
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
pm8001_ha->nvmd_completion = &completion;
@@ -703,16 +710,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
payload.offset = 0;
payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
if (!payload.func_specific) {
- pm8001_dbg(pm8001_ha, INIT, "mem alloc fail\n");
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "mem alloc fail\n");
+ return -ENOMEM;
}
rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
if (rc) {
kfree(payload.func_specific);
- pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "nvmd failed\n");
+ return -EIO;
+ }
+ time_remaining = wait_for_completion_timeout(&completion,
+ msecs_to_jiffies(60*1000)); // 1 min
+ if (!time_remaining) {
+ kfree(payload.func_specific);
+ pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
+ return -EIO;
}
- wait_for_completion(&completion);
+
for (i = 0, j = 0; i <= 7; i++, j++) {
if (pm8001_ha->chip_id == chip_8001) {
@@ -751,6 +765,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
SAS_ADDR_SIZE);
#endif
+ return 0;
}
/*
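
pm8001_init_sas_add() now returns an error code and bounds the NVM read with a 60-second wait_for_completion_timeout() instead of blocking indefinitely, and the probe path bails out when it fails. A minimal sketch of that bounded-wait pattern (the request function, the structure, and its completion field are hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_hba {
        struct completion *nvm_completion;      /* hypothetical field */
};

int demo_issue_nvm_read(struct demo_hba *hba);  /* hypothetical async request */

/* Illustrative only: issue an async request and treat a missing
 * completion within 60 seconds as an I/O error. */
static int demo_read_nvm(struct demo_hba *hba)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int rc;

        hba->nvm_completion = &done;
        rc = demo_issue_nvm_read(hba);
        if (rc)
                return -EIO;

        if (!wait_for_completion_timeout(&done, msecs_to_jiffies(60 * 1000)))
                return -EIO;                    /* request timed out */

        return 0;
}
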
@@ -1166,7 +1181,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm80xx_set_thermal_config(pm8001_ha);
}
- pm8001_init_sas_add(pm8001_ha);
+ if (pm8001_init_sas_add(pm8001_ha))
+ goto err_out_shost;
/* phy setting support for motherboard controller */
rc = pm8001_configure_phy_settings(pm8001_ha);
if (rc)
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index dc1f4d958e03..953572fc0d9e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -71,6 +71,7 @@
#define PM8001_DEV_LOGGING 0x80 /* development message logging */
#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
+#define PM8001_EVENT_LOGGING 0x400 /* HW event logging */
#define pm8001_info(HBA, fmt, ...) \
pr_info("%s:: %s %d: " fmt, \
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 9584cadc4201..39a12ee94a72 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3239,9 +3239,9 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- pm8001_dbg(pm8001_ha, DEVIO,
- "port id %d, phy id %d link_rate %d portstate 0x%x\n",
- port_id, phy_id, link_rate, portstate);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SATA_PHY_UP phyid:%#x port_id:%#x link_rate:%d portstate:%#x\n",
+ phy_id, port_id, link_rate, portstate);
phy->port = port;
port->port_id = port_id;
@@ -3291,10 +3291,14 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_attached = 0;
switch (portstate) {
case PORT_VALID:
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_VALID\n",
+ phy_id, port_id);
break;
case PORT_INVALID:
- pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
- port_id);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_INVALID\n",
+ phy_id, port_id);
pm8001_dbg(pm8001_ha, MSG,
" Last phy Down and port invalid\n");
if (port_sata) {
@@ -3306,18 +3310,21 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
sas_phy_disconnected(&phy->sas_phy);
break;
case PORT_IN_RESET:
- pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
- port_id);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_IN_RESET\n",
+ phy_id, port_id);
break;
case PORT_NOT_ESTABLISHED:
- pm8001_dbg(pm8001_ha, MSG,
- " Phy Down and PORT_NOT_ESTABLISHED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_NOT_ESTABLISHED\n",
+ phy_id, port_id);
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
- pm8001_dbg(pm8001_ha, MSG, " Phy Down and PORT_LOSTCOMM\n");
- pm8001_dbg(pm8001_ha, MSG,
- " Last phy Down and port invalid\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_LOSTCOMM\n",
+ phy_id, port_id);
+ pm8001_dbg(pm8001_ha, MSG, " Last phy Down and port invalid\n");
if (port_sata) {
port->port_attached = 0;
phy->phy_type = 0;
@@ -3328,9 +3335,9 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- pm8001_dbg(pm8001_ha, DEVIO,
- " Phy Down and(default) = 0x%x\n",
- portstate);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
break;
}
@@ -3410,6 +3417,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
u8 phy_id =
(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
u16 eventType =
(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
u8 status =
@@ -3425,26 +3433,29 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
switch (eventType) {
case HW_EVENT_SAS_PHY_UP:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SAS_PHY_UP phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_SPINUP_HOLD:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SATA_SPINUP_HOLD phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
GFP_ATOMIC);
break;
case HW_EVENT_PHY_DOWN:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
hw_event_phy_down(pm8001_ha, piomb);
- phy->phy_attached = 0;
phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_INVALID phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
@@ -3463,7 +3474,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_PHY_ERROR:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
@@ -3477,34 +3490,39 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_INVALID_DWORD phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_DISPARITY_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_CODE_VIOLATION phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_MALFUNCTION:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_MALFUNCTION phyid:%#x\n", phy_id);
break;
case HW_EVENT_BROADCAST_SES:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
@@ -3515,25 +3533,30 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_INBOUND_CRC_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_INBOUND_CRC_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_HARD_RESET_RECEIVED:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_HARD_RESET_RECEIVED phyid:%#x\n", phy_id);
sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_ID_FRAME_TIMEOUT phyid:%#x\n", phy_id);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_PHY_RESET_FAILED phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
@@ -3543,13 +3566,16 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RESET_TIMER_TMO phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
if (!pm8001_ha->phy[phy_id].reset_completion) {
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
}
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
+ port->port_state = portstate;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
GFP_ATOMIC);
if (pm8001_ha->phy[phy_id].reset_completion) {
@@ -3560,8 +3586,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_PORT_RECOVERY_TIMER_TMO,
port_id, phy_id, 0, 0);
@@ -3575,24 +3602,32 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVER:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RECOVER phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
hw_event_port_recover(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_RESET_COMPLETE:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RESET_COMPLETE phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
if (pm8001_ha->phy[phy_id].reset_completion) {
pm8001_ha->phy[phy_id].port_reset_status =
PORT_RESET_SUCCESS;
complete(pm8001_ha->phy[phy_id].reset_completion);
pm8001_ha->phy[phy_id].reset_completion = NULL;
}
+ phy->phy_attached = 1;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+ port->port_state = portstate;
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
break;
default:
- pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type 0x%x\n",
- eventType);
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown event portid:%d phyid:%d event:0x%x status:0x%x\n",
+ port_id, phy_id, eventType, status);
break;
}
return 0;
@@ -4726,6 +4761,9 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
+ pm8001_dbg(pm8001_ha, INIT,
+ "register device req phy_id 0x%x port_id 0x%x\n", phy_id,
+ (port->port_id & 0xFF));
rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
@@ -4815,7 +4853,7 @@ static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.ppc_phyid =
cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF));
- pm8001_dbg(pm8001_ha, INIT,
+ pm8001_dbg(pm8001_ha, DISC,
" phy profile command for phy %x ,length is %d\n",
le32_to_cpu(payload.ppc_phyid), length);
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 836ddc476764..9415a4819470 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3611,7 +3611,7 @@ static struct attribute *pmcraid_host_attrs[] = {
ATTRIBUTE_GROUPS(pmcraid_host);
/* host template structure for pmcraid driver */
-static struct scsi_host_template pmcraid_host_template = {
+static const struct scsi_host_template pmcraid_host_template = {
.module = THIS_MODULE,
.name = PMCRAID_DRIVER_NAME,
.queuecommand = pmcraid_queuecommand,
@@ -5346,7 +5346,7 @@ static int __init pmcraid_init(void)
}
pmcraid_major = MAJOR(dev);
- pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
+ pmcraid_class = class_create(PMCRAID_DEVFILE);
if (IS_ERR(pmcraid_class)) {
error = PTR_ERR(pmcraid_class);
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index c6c1bc608224..909c49541984 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -972,7 +972,7 @@ static int ppa_adjust_queue(struct scsi_device *device)
return 0;
}
-static struct scsi_host_template ppa_template = {
+static const struct scsi_host_template ppa_template = {
.module = THIS_MODULE,
.proc_name = "ppa",
.show_info = ppa_show_info,
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 2b80cab70333..90495a832f34 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -323,7 +323,7 @@ done:
return IRQ_HANDLED;
}
-static struct scsi_host_template ps3rom_host_template = {
+static const struct scsi_host_template ps3rom_host_template = {
.name = DEVICE_NAME,
.slave_configure = ps3rom_slave_configure,
.queuecommand = ps3rom_queuecommand,
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 35e16600fc63..2a31ddc99dde 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -979,7 +979,7 @@ static int qedf_slave_configure(struct scsi_device *sdev)
return 0;
}
-static struct scsi_host_template qedf_host_template = {
+static const struct scsi_host_template qedf_host_template = {
.module = THIS_MODULE,
.name = QEDF_MODULE_NAME,
.this_id = -1,
@@ -2224,7 +2224,6 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
u16 prod_idx;
struct fcoe_cqe *cqe;
struct qedf_io_work *io_work;
- int num_handled = 0;
unsigned int cpu;
struct qedf_ioreq *io_req = NULL;
u16 xid;
@@ -2247,7 +2246,6 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
while (new_cqes) {
fp->completions++;
- num_handled++;
cqe = &que->cq[que->cq_cons_idx];
comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
@@ -3043,9 +3041,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
* addresses of our queues
*/
if (!qedf->p_cpuq) {
- status = -EINVAL;
QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
- goto mem_alloc_failure;
+ return -EINVAL;
}
qedf->global_queues = kzalloc((sizeof(struct global_queue *)
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 72942772b198..0e316cc24b19 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -17,7 +17,7 @@ extern int qedi_do_not_recover;
extern uint qedi_io_tracing;
-extern struct scsi_host_template qedi_host_template;
+extern const struct scsi_host_template qedi_host_template;
extern struct iscsi_transport qedi_iscsi_transport;
extern const struct qed_iscsi_ops *qedi_ops;
extern const struct qedi_debugfs_ops qedi_debugfs_ops[];
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 31ec429104e2..6ed8ef97642c 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -40,7 +40,7 @@ static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
return qedi_recover_all_conns(qedi);
}
-struct scsi_host_template qedi_host_template = {
+const struct scsi_host_template qedi_host_template = {
.module = THIS_MODULE,
.name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
.proc_name = QEDI_MODULE_NAME,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index f2ee49756df8..45d359554182 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2450,6 +2450,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_ops->ll2->stop(qedi->cdev);
}
+ cancel_delayed_work_sync(&qedi->recovery_work);
+ cancel_delayed_work_sync(&qedi->board_disable_work);
+
qedi_free_iscsi_pf_param(qedi);
rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 1e7f4d138e06..6e5e89aaa283 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4115,7 +4115,7 @@ qla1280_get_token(char *str)
}
-static struct scsi_host_template qla1280_driver_template = {
+static const struct scsi_host_template qla1280_driver_template = {
.module = THIS_MODULE,
.proc_name = "qla1280",
.name = "Qlogic ISP 1280/12160",
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 70cfc94c3d43..b00222459607 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2750,6 +2750,7 @@ static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ scsi_qla_host_t *vha;
if (!fcport)
return;
@@ -2759,9 +2760,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
+ vha = fcport->vha;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
return;
}
/*
@@ -2786,6 +2790,15 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
qla2x00_port_logout(fcport->vha, fcport);
}
}
+
+ /* check for any straggling io left behind */
+ if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
+ ql_log(ql_log_warn, vha, 0x300b,
+ "IO not return. Resetting. \n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ }
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ec0e987b71fa..dfee3b41bdf1 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -22,7 +22,6 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
-#include <linux/aer.h>
#include <linux/mutex.h>
#include <linux/btree.h>
@@ -466,6 +465,15 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
return res;
}
+struct tmf_arg {
+ struct qla_qpair *qpair;
+ struct fc_port *fcport;
+ struct scsi_qla_host *vha;
+ u64 lun;
+ u32 flags;
+ uint8_t modifier;
+};
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
@@ -545,6 +553,10 @@ struct srb_iocb {
uint32_t data;
struct completion comp;
__le16 comp_status;
+
+ uint8_t modifier;
+ uint8_t vp_index;
+ uint16_t loop_id;
} tmf;
struct {
#define SRB_FXDISC_REQ_DMA_VALID BIT_0
@@ -648,6 +660,7 @@ struct srb_iocb {
#define SRB_SA_UPDATE 25
#define SRB_ELS_CMD_HST_NOLOGIN 26
#define SRB_SA_REPLACE 27
+#define SRB_MARKER 28
struct qla_els_pt_arg {
u8 els_opcode;
@@ -2529,6 +2542,7 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
+ struct list_head tmf_pending;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -2549,6 +2563,8 @@ typedef struct fc_port {
unsigned int do_prli_nvme:1;
uint8_t nvme_flag;
+ uint8_t active_tmf;
+#define MAX_ACTIVE_TMF 8
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -5499,4 +5515,8 @@ struct ql_vnd_tgt_stats_resp {
_fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
_fp->flags
+#define TMF_NOT_READY(_fcport) \
+ (!_fcport || IS_SESSION_DELETED(_fcport) || atomic_read(&_fcport->state) != FCS_ONLINE || \
+ !_fcport->vha->hw->flags.fw_started)
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9142df876c73..ba7831f24734 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -69,7 +69,7 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -192,6 +192,7 @@ extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
extern u32 ql2xnvme_queues;
+extern int ql2xfc2target;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -213,7 +214,7 @@ extern void qla2x00_free_exchoffld_buffer(struct qla_hw_data *);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
-extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
+extern struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *,
struct qla_hw_data *);
extern void qla2x00_free_host(struct scsi_qla_host *);
extern void qla2x00_relogin(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1dbc1496ebed..a2d48d6b1dfc 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1840,7 +1840,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE &&
+ if (ql2xfc2target &&
+ fcport->flags & FCF_FCP2_DEVICE &&
atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, vha, 0x2115,
"Delaying session delete for FCP2 portid=%06x %8phC ",
@@ -1995,6 +1996,11 @@ qla2x00_tmf_iocb_timeout(void *data)
int rc, h;
unsigned long flags;
+ if (sp->type == SRB_MARKER) {
+ complete(&tmf->u.tmf.comp);
+ return;
+ }
+
rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
@@ -2012,24 +2018,131 @@ qla2x00_tmf_iocb_timeout(void *data)
}
}
+static void qla_marker_sp_done(srb_t *sp, int res)
+{
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+ if (res != QLA_SUCCESS)
+ ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
+ "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
+ sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
+ sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);
+
+ sp->u.iocb_cmd.u.tmf.data = res;
+ complete(&tmf->u.tmf.comp);
+}
+
+#define START_SP_W_RETRIES(_sp, _rval) \
+{\
+ int cnt = 5; \
+ do { \
+ _rval = qla2x00_start_sp(_sp); \
+ if (_rval == EAGAIN) \
+ msleep(1); \
+ else \
+ break; \
+ cnt--; \
+ } while (cnt); \
+}
+
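
START_SP_W_RETRIES() above retries qla2x00_start_sp() up to five times, sleeping 1 ms between attempts while it keeps returning EAGAIN. The same logic in function form (illustrative only; start_fn stands in for qla2x00_start_sp()):

#include <linux/delay.h>
#include <linux/errno.h>

/* Illustrative only: bounded retry of a start function that may
 * transiently return EAGAIN when no IOCB space is available. */
static int demo_start_with_retries(int (*start_fn)(void *arg), void *arg)
{
        int cnt = 5;
        int rval;

        do {
                rval = start_fn(arg);
                if (rval != EAGAIN)
                        break;
                msleep(1);
        } while (--cnt);

        return rval;
}
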
+/**
+ * qla26xx_marker - send a marker IOCB and wait for it to complete.
+ * @arg: pointer to argument list.
+ * It is assumed the caller will provide an fcport pointer and a modifier.
+ */
+static int
+qla26xx_marker(struct tmf_arg *arg)
+{
+ struct scsi_qla_host *vha = arg->vha;
+ struct srb_iocb *tm_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ fc_port_t *fcport = arg->fcport;
+
+ if (TMF_NOT_READY(arg->fcport)) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8039,
+ "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+ fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, arg->qpair->id);
+ return QLA_SUSPENDED;
+ }
+
+ /* ref: INIT */
+ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_MARKER;
+ sp->name = "marker";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
+
+ tm_iocb = &sp->u.iocb_cmd;
+ init_completion(&tm_iocb->u.tmf.comp);
+ tm_iocb->u.tmf.modifier = arg->modifier;
+ tm_iocb->u.tmf.lun = arg->lun;
+ tm_iocb->u.tmf.loop_id = fcport->loop_id;
+ tm_iocb->u.tmf.vp_index = vha->vp_idx;
+
+ START_SP_W_RETRIES(sp, rval);
+
+ ql_dbg(ql_dbg_taskm, vha, 0x8006,
+ "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, sp->qpair->id, rval);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8031,
+ "Marker IOCB send failure (%x).\n", rval);
+ goto done_free_sp;
+ }
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+ rval = tm_iocb->u.tmf.data;
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8019,
+ "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, sp->qpair->id, rval);
+ }
+
+done_free_sp:
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+done:
+ return rval;
+}
+
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ if (res)
+ tmf->u.tmf.data = res;
complete(&tmf->u.tmf.comp);
}
-int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
- uint32_t tag)
+static int
+__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ fc_port_t *fcport = arg->fcport;
+
+ if (TMF_NOT_READY(arg->fcport)) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8032,
+ "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+ fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, arg->qpair->id);
+ return QLA_SUSPENDED;
+ }
+
/* ref: INIT */
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
goto done;
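
Editor's note: the START_SP_W_RETRIES() macro introduced in this hunk bounds how often qla2x00_start_sp() is retried when it reports EAGAIN, sleeping 1 ms between attempts. Below is a minimal userspace model of that bounded-retry pattern, for illustration only; submit_once(), the retry count, and the sleep interval are made-up stand-ins, not driver code.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

static int submit_once(void)
{
	/* pretend the queue is busy for the first two attempts */
	return (++attempts < 3) ? EAGAIN : 0;
}

static int submit_with_retries(void)
{
	int cnt = 5, rval;

	do {
		rval = submit_once();
		if (rval == EAGAIN)
			usleep(1000);	/* roughly msleep(1) */
		else
			break;
		cnt--;
	} while (cnt);

	return rval;
}

int main(void)
{
	printf("submit returned %d after %d attempts\n",
	       submit_with_retries(), attempts);
	return 0;
}

The same shape — try, sleep briefly on a transient busy status, give up after a fixed number of passes — is what both the TMF and marker submission paths above rely on.
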
@@ -2042,15 +2155,16 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb = &sp->u.iocb_cmd;
init_completion(&tm_iocb->u.tmf.comp);
- tm_iocb->u.tmf.flags = flags;
- tm_iocb->u.tmf.lun = lun;
+ tm_iocb->u.tmf.flags = arg->flags;
+ tm_iocb->u.tmf.lun = arg->lun;
+
+ START_SP_W_RETRIES(sp, rval);
ql_dbg(ql_dbg_taskm, vha, 0x802f,
- "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->flags, arg->lun, sp->qpair->id, rval);
- rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
wait_for_completion(&tm_iocb->u.tmf.comp);
@@ -2062,15 +2176,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
"TM IOCB failed (%x).\n", rval);
}
- if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
- flags = tm_iocb->u.tmf.flags;
- lun = (uint16_t)tm_iocb->u.tmf.lun;
-
- /* Issue Marker IOCB */
- qla2x00_marker(vha, vha->hw->base_qpair,
- fcport->loop_id, lun,
- flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
- }
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
+ rval = qla26xx_marker(arg);
done_free_sp:
/* ref: INIT */
@@ -2079,6 +2186,115 @@ done:
return rval;
}
+static void qla_put_tmf(fc_port_t *fcport)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ fcport->active_tmf--;
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+static
+int qla_get_tmf(fc_port_t *fcport)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ int rc = 0;
+ LIST_HEAD(tmf_elem);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ list_add_tail(&tmf_elem, &fcport->tmf_pending);
+
+ while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ msleep(1);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (TMF_NOT_READY(fcport)) {
+ ql_log(ql_log_warn, vha, 0x802c,
+ "Unable to acquire TM resource due to disruption.\n");
+ rc = EIO;
+ break;
+ }
+ if (fcport->active_tmf < MAX_ACTIVE_TMF &&
+ list_is_first(&tmf_elem, &fcport->tmf_pending))
+ break;
+ }
+
+ list_del(&tmf_elem);
+
+ if (!rc)
+ fcport->active_tmf++;
+
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ return rc;
+}
+
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+ uint32_t tag)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_qpair *qpair;
+ struct tmf_arg a;
+ int i, rval = QLA_SUCCESS;
+
+ if (TMF_NOT_READY(fcport))
+ return QLA_SUSPENDED;
+
+ a.vha = fcport->vha;
+ a.fcport = fcport;
+ a.lun = lun;
+ if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
+ a.modifier = MK_SYNC_ID_LUN;
+
+ if (qla_get_tmf(fcport))
+ return QLA_FUNCTION_FAILED;
+ } else {
+ a.modifier = MK_SYNC_ID;
+ }
+
+ if (vha->hw->mqenable) {
+ for (i = 0; i < vha->hw->num_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+
+ if (TMF_NOT_READY(fcport)) {
+ ql_log(ql_log_warn, vha, 0x8026,
+ "Unable to send TM due to disruption.\n");
+ rval = QLA_SUSPENDED;
+ break;
+ }
+
+ a.qpair = qpair;
+ a.flags = flags|TCF_NOTMCMD_TO_TARGET;
+ rval = __qla2x00_async_tm_cmd(&a);
+ if (rval)
+ break;
+ }
+ }
+
+ if (rval)
+ goto bailout;
+
+ a.qpair = vha->hw->base_qpair;
+ a.flags = flags;
+ rval = __qla2x00_async_tm_cmd(&a);
+
+bailout:
+ if (a.modifier == MK_SYNC_ID_LUN)
+ qla_put_tmf(fcport);
+
+ return rval;
+}
+
int
qla24xx_async_abort_command(srb_t *sp)
{
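
Editor's note: qla_get_tmf()/qla_put_tmf() added in this hunk cap the number of task management commands in flight per fcport at MAX_ACTIVE_TMF and serve waiters in arrival order through the tmf_pending list. The sketch below is a hedged userspace model of that FIFO-fair throttle; it uses a mutex and condition variable instead of the driver's sess_lock-plus-msleep polling, and MAX_ACTIVE and the thread count are arbitrary.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_ACTIVE 2

struct waiter {
	struct waiter *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct waiter *head, **tail = &head;
static int active;

static void get_slot(struct waiter *w)
{
	pthread_mutex_lock(&lock);
	w->next = NULL;
	*tail = w;
	tail = &w->next;
	/* wait until a slot is free and we are first in line */
	while (active >= MAX_ACTIVE || head != w)
		pthread_cond_wait(&cond, &lock);
	head = w->next;
	if (!head)
		tail = &head;
	active++;
	/* let the next queued waiter re-check its own condition */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void put_slot(void)
{
	pthread_mutex_lock(&lock);
	active--;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	struct waiter w;

	get_slot(&w);
	printf("task %ld running\n", (long)arg);
	usleep(100000);
	put_slot();
	return NULL;
}

int main(void)
{
	pthread_t t[5];
	long i;

	for (i = 0; i < 5; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < 5; i++)
		pthread_join(t[i], NULL);
	return 0;
}
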
@@ -4860,7 +5076,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
if (use_tbl &&
ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES)
- strlcpy(ha->model_desc,
+ strscpy(ha->model_desc,
qla2x00_model_name[index * 2 + 1],
sizeof(ha->model_desc));
} else {
@@ -4868,14 +5084,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
if (use_tbl &&
ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES) {
- strlcpy(ha->model_number,
+ strscpy(ha->model_number,
qla2x00_model_name[index * 2],
sizeof(ha->model_number));
- strlcpy(ha->model_desc,
+ strscpy(ha->model_desc,
qla2x00_model_name[index * 2 + 1],
sizeof(ha->model_desc));
} else {
- strlcpy(ha->model_number, def,
+ strscpy(ha->model_number, def,
sizeof(ha->model_number));
}
}
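
Editor's note: the strlcpy() -> strscpy() conversions in this hunk (and in the qla_mr.c, qla4xxx, and iSCSI hunks further down) follow the kernel-wide retirement of strlcpy(): strlcpy() returns the full source length, so it walks the entire source string even when the destination is smaller, while strscpy() stops at the destination bound, always NUL-terminates, and returns -E2BIG on truncation. The userspace re-implementation below only models that return convention and is not the kernel's optimized routine.

#include <stddef.h>
#include <stdio.h>

#define E2BIG 7

/* simplified model of the kernel's strscpy() return convention */
static long strscpy_model(char *dst, const char *src, size_t size)
{
	size_t i;

	if (size == 0)
		return -E2BIG;

	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';

	/* negative return means the source did not fit */
	return src[i] ? -E2BIG : (long)i;
}

int main(void)
{
	char model[8];

	printf("copied=%ld dst=\"%s\"\n",
	       strscpy_model(model, "ISP2532", sizeof(model)), model);
	printf("copied=%ld dst=\"%s\"\n",
	       strscpy_model(model, "QLE2742-long-name", sizeof(model)), model);
	return 0;
}
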
@@ -5290,6 +5506,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
+ INIT_LIST_HEAD(&fcport->tmf_pending);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b9b3e6f80ea9..6acfdcc48b16 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -522,21 +522,25 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
return (QLA_FUNCTION_FAILED);
}
+ mrk24 = (struct mrk_entry_24xx *)mrk;
+
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
if (IS_FWI2_CAPABLE(ha)) {
- mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
- mrk24->handle = make_handle(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16((uint16_t)lun);
}
}
+
+ if (IS_FWI2_CAPABLE(ha))
+ mrk24->handle = QLA_SKIP_HANDLE;
+
wmb();
qla2x00_start_iocbs(vha, req);
@@ -2541,7 +2545,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
- struct req_que *req = vha->req;
+ struct req_que *req = sp->qpair->req;
flags = iocb->u.tmf.flags;
lun = iocb->u.tmf.lun;
@@ -2557,7 +2561,8 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->port_id[2] = fcport->d_id.b.domain;
tsk->vp_index = fcport->vha->vp_idx;
- if (flags == TCF_LUN_RESET) {
+ if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET|
+ TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
int_to_scsilun(lun, &tsk->lun);
host_to_fcp_swap((uint8_t *)&tsk->lun,
sizeof(tsk->lun));
@@ -3852,9 +3857,9 @@ static int qla_get_iocbs_resource(struct srb *sp)
case SRB_NACK_LOGO:
case SRB_LOGOUT_CMD:
case SRB_CTRL_VP:
- push_it_through = true;
- fallthrough;
+ case SRB_MARKER:
default:
+ push_it_through = true;
get_exch = false;
}
@@ -3870,6 +3875,19 @@ static int qla_get_iocbs_resource(struct srb *sp)
return qla_get_fw_resources(sp->qpair, &sp->iores);
}
+static void
+qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
+{
+ mrk->entry_type = MARKER_TYPE;
+ mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
+ if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
+ mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
+ int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
+ host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
+ mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
+ }
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3973,6 +3991,9 @@ qla2x00_start_sp(srb_t *sp)
case SRB_SA_REPLACE:
qla24xx_sa_replace_iocb(sp, pkt);
break;
+ case SRB_MARKER:
+ qla_marker_iocb(sp, pkt);
+ break;
default:
break;
}
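
Editor's note: qla_marker_iocb() added in this hunk packs the 64-bit LUN into the 8-byte wire format with int_to_scsilun() and then byte-swaps it for the firmware with host_to_fcp_swap(). The snippet below is a userspace model of the int_to_scsilun() packing step (two bytes per addressing level); the sample LUN value is arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* userspace model of the kernel's int_to_scsilun() packing */
static void pack_scsilun(uint64_t lun, uint8_t out[8])
{
	int i;

	memset(out, 0, 8);
	for (i = 0; i < 8; i += 2) {
		out[i]     = (lun >> 8) & 0xff;
		out[i + 1] = lun & 0xff;
		lun >>= 16;
	}
}

int main(void)
{
	uint8_t lun8[8];
	int i;

	pack_scsilun(0x4022, lun8);	/* sample LUN */
	for (i = 0; i < 8; i++)
		printf("%02x ", lun8[i]);
	printf("\n");
	return 0;
}
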
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 030625ebb4e6..a07c010b0843 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1862,9 +1862,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
}
}
-srb_t *
-qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
- struct req_que *req, void *iocb)
+static srb_t *
+qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ struct req_que *req, void *iocb, u16 *ret_index)
{
struct qla_hw_data *ha = vha->hw;
sts_entry_t *pkt = iocb;
@@ -1899,7 +1899,22 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
return NULL;
}
- req->outstanding_cmds[index] = NULL;
+ *ret_index = index;
+ qla_put_fw_resources(sp->qpair, &sp->iores);
+ return sp;
+}
+
+srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ struct req_que *req, void *iocb)
+{
+ uint16_t index;
+ srb_t *sp;
+
+ sp = qla_get_sp_from_handle(vha, func, req, iocb, &index);
+ if (sp)
+ req->outstanding_cmds[index] = NULL;
+
return sp;
}
@@ -3112,7 +3127,6 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
bsg_reply->reply_payload_rcv_len = 0;
- qla_put_fw_resources(sp->qpair, &sp->iores);
done:
/* Return the vendor specific reply to API */
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
@@ -3236,13 +3250,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
- req->outstanding_cmds[handle] = NULL;
cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
sts->handle, sp);
+ req->outstanding_cmds[handle] = NULL;
return;
}
@@ -3513,6 +3527,9 @@ out:
if (rsp->status_srb == NULL)
sp->done(sp, res);
+
+ /* for I/Os, clearing outstanding_cmds[handle] means scsi_done() was called */
+ req->outstanding_cmds[handle] = NULL;
}
/**
@@ -3589,6 +3606,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
uint16_t que = MSW(pkt->handle);
struct req_que *req = NULL;
int res = DID_ERROR << 16;
+ u16 index;
ql_dbg(ql_dbg_async, vha, 0x502a,
"iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
@@ -3607,7 +3625,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
switch (pkt->entry_type) {
case NOTIFY_ACK_TYPE:
- case STATUS_TYPE:
case STATUS_CONT_TYPE:
case LOGINOUT_PORT_IOCB_TYPE:
case CT_IOCB_TYPE:
@@ -3627,6 +3644,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case CTIO_TYPE7:
case CTIO_CRC2:
return 1;
+ case STATUS_TYPE:
+ sp = qla_get_sp_from_handle(vha, func, req, pkt, &index);
+ if (sp) {
+ sp->done(sp, res);
+ req->outstanding_cmds[index] = NULL;
+ return 0;
+ }
+ break;
}
fatal:
ql_log(ql_log_warn, vha, 0x5030,
@@ -3749,6 +3774,28 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
return rc;
}
+static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mrk_entry_24xx *pkt)
+{
+ const char func[] = "MRK-IOCB";
+ srb_t *sp;
+ int res = QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ if (pkt->entry_status) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
+ res = QLA_COMMAND_ERROR;
+ }
+ sp->u.iocb_cmd.u.tmf.data = res;
+ sp->done(sp, res);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context
@@ -3862,9 +3909,7 @@ process_err:
(struct nack_to_isp *)pkt);
break;
case MARKER_TYPE:
- /* Do nothing in this case, this check is to prevent it
- * from falling into default case
- */
+ qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt);
break;
case ABORT_IOCB_TYPE:
qla24xx_abort_iocb_entry(vha, rsp->req,
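
Editor's note: the qla_isr.c changes split the handle lookup so that validation (qla_get_sp_from_handle(), which also hands back the index through an out-parameter) is separate from releasing the slot: the status and error paths now clear outstanding_cmds[] only after the completion callback has run, which is what the new "clearing means scsi_done() was called" comment depends on. A small hedged sketch of that lookup-then-consume pattern follows; the table, struct, and names are illustrative.

#include <stdio.h>

#define MAX_OUTSTANDING 8

struct cmd {
	int id;
};

static struct cmd *outstanding[MAX_OUTSTANDING];

/* find and validate, but do not release the slot */
static struct cmd *lookup_cmd(unsigned int handle, unsigned int *ret_index)
{
	if (handle >= MAX_OUTSTANDING || !outstanding[handle])
		return NULL;
	*ret_index = handle;
	return outstanding[handle];
}

static void complete_cmd(struct cmd *c)
{
	printf("completing cmd %d\n", c->id);
}

int main(void)
{
	struct cmd c0 = { .id = 42 };
	unsigned int index;
	struct cmd *c;

	outstanding[3] = &c0;

	c = lookup_cmd(3, &index);
	if (c) {
		complete_cmd(c);
		/* slot is cleared only after completion has run */
		outstanding[index] = NULL;
	}
	return 0;
}
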
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 78661b658dcd..b67416951a5f 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -496,7 +496,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
struct qla_hw_data *ha = base_vha->hw;
scsi_qla_host_t *vha;
- struct scsi_host_template *sht = &qla2xxx_driver_template;
+ const struct scsi_host_template *sht = &qla2xxx_driver_template;
struct Scsi_Host *host;
vha = qla2x00_create_host(sht, ha);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index f726eb8449c5..083f94e43fba 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -691,7 +691,7 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
struct qla_hw_data *ha = vha->hw;
if (pci_is_pcie(ha->pdev))
- strlcpy(str, "PCIe iSA", str_len);
+ strscpy(str, "PCIe iSA", str_len);
return str;
}
@@ -1850,21 +1850,21 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
phost_info = &preg_hsi->hsi;
memset(preg_hsi, 0, sizeof(struct register_host_info));
phost_info->os_type = OS_TYPE_LINUX;
- strlcpy(phost_info->sysname, p_sysid->sysname,
+ strscpy(phost_info->sysname, p_sysid->sysname,
sizeof(phost_info->sysname));
- strlcpy(phost_info->nodename, p_sysid->nodename,
+ strscpy(phost_info->nodename, p_sysid->nodename,
sizeof(phost_info->nodename));
if (!strcmp(phost_info->nodename, "(none)"))
ha->mr.host_info_resend = true;
- strlcpy(phost_info->release, p_sysid->release,
+ strscpy(phost_info->release, p_sysid->release,
sizeof(phost_info->release));
- strlcpy(phost_info->version, p_sysid->version,
+ strscpy(phost_info->version, p_sysid->version,
sizeof(phost_info->version));
- strlcpy(phost_info->machine, p_sysid->machine,
+ strscpy(phost_info->machine, p_sysid->machine,
sizeof(phost_info->machine));
- strlcpy(phost_info->domainname, p_sysid->domainname,
+ strscpy(phost_info->domainname, p_sysid->domainname,
sizeof(phost_info->domainname));
- strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
+ strscpy(phost_info->hostdriver, QLA2XXX_VERSION,
sizeof(phost_info->hostdriver));
preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
ql_dbg(ql_dbg_init, vha, 0x0149,
@@ -1909,9 +1909,9 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
if (fx_type == FXDISC_GET_CONFIG_INFO) {
struct config_info_data *pinfo =
(struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
- strlcpy(vha->hw->model_number, pinfo->model_num,
+ strscpy(vha->hw->model_number, pinfo->model_num,
ARRAY_SIZE(vha->hw->model_number));
- strlcpy(vha->hw->model_desc, pinfo->model_description,
+ strscpy(vha->hw->model_desc, pinfo->model_description,
ARRAY_SIZE(vha->hw->model_desc));
memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
sizeof(vha->hw->mr.symbolic_name));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 545167627e48..bc89d3da8fd0 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -360,6 +360,13 @@ MODULE_PARM_DESC(ql2xnvme_queues,
"1 - Minimum number of queues supported\n"
"8 - Default value");
+int ql2xfc2target = 1;
+module_param(ql2xfc2target, int, 0444);
+MODULE_PARM_DESC(ql2xfc2target,
+ "Enables FC2 Target support. "
+ "0 - FC2 Target support is disabled. "
+ "1 - FC2 Target support is enabled (default).");
+
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
@@ -1072,43 +1079,6 @@ qc24_fail_command:
}
/*
- * qla2x00_eh_wait_on_command
- * Waits for the command to be returned by the Firmware for some
- * max time.
- *
- * Input:
- * cmd = Scsi Command to wait on.
- *
- * Return:
- * Completed in time : QLA_SUCCESS
- * Did not complete in time : QLA_FUNCTION_FAILED
- */
-static int
-qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
-{
-#define ABORT_POLLING_PERIOD 1000
-#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
- unsigned long wait_iter = ABORT_WAIT_ITER;
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp = scsi_cmd_priv(cmd);
- int ret = QLA_SUCCESS;
-
- if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
- ql_dbg(ql_dbg_taskm, vha, 0x8005,
- "Return:eh_wait.\n");
- return ret;
- }
-
- while (sp->type && wait_iter--)
- msleep(ABORT_POLLING_PERIOD);
- if (sp->type)
- ret = QLA_FUNCTION_FAILED;
-
- return ret;
-}
-
-/*
* qla2x00_wait_for_hba_online
* Wait till the HBA is online after going through
* <= MAX_RETRIES_OF_ISP_ABORT or
@@ -1358,6 +1328,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
}
+#define ABORT_POLLING_PERIOD 1000
+#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
+
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
@@ -1371,41 +1344,73 @@ __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
+ unsigned long wait_iter = ABORT_WAIT_ITER;
+ bool found;
+ struct qla_hw_data *ha = vha->hw;
status = QLA_SUCCESS;
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- for (cnt = 1; status == QLA_SUCCESS &&
- cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (!sp)
- continue;
- if (sp->type != SRB_SCSI_CMD)
- continue;
- if (vha->vp_idx != sp->vha->vp_idx)
- continue;
- match = 0;
- cmd = GET_CMD_SP(sp);
- switch (type) {
- case WAIT_HOST:
- match = 1;
- break;
- case WAIT_TARGET:
- match = cmd->device->id == t;
- break;
- case WAIT_LUN:
- match = (cmd->device->id == t &&
- cmd->device->lun == l);
- break;
- }
- if (!match)
- continue;
+ while (wait_iter--) {
+ found = false;
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- status = qla2x00_eh_wait_on_command(cmd);
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->type != SRB_SCSI_CMD)
+ continue;
+ if (vha->vp_idx != sp->vha->vp_idx)
+ continue;
+ match = 0;
+ cmd = GET_CMD_SP(sp);
+ switch (type) {
+ case WAIT_HOST:
+ match = 1;
+ break;
+ case WAIT_TARGET:
+ if (sp->fcport)
+ match = sp->fcport->d_id.b24 == t;
+ else
+ match = 0;
+ break;
+ case WAIT_LUN:
+ if (sp->fcport)
+ match = (sp->fcport->d_id.b24 == t &&
+ cmd->device->lun == l);
+ else
+ match = 0;
+ break;
+ }
+ if (!match)
+ continue;
+
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (unlikely(pci_channel_offline(ha->pdev)) ||
+ ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8005,
+ "Return:eh_wait.\n");
+ return status;
+ }
+
+ /*
+ * The SRB_SCSI_CMD is still in the outstanding_cmds array,
+ * which means scsi_done() has not been called yet. Wait for
+ * it to clear from outstanding_cmds.
+ */
+ msleep(ABORT_POLLING_PERIOD);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ found = true;
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (!found)
+ break;
}
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (!wait_iter && found)
+ status = QLA_FUNCTION_FAILED;
return status;
}
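
Editor's note: the reworked __qla2x00_eh_wait_for_pending_commands() above no longer waits on one command at a time; it rescans the outstanding array, sleeps ABORT_POLLING_PERIOD between passes, and gives up after ABORT_WAIT_ITER iterations. A minimal userspace model of that bounded scan-and-sleep wait is sketched below; the slot array and the completer thread are invented for the demo.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NSLOTS		4
#define POLL_PERIOD_US	1000
#define WAIT_ITER	2000

static atomic_bool in_flight[NSLOTS];

static void *completer(void *arg)
{
	int i;

	usleep(50000);			/* commands finish "later" */
	for (i = 0; i < NSLOTS; i++)
		in_flight[i] = false;	/* models scsi_done() clearing the slot */
	return NULL;
}

/* returns 0 when all slots drained, -1 when iterations are exhausted */
static int wait_for_pending(void)
{
	int iter = WAIT_ITER;

	while (iter--) {
		bool found = false;
		int i;

		for (i = 0; i < NSLOTS; i++)
			if (in_flight[i])
				found = true;

		if (!found)
			return 0;
		usleep(POLL_PERIOD_US);
	}
	return -1;
}

int main(void)
{
	pthread_t t;
	int i;

	for (i = 0; i < NSLOTS; i++)
		in_flight[i] = true;
	pthread_create(&t, NULL, completer, NULL);

	printf("wait_for_pending() = %d\n", wait_for_pending());
	pthread_join(t, NULL);
	return 0;
}
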
@@ -1858,6 +1863,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /*
+ * perform lockless completion during driver unload
+ */
+ if (qla2x00_chip_is_down(vha)) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ sp->done(sp, res);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ continue;
+ }
+
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -2940,9 +2956,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql2xallocfwdump = 0;
}
- /* This may fail but that's ok */
- pci_enable_pcie_error_reporting(pdev);
-
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
@@ -3599,6 +3612,7 @@ skip_dpc:
probe_failed:
qla_enode_stop(base_vha);
qla_edb_stop(base_vha);
+ vfree(base_vha->scan.l);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
@@ -3949,8 +3963,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
@@ -4085,7 +4097,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
"Mark all dev lost\n");
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->loop_id != FC_NO_LOOP_ID &&
+ if (ql2xfc2target &&
+ fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) &&
fcport->port_type == FCT_TARGET &&
!qla2x00_reset_active(vha)) {
@@ -5004,8 +5017,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->vp_map = NULL;
}
-struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
- struct qla_hw_data *ha)
+struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *sht,
+ struct qla_hw_data *ha)
{
struct Scsi_Host *host;
struct scsi_qla_host *vha = NULL;
@@ -6825,7 +6838,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
qla2x00_unmap_iobases(ha);
pci_release_selected_regions(ha->pdev, ha->bars);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
/*
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index aa0cf5ca6c1c..5258b07687a9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -6395,8 +6395,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
return -ENOMEM;
}
- if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
- base_vha->host->hostt->supported_mode |= MODE_TARGET;
+ qla2xxx_driver_template.supported_mode |= MODE_TARGET;
rc = btree_init64(&tgt->lun_qpair_map);
if (rc) {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 42d69d89834f..4d6f06fb156b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.200-k"
+#define QLA2XXX_VERSION "10.02.08.300-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 200
+#define QLA_DRIVER_BETA_VER 300
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8024322c9c5a..3b5ba4b47b3b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -377,11 +377,6 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
tcm_qla2xxx_put_sess(sess);
}
-static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
-{
- return 0;
-}
-
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
@@ -421,11 +416,6 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
return qlt_rdy_to_xfer(cmd);
}
-static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
-{
- return;
-}
-
static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
@@ -1811,10 +1801,8 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
.close_session = tcm_qla2xxx_close_session,
- .sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = tcm_qla2xxx_write_pending,
- .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
@@ -1852,10 +1840,8 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
.close_session = tcm_qla2xxx_close_session,
- .sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = tcm_qla2xxx_write_pending,
- .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 5f82c8afd5e0..5e683ba49fa5 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
-#include <linux/aer.h>
#include <linux/bsg-lib.h>
#include <linux/vmalloc.h>
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 6f0e77dc2a34..cf52258ecdde 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -472,14 +472,12 @@ static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
**/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
- uint32_t count = 0;
struct srb *srb = NULL;
struct status_entry *sts_entry;
/* Process all responses from response queue */
while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
sts_entry = (struct status_entry *) ha->response_ptr;
- count++;
/* Advance pointers for next entry */
if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index cd71074f3abe..249f1d7021d4 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1611,8 +1611,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
goto exit_get_chap;
}
- strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
- strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
exit_get_chap:
@@ -1732,8 +1732,8 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
goto exit_unlock_uni_chap;
}
- strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
- strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
+ strscpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
+ strscpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
rval = QLA_SUCCESS;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 005502125b27..b2a3988e1e15 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -798,9 +798,9 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
continue;
chap_rec->chap_tbl_idx = i;
- strlcpy(chap_rec->username, chap_table->name,
+ strscpy(chap_rec->username, chap_table->name,
ISCSI_CHAP_AUTH_NAME_MAX_LEN);
- strlcpy(chap_rec->password, chap_table->secret,
+ strscpy(chap_rec->password, chap_table->secret,
QL4_CHAP_MAX_SECRET_LEN);
chap_rec->password_length = chap_table->secret_len;
@@ -6052,8 +6052,8 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
if (!(chap_table->flags & BIT_6)) /* Not BIDI */
continue;
- strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
- strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
ret = 0;
break;
}
@@ -6281,8 +6281,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
tddb->tpgt = sess->tpgt;
tddb->port = conn->persistent_port;
- strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
- strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+ strscpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strscpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
}
static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
@@ -7781,7 +7781,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
goto exit_ddb_logout;
}
- strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
+ strscpy(flash_tddb->iscsi_name, fnode_sess->targetname,
ISCSI_NAME_SIZE);
if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
@@ -8639,8 +8639,6 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no = host->host_no;
ha->func_num = PCI_FUNC(ha->pdev->devfn);
- pci_enable_pcie_error_reporting(pdev);
-
/* Setup Runtime configurable options */
if (is_qla8022(ha)) {
ha->isp_ops = &qla4_82xx_isp_ops;
@@ -8867,7 +8865,6 @@ probe_failed:
qla4xxx_free_adapter(ha);
probe_failed_ioconfig:
- pci_disable_pcie_error_reporting(pdev);
scsi_host_put(ha->host);
probe_disable_device:
@@ -9022,7 +9019,6 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
scsi_host_put(ha->host);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 8c961ff03fcd..1e8fbd457248 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -513,7 +513,7 @@ static int qlogicpti_load_firmware(struct qlogicpti *qpti)
qpti->qpti_id);
err = 1;
goto out;
- }
+ }
sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
@@ -563,7 +563,7 @@ static int qlogicpti_load_firmware(struct qlogicpti *qpti)
qpti->qpti_id);
err = 1;
goto out;
- }
+ }
/* Load it up.. */
for (i = 0; i < risc_code_length; i++) {
@@ -1136,7 +1136,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
return NULL;
-
+
in_ptr = sbus_readw(qpti->qregs + MBOX5);
sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
@@ -1287,7 +1287,7 @@ static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
return return_status;
}
-static struct scsi_host_template qpti_template = {
+static const struct scsi_host_template qpti_template = {
.module = THIS_MODULE,
.name = "qlogicpti",
.info = qlogicpti_info,
@@ -1362,9 +1362,8 @@ static int qpti_sbus_probe(struct platform_device *op)
fcode = of_get_property(dp, "isp-fcode", NULL);
if (fcode && fcode[0])
printk("(FCode %s)", fcode);
- if (of_find_property(dp, "differential", NULL) != NULL)
- qpti->differential = 1;
-
+ qpti->differential = of_property_read_bool(dp, "differential");
+
printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
qpti->qpti_id,
(qpti->ultra ? "Ultra" : "Fast"),
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7d2210a006f0..09ef0b31dfc0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -314,11 +314,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
if (result)
return -EIO;
- /* Sanity check that we got the page back that we asked for */
+ /*
+ * Sanity check that we got the page back that we asked for and that
+ * the page size is not 0.
+ */
if (buffer[1] != page)
return -EIO;
- return get_unaligned_be16(&buffer[2]) + 4;
+ result = get_unaligned_be16(&buffer[2]);
+ if (!result)
+ return -EIO;
+
+ return result + 4;
}
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
@@ -326,6 +333,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
int result;
+ if (sdev->no_vpd_size)
+ return SCSI_DEFAULT_VPD_LEN;
+
/*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
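
Editor's note: the scsi_vpd_inquiry() change above rejects a returned page length of zero in addition to checking that the echoed page code matches. The sketch below models that header validation in userspace; get_unaligned_be16() is replaced by explicit byte arithmetic and the sample buffers are fabricated.

#include <stdint.h>
#include <stdio.h>

/* model of parsing a SCSI VPD page header: byte 1 = page code,
 * bytes 2..3 = big-endian page length (excluding the 4-byte header) */
static int vpd_page_size(const uint8_t *buf, uint8_t page)
{
	int len;

	if (buf[1] != page)
		return -1;		/* wrong page echoed back */

	len = (buf[2] << 8) | buf[3];
	if (!len)
		return -1;		/* zero-length page is bogus */

	return len + 4;			/* total transfer size */
}

int main(void)
{
	const uint8_t good[4] = { 0x00, 0x83, 0x00, 0x3c };	/* page 0x83, 60 bytes */
	const uint8_t bad[4]  = { 0x00, 0x83, 0x00, 0x00 };	/* zero length */

	printf("good: %d\n", vpd_page_size(good, 0x83));
	printf("bad:  %d\n", vpd_page_size(bad, 0x83));
	return 0;
}
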
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 8553277effb3..8c58128ad32a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -250,6 +250,11 @@ static const char *sdebug_version_date = "20210520";
#define SDEB_XA_NOT_IN_USE XA_MARK_1
+static struct kmem_cache *queued_cmd_cache;
+
+#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
+#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
+
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZTYPE_CNV = 0x1,
@@ -288,7 +293,6 @@ struct sdebug_dev_info {
uuid_t lu_name;
struct sdebug_host_info *sdbg_host;
unsigned long uas_bm[1];
- atomic_t num_in_q;
atomic_t stopped; /* 1: by SSU, 2: device start */
bool used;
@@ -324,9 +328,12 @@ struct sdeb_store_info {
void *map_storep; /* provisioning map */
};
-#define to_sdebug_host(d) \
+#define dev_to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
+#define shost_to_sdebug_host(shost) \
+ dev_to_sdebug_host(shost->dma_dev)
+
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
@@ -334,13 +341,7 @@ struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
ktime_t cmpl_ts;/* time since boot to complete this cmd */
- int sqa_idx; /* index of sdebug_queue array */
- int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
- int hc_idx; /* hostwide tag index */
int issuing_cpu;
- bool init_hrt;
- bool init_wq;
- bool init_poll;
bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -349,15 +350,12 @@ struct sdebug_queued_cmd {
/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
* instance indicates this slot is in use.
*/
- struct sdebug_defer *sd_dp;
- struct scsi_cmnd *a_cmnd;
+ struct sdebug_defer sd_dp;
+ struct scsi_cmnd *scmd;
};
-struct sdebug_queue {
- struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
- unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
- spinlock_t qc_lock;
- atomic_t blocked; /* to temporarily stop more being queued */
+struct sdebug_scsi_cmd {
+ spinlock_t lock;
};
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
@@ -507,6 +505,8 @@ static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
+static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
+
/*
* The following are overflow arrays for cdbs that "hit" the same index in
* the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -754,7 +754,6 @@ static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
-static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
@@ -814,7 +813,7 @@ static int sdebug_cylinders_per; /* cylinders per surface */
static int sdebug_sectors_per; /* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
-static DEFINE_SPINLOCK(sdebug_host_list_lock);
+static DEFINE_MUTEX(sdebug_host_list_mutex);
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
@@ -841,7 +840,6 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
-static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);
@@ -906,7 +904,7 @@ static void sdebug_max_tgts_luns(void)
struct sdebug_host_info *sdbg_host;
struct Scsi_Host *hpnt;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
hpnt = sdbg_host->shost;
if ((hpnt->this_id >= 0) &&
@@ -917,7 +915,7 @@ static void sdebug_max_tgts_luns(void)
/* sdebug_max_luns; */
hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
@@ -1049,30 +1047,27 @@ static void all_config_cdb_len(void)
struct Scsi_Host *shost;
struct scsi_device *sdev;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
shost = sdbg_host->shost;
shost_for_each_device(sdev, shost) {
config_cdb_len(sdev);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
- struct sdebug_host_info *sdhp;
+ struct sdebug_host_info *sdhp = devip->sdbg_host;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
- list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
- if ((devip->sdbg_host == dp->sdbg_host) &&
- (devip->target == dp->target))
- clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ if ((devip->sdbg_host == dp->sdbg_host) &&
+ (devip->target == dp->target)) {
+ clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
@@ -4899,20 +4894,6 @@ fini:
return res;
}
-static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
-{
- u16 hwq;
- u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
-
- hwq = blk_mq_unique_tag_to_hwq(tag);
-
- pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
- if (WARN_ON_ONCE(hwq >= submit_queues))
- hwq = 0;
-
- return sdebug_q_arr + hwq;
-}
-
static u32 get_tag(struct scsi_cmnd *cmnd)
{
return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
@@ -4921,75 +4902,41 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
- bool aborted = sd_dp->aborted;
- int qc_idx;
- int retiring = 0;
- unsigned long iflags;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct scsi_cmnd *scp;
- struct sdebug_dev_info *devip;
+ struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+ unsigned long flags;
+ struct scsi_cmnd *scp = sqcp->scmd;
+ struct sdebug_scsi_cmd *sdsc;
+ bool aborted;
- if (unlikely(aborted))
- sd_dp->aborted = false;
- qc_idx = sd_dp->qc_idx;
- sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
atomic_inc(&sdebug_completions);
if (raw_smp_processor_id() != sd_dp->issuing_cpu)
atomic_inc(&sdebug_miss_cpus);
}
- if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
- pr_err("wild qc_idx=%d\n", qc_idx);
- return;
- }
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- sqcp = &sqp->qc_arr[qc_idx];
- scp = sqcp->a_cmnd;
- if (unlikely(scp == NULL)) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
- sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
- return;
- }
- devip = (struct sdebug_dev_info *)scp->device->hostdata;
- if (likely(devip))
- atomic_dec(&devip->num_in_q);
- else
- pr_err("devip=NULL\n");
- if (unlikely(atomic_read(&retired_max_queue) > 0))
- retiring = 1;
-
- sqcp->a_cmnd = NULL;
- if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("Unexpected completion\n");
- return;
+
+ if (!scp) {
+ pr_err("scmd=NULL\n");
+ goto out;
}
- if (unlikely(retiring)) { /* user has reduced max_queue */
- int k, retval;
+ sdsc = scsi_cmd_priv(scp);
+ spin_lock_irqsave(&sdsc->lock, flags);
+ aborted = sd_dp->aborted;
+ if (unlikely(aborted))
+ sd_dp->aborted = false;
+ ASSIGN_QUEUED_CMD(scp, NULL);
+
+ spin_unlock_irqrestore(&sdsc->lock, flags);
- retval = atomic_read(&retired_max_queue);
- if (qc_idx >= retval) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("index %d too large\n", retval);
- return;
- }
- k = find_last_bit(sqp->in_use_bm, retval);
- if ((k < sdebug_max_queue) || (k == retval))
- atomic_set(&retired_max_queue, 0);
- else
- atomic_set(&retired_max_queue, k + 1);
- }
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (unlikely(aborted)) {
- if (sdebug_verbose)
- pr_info("bypassing scsi_done() due to aborted cmd\n");
- return;
+ if (aborted) {
+ pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
+ blk_abort_request(scsi_cmd_to_rq(scp));
+ goto out;
}
+
scsi_done(scp); /* callback to mid level */
+out:
+ sdebug_free_queued_cmd(sqcp);
}
/* When high resolution timer goes off this function is called. */
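
Editor's note: the scsi_debug rework in these hunks drops the driver-private queue array: each in-flight command now owns a kmem_cache-allocated sdebug_queued_cmd reached through the host_scribble pointer (the TO_QUEUED_CMD()/ASSIGN_QUEUED_CMD() macros), with the deferred-completion state embedded in it and freed when the command completes or is stopped. Below is a hedged userspace sketch of that opaque per-command-context pattern; the struct layout and names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

/* stand-in for the midlayer command with an opaque scribble pointer */
struct cmnd {
	void *scribble;
};

struct queued_ctx {
	int deferred_type;
	int issuing_cpu;
};

#define TO_CTX(c)		((struct queued_ctx *)(c)->scribble)
#define ASSIGN_CTX(c, ctx)	((c)->scribble = (ctx))

static struct queued_ctx *alloc_ctx(struct cmnd *c)
{
	struct queued_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx)
		ASSIGN_CTX(c, ctx);
	return ctx;
}

static void complete_cmd(struct cmnd *c)
{
	struct queued_ctx *ctx = TO_CTX(c);

	ASSIGN_CTX(c, NULL);	/* detach before "calling done()" */
	printf("completing, deferred_type=%d\n", ctx->deferred_type);
	free(ctx);
}

int main(void)
{
	struct cmnd c = { 0 };

	if (!alloc_ctx(&c))
		return 1;
	TO_CTX(&c)->deferred_type = 2;	/* e.g. "work queue" */
	complete_cmd(&c);
	return 0;
}
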
@@ -5152,7 +5099,6 @@ static struct sdebug_dev_info *sdebug_device_create(
} else {
devip->zmodel = BLK_ZONED_NONE;
}
- devip->sdbg_host = sdbg_host;
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
@@ -5166,11 +5112,7 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
struct sdebug_dev_info *open_devip = NULL;
struct sdebug_dev_info *devip;
- sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
- if (!sdbg_host) {
- pr_err("Host info NULL\n");
- return NULL;
- }
+ sdbg_host = shost_to_sdebug_host(sdev->host);
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
@@ -5194,7 +5136,6 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
open_devip->target = sdev->id;
open_devip->lun = sdev->lun;
open_devip->sdbg_host = sdbg_host;
- atomic_set(&open_devip->num_in_q, 0);
set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
open_devip->used = true;
return open_devip;
@@ -5245,215 +5186,193 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp,
+/* Returns true if we require the queued memory to be freed by the caller. */
+static bool stop_qc_helper(struct sdebug_defer *sd_dp,
enum sdeb_defer_type defer_t)
{
- if (!sd_dp)
- return;
- if (defer_t == SDEB_DEFER_HRT)
- hrtimer_cancel(&sd_dp->hrt);
- else if (defer_t == SDEB_DEFER_WQ)
- cancel_work_sync(&sd_dp->ew.work);
-}
+ if (defer_t == SDEB_DEFER_HRT) {
+ int res = hrtimer_try_to_cancel(&sd_dp->hrt);
-/* If @cmnd found deletes its timer or work queue and returns true; else
- returns false */
-static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
-{
- unsigned long iflags;
- int j, k, qmax, r_qmax;
- enum sdeb_defer_type l_defer_t;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_dev_info *devip;
- struct sdebug_defer *sd_dp;
-
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- qmax = sdebug_max_queue;
- r_qmax = atomic_read(&retired_max_queue);
- if (r_qmax > qmax)
- qmax = r_qmax;
- for (k = 0; k < qmax; ++k) {
- if (test_bit(k, sqp->in_use_bm)) {
- sqcp = &sqp->qc_arr[k];
- if (cmnd != sqcp->a_cmnd)
- continue;
- /* found */
- devip = (struct sdebug_dev_info *)
- cmnd->device->hostdata;
- if (devip)
- atomic_dec(&devip->num_in_q);
- sqcp->a_cmnd = NULL;
- sd_dp = sqcp->sd_dp;
- if (sd_dp) {
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- } else
- l_defer_t = SDEB_DEFER_NONE;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp, l_defer_t);
- clear_bit(k, sqp->in_use_bm);
- return true;
- }
+ switch (res) {
+ case 0: /* Not active, it must have already run */
+ case -1: /* -1 It's executing the CB */
+ return false;
+ case 1: /* Was active, we've now cancelled */
+ default:
+ return true;
}
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ } else if (defer_t == SDEB_DEFER_WQ) {
+ /* Cancel if pending */
+ if (cancel_work_sync(&sd_dp->ew.work))
+ return true;
+ /* Was not pending, so it must have run */
+ return false;
+ } else if (defer_t == SDEB_DEFER_POLL) {
+ return true;
}
+
return false;
}
-/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(void)
+
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
- unsigned long iflags;
- int j, k;
enum sdeb_defer_type l_defer_t;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
- if (test_bit(k, sqp->in_use_bm)) {
- sqcp = &sqp->qc_arr[k];
- if (sqcp->a_cmnd == NULL)
- continue;
- devip = (struct sdebug_dev_info *)
- sqcp->a_cmnd->device->hostdata;
- if (devip)
- atomic_dec(&devip->num_in_q);
- sqcp->a_cmnd = NULL;
- sd_dp = sqcp->sd_dp;
- if (sd_dp) {
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- } else
- l_defer_t = SDEB_DEFER_NONE;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp, l_defer_t);
- clear_bit(k, sqp->in_use_bm);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- }
- }
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- }
+ lockdep_assert_held(&sdsc->lock);
+
+ if (!sqcp)
+ return false;
+ sd_dp = &sqcp->sd_dp;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ ASSIGN_QUEUED_CMD(cmnd, NULL);
+
+ if (stop_qc_helper(sd_dp, l_defer_t))
+ sdebug_free_queued_cmd(sqcp);
+
+ return true;
}
-/* Free queued command memory on heap */
-static void free_all_queued(void)
+/*
+ * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ */
+static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
- int j, k;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ unsigned long flags;
+ bool res;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
- sqcp = &sqp->qc_arr[k];
- kfree(sqcp->sd_dp);
- sqcp->sd_dp = NULL;
- }
+ spin_lock_irqsave(&sdsc->lock, flags);
+ res = scsi_debug_stop_cmnd(cmnd);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+
+ return res;
+}
+
+/*
+ * All we can do is set the cmnd as internally aborted and wait for it to
+ * finish. We cannot call scsi_done() as the normal completion path may do that.
+ */
+static bool sdebug_stop_cmnd(struct request *rq, void *data)
+{
+ scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
+
+ return true;
+}
+
+/* Deletes (stops) timers or work queues of all queued commands */
+static void stop_all_queued(void)
+{
+ struct sdebug_host_info *sdhp;
+
+ mutex_lock(&sdebug_host_list_mutex);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
}
+ mutex_unlock(&sdebug_host_list_mutex);
}
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- bool ok;
+ bool ok = scsi_debug_abort_cmnd(SCpnt);
++num_aborts;
- if (SCpnt) {
- ok = stop_queued_cmnd(SCpnt);
- if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
- sdev_printk(KERN_INFO, SCpnt->device,
- "%s: command%s found\n", __func__,
- ok ? "" : " not");
- }
+
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: command%s found\n", __func__,
+ ok ? "" : " not");
+
return SUCCESS;
}
+static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
+{
+ struct scsi_device *sdp = data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+
+ if (scmd->device == sdp)
+ scsi_debug_abort_cmnd(scmd);
+
+ return true;
+}
+
+/* Deletes (stops) timers or work queues of all queued commands per sdev */
+static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
+{
+ struct Scsi_Host *shost = sdp->host;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_debug_stop_all_queued_iter, sdp);
+}
+
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
+ struct scsi_device *sdp = SCpnt->device;
+ struct sdebug_dev_info *devip = sdp->hostdata;
+
++num_dev_resets;
- if (SCpnt && SCpnt->device) {
- struct scsi_device *sdp = SCpnt->device;
- struct sdebug_dev_info *devip =
- (struct sdebug_dev_info *)sdp->hostdata;
- if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
- if (devip)
- set_bit(SDEBUG_UA_POR, devip->uas_bm);
- }
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+
+ scsi_debug_stop_all_queued(sdp);
+ if (devip)
+ set_bit(SDEBUG_UA_POR, devip->uas_bm);
+
return SUCCESS;
}
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
- struct sdebug_host_info *sdbg_host;
+ struct scsi_device *sdp = SCpnt->device;
+ struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
struct sdebug_dev_info *devip;
- struct scsi_device *sdp;
- struct Scsi_Host *hp;
int k = 0;
++num_target_resets;
- if (!SCpnt)
- goto lie;
- sdp = SCpnt->device;
- if (!sdp)
- goto lie;
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
- hp = sdp->host;
- if (!hp)
- goto lie;
- sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
- if (sdbg_host) {
- list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
- dev_list)
- if (devip->target == sdp->id) {
- set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- ++k;
- }
+
+ list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
+ if (devip->target == sdp->id) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
}
+
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp,
"%s: %d device(s) found in target\n", __func__, k);
-lie:
+
return SUCCESS;
}
static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{
- struct sdebug_host_info *sdbg_host;
+ struct scsi_device *sdp = SCpnt->device;
+ struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
struct sdebug_dev_info *devip;
- struct scsi_device *sdp;
- struct Scsi_Host *hp;
int k = 0;
++num_bus_resets;
- if (!(SCpnt && SCpnt->device))
- goto lie;
- sdp = SCpnt->device;
+
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
- hp = sdp->host;
- if (hp) {
- sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
- if (sdbg_host) {
- list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
- dev_list) {
- set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- ++k;
- }
- }
+
+ list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
}
+
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp,
"%s: %d device(s) found in host\n", __func__, k);
-lie:
return SUCCESS;
}
@@ -5464,9 +5383,9 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
int k = 0;
++num_host_resets;
- if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
@@ -5474,7 +5393,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
++k;
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -5537,11 +5456,18 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
static void block_unblock_all_queues(bool block)
{
- int j;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
+
+ lockdep_assert_held(&sdebug_host_list_mutex);
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)block);
+ if (block)
+ scsi_block_requests(shost);
+ else
+ scsi_unblock_requests(shost);
+ }
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5554,10 +5480,13 @@ static void tweak_cmnd_count(void)
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
+
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_queue_stats(void)
@@ -5577,6 +5506,33 @@ static bool inject_on_this_cmd(void)
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
+
+void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
+{
+ if (sqcp)
+ kmem_cache_free(queued_cmd_cache, sqcp);
+}
+
+static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
+{
+ struct sdebug_queued_cmd *sqcp;
+ struct sdebug_defer *sd_dp;
+
+ sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
+ if (!sqcp)
+ return NULL;
+
+ sd_dp = &sqcp->sd_dp;
+
+ hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+
+ sqcp->scmd = scmd;
+
+ return sqcp;
+}
+
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -5588,13 +5544,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
- bool new_sd_dp;
- bool inject = false;
- bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
- int k, num_in_q, qdepth;
- unsigned long iflags;
+ struct request *rq = scsi_cmd_to_rq(cmnd);
+ bool polled = rq->cmd_flags & REQ_POLLED;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ unsigned long flags;
u64 ns_from_boot = 0;
- struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
@@ -5609,66 +5563,30 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (delta_jiff == 0)
goto respond_in_thread;
- sqp = get_queue(cmnd);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (unlikely(atomic_read(&sqp->blocked))) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- num_in_q = atomic_read(&devip->num_in_q);
- qdepth = cmnd->device->queue_depth;
- if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
- if (scsi_result) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- goto respond_in_thread;
- } else
- scsi_result = device_qfull_result;
- } else if (unlikely(sdebug_every_nth &&
- (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
- (scsi_result == 0))) {
- if ((num_in_q == (qdepth - 1)) &&
+
+ if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
+ (scsi_result == 0))) {
+ int num_in_q = scsi_device_busy(sdp);
+ int qdepth = cmnd->device->queue_depth;
+
+ if ((num_in_q == qdepth) &&
(atomic_inc_return(&sdebug_a_tsf) >=
abs(sdebug_every_nth))) {
atomic_set(&sdebug_a_tsf, 0);
- inject = true;
scsi_result = device_qfull_result;
- }
- }
- k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
- if (unlikely(k >= sdebug_max_queue)) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (scsi_result)
- goto respond_in_thread;
- scsi_result = device_qfull_result;
- if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
- __func__, sdebug_max_queue);
- goto respond_in_thread;
- }
- set_bit(k, sqp->in_use_bm);
- atomic_inc(&devip->num_in_q);
- sqcp = &sqp->qc_arr[k];
- sqcp->a_cmnd = cmnd;
- cmnd->host_scribble = (unsigned char *)sqcp;
- sd_dp = sqcp->sd_dp;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-
- if (!sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (!sd_dp) {
- atomic_dec(&devip->num_in_q);
- clear_bit(k, sqp->in_use_bm);
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
+ sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
+ __func__, num_in_q);
}
- new_sd_dp = true;
- } else {
- new_sd_dp = false;
}
- /* Set the hostwide tag */
- if (sdebug_host_max_queue)
- sd_dp->hc_idx = get_tag(cmnd);
+ sqcp = sdebug_alloc_queued_cmd(cmnd);
+ if (!sqcp) {
+ pr_err("%s no alloc\n", __func__);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ sd_dp = &sqcp->sd_dp;
if (polled)
ns_from_boot = ktime_get_boottime_ns();
@@ -5715,14 +5633,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
u64 d = ktime_get_boottime_ns() - ns_from_boot;
if (kt <= d) { /* elapsed duration >= kt */
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- sqcp->a_cmnd = NULL;
- atomic_dec(&devip->num_in_q);
- clear_bit(k, sqp->in_use_bm);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (new_sd_dp)
- kfree(sd_dp);
/* call scsi_done() from this thread */
+ sdebug_free_queued_cmd(sqcp);
scsi_done(cmnd);
return 0;
}
@@ -5730,72 +5642,54 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt -= d;
}
}
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
+ spin_lock_irqsave(&sdsc->lock, flags);
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (!sd_dp->init_poll) {
- sd_dp->init_poll = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
- if (!sd_dp->init_hrt) {
- sd_dp->init_hrt = true;
- sqcp->sd_dp = sd_dp;
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
/* schedule the invocation of scsi_done() for a later time */
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
+ /*
+ * The completion handler will try to grab sdsc->lock,
+ * so there is no chance that the completion handler
+ * will call scsi_done() until we release the lock
+ * here (so ok to keep referencing sdsc).
+ */
+ spin_unlock_irqrestore(&sdsc->lock, flags);
}
- if (sdebug_statistics)
- sd_dp->issuing_cpu = raw_smp_processor_id();
} else { /* jdelay < 0, use work queue */
if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
- atomic_read(&sdeb_inject_pending)))
+ atomic_read(&sdeb_inject_pending))) {
sd_dp->aborted = true;
+ atomic_set(&sdeb_inject_pending, 0);
+ sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
+ blk_mq_unique_tag_to_tag(get_tag(cmnd)));
+ }
+
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (!sd_dp->init_poll) {
- sd_dp->init_poll = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
- if (!sd_dp->init_wq) {
- sd_dp->init_wq = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
- }
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
- }
- if (sdebug_statistics)
- sd_dp->issuing_cpu = raw_smp_processor_id();
- if (unlikely(sd_dp->aborted)) {
- sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
- scsi_cmd_to_rq(cmnd)->tag);
- blk_abort_request(scsi_cmd_to_rq(cmnd));
- atomic_set(&sdeb_inject_pending, 0);
- sd_dp->aborted = false;
+ spin_unlock_irqrestore(&sdsc->lock, flags);
}
}
- if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
- sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
- num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
+
return 0;
respond_in_thread: /* call back to mid-layer using invocation thread */
@@ -5996,14 +5890,39 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
return length;
}
+struct sdebug_submit_queue_data {
+ int *first;
+ int *last;
+ int queue_num;
+};
+
+static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
+{
+ struct sdebug_submit_queue_data *data = opaque;
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+ u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
+ int queue_num = data->queue_num;
+
+ if (hwq != queue_num)
+ return true;
+
+ /* Rely on iter'ing in ascending tag order */
+ if (*data->first == -1)
+ *data->first = *data->last = tag;
+ else
+ *data->last = tag;
+
+ return true;
+}
+
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
* same for each scsi_debug host (if more than one). Some of the counters
* output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- int f, j, l;
- struct sdebug_queue *sqp;
struct sdebug_host_info *sdhp;
+ int j;
seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
SDEBUG_VERSION, sdebug_version_date);
@@ -6031,11 +5950,17 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
atomic_read(&sdeb_mq_poll_count));
seq_printf(m, "submit_queues=%d\n", submit_queues);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+ for (j = 0; j < submit_queues; ++j) {
+ int f = -1, l = -1;
+ struct sdebug_submit_queue_data data = {
+ .queue_num = j,
+ .first = &f,
+ .last = &l,
+ };
seq_printf(m, " queue %d:\n", j);
- f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (f != sdebug_max_queue) {
- l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
+ blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
+ &data);
+ if (f >= 0) {
seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
"first,last bits", f, l);
}
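Editor's note: both this /proc summary and the poll path further down now walk the block layer tag set instead of a private in_use_bm bitmap. A minimal sketch of the blk_mq_tagset_busy_iter() pattern, with hypothetical example_* names:

#include <linux/blk-mq.h>

struct example_iter_data {
	unsigned int queue_num;
	int count;
};

/* Invoked once per in-flight request in the tag set; return true to keep
 * iterating.  Requests that belong to other hardware queues are skipped. */
static bool example_busy_iter(struct request *rq, void *opaque)
{
	struct example_iter_data *d = opaque;
	u32 unique_tag = blk_mq_unique_tag(rq);

	if (blk_mq_unique_tag_to_hwq(unique_tag) == d->queue_num)
		d->count++;
	return true;
}

/* usage: blk_mq_tagset_busy_iter(&shost->tag_set, example_busy_iter, &d); */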
@@ -6086,15 +6011,15 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
res = count;
if (sdebug_jdelay != jdelay) {
- int j, k;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- k = find_first_bit(sqp->in_use_bm,
- sdebug_max_queue);
- if (k != sdebug_max_queue) {
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (scsi_host_busy(shost)) {
res = -EBUSY; /* queued commands */
break;
}
@@ -6104,6 +6029,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
sdebug_ndelay = 0;
}
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return res;
}
@@ -6126,25 +6052,27 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
(ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
res = count;
if (sdebug_ndelay != ndelay) {
- int j, k;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- k = find_first_bit(sqp->in_use_bm,
- sdebug_max_queue);
- if (k != sdebug_max_queue) {
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (scsi_host_busy(shost)) {
res = -EBUSY; /* queued commands */
break;
}
}
+
if (res > 0) {
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return res;
}
@@ -6390,13 +6318,13 @@ static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6426,7 +6354,7 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list,
host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -6435,7 +6363,7 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6452,28 +6380,19 @@ static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int j, n, k, a;
- struct sdebug_queue *sqp;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- block_unblock_all_queues(true);
- k = 0;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
- if (a > k)
- k = a;
- }
- sdebug_max_queue = n;
- if (k == SDEBUG_CANQUEUE)
- atomic_set(&retired_max_queue, 0);
- else if (k >= n)
- atomic_set(&retired_max_queue, k + 1);
+ mutex_lock(&sdebug_host_list_mutex);
+
+ /* We may only change sdebug_max_queue when we have no shosts */
+ if (list_empty(&sdebug_host_list))
+ sdebug_max_queue = n;
else
- atomic_set(&retired_max_queue, 0);
- block_unblock_all_queues(false);
+ count = -EBUSY;
+ mutex_unlock(&sdebug_host_list_mutex);
return count;
}
return -EINVAL;
@@ -6542,7 +6461,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list,
host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -6551,7 +6470,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6901,7 +6820,6 @@ static int __init scsi_debug_init(void)
ramdisk_lck_a[0] = &atomic_rw;
ramdisk_lck_a[1] = &atomic_rw2;
- atomic_set(&retired_max_queue, 0);
if (sdebug_ndelay >= 1000 * 1000 * 1000) {
pr_warn("ndelay must be less than 1 second, ignored\n");
@@ -6997,13 +6915,6 @@ static int __init scsi_debug_init(void)
sdebug_max_queue);
}
- sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
- GFP_KERNEL);
- if (sdebug_q_arr == NULL)
- return -ENOMEM;
- for (k = 0; k < submit_queues; ++k)
- spin_lock_init(&sdebug_q_arr[k].qc_lock);
-
/*
* check for host managed zoned block device specified with
* ptype=0x14 or zbc=XXX.
@@ -7012,10 +6923,8 @@ static int __init scsi_debug_init(void)
sdeb_zbc_model = BLK_ZONED_HM;
} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
k = sdeb_zbc_model_str(sdeb_zbc_model_s);
- if (k < 0) {
- ret = k;
- goto free_q_arr;
- }
+ if (k < 0)
+ return k;
sdeb_zbc_model = k;
switch (sdeb_zbc_model) {
case BLK_ZONED_NONE:
@@ -7027,8 +6936,7 @@ static int __init scsi_debug_init(void)
break;
default:
pr_err("Invalid ZBC model\n");
- ret = -EINVAL;
- goto free_q_arr;
+ return -EINVAL;
}
}
if (sdeb_zbc_model != BLK_ZONED_NONE) {
@@ -7075,17 +6983,14 @@ static int __init scsi_debug_init(void)
sdebug_unmap_granularity <=
sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
- ret = -EINVAL;
- goto free_q_arr;
+ return -EINVAL;
}
}
xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
if (want_store) {
idx = sdebug_add_store();
- if (idx < 0) {
- ret = idx;
- goto free_q_arr;
- }
+ if (idx < 0)
+ return idx;
}
pseudo_primary = root_device_register("pseudo_0");
@@ -7108,6 +7013,12 @@ static int __init scsi_debug_init(void)
hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
+ queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
+ if (!queued_cmd_cache) {
+ ret = -ENOMEM;
+ goto driver_unreg;
+ }
+
for (k = 0; k < hosts_to_add; k++) {
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
@@ -7130,14 +7041,14 @@ static int __init scsi_debug_init(void)
return 0;
+driver_unreg:
+ driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
sdebug_erase_store(idx, NULL);
-free_q_arr:
- kfree(sdebug_q_arr);
return ret;
}
@@ -7145,17 +7056,15 @@ static void __exit scsi_debug_exit(void)
{
int k = sdebug_num_hosts;
- stop_all_queued();
for (; k; k--)
sdebug_do_remove_host(true);
- free_all_queued();
+ kmem_cache_destroy(queued_cmd_cache);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
- kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);
@@ -7165,7 +7074,7 @@ static void sdebug_release_adapter(struct device *dev)
{
struct sdebug_host_info *sdbg_host;
- sdbg_host = to_sdebug_host(dev);
+ sdbg_host = dev_to_sdebug_host(dev);
kfree(sdbg_host);
}
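Editor's note: dev_to_sdebug_host() is introduced earlier in this patch, outside the hunks shown here; it presumably replaces the old hostdata indirection with the usual container_of() wrapper, along these lines:

/* Assumed definition, shown for context only. */
#define dev_to_sdebug_host(d) \
	container_of(d, struct sdebug_host_info, dev)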
@@ -7311,9 +7220,9 @@ static int sdebug_add_host_helper(int per_host_idx)
goto clean;
}
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
@@ -7322,9 +7231,9 @@ static int sdebug_add_host_helper(int per_host_idx)
error = device_register(&sdbg_host->dev);
if (error) {
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_del(&sdbg_host->host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
goto clean;
}
@@ -7364,7 +7273,7 @@ static void sdebug_do_remove_host(bool the_end)
struct sdebug_host_info *sdbg_host = NULL;
struct sdebug_host_info *sdbg_host2;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
if (!list_empty(&sdebug_host_list)) {
sdbg_host = list_entry(sdebug_host_list.prev,
struct sdebug_host_info, host_list);
@@ -7389,7 +7298,7 @@ static void sdebug_do_remove_host(bool the_end)
}
if (sdbg_host)
list_del(&sdbg_host->host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
if (!sdbg_host)
return;
@@ -7400,16 +7309,13 @@ static void sdebug_do_remove_host(bool the_end)
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
- int num_in_q = 0;
- struct sdebug_dev_info *devip;
+ struct sdebug_dev_info *devip = sdev->hostdata;
- block_unblock_all_queues(true);
- devip = (struct sdebug_dev_info *)sdev->hostdata;
- if (NULL == devip) {
- block_unblock_all_queues(false);
+ if (!devip)
return -ENODEV;
- }
- num_in_q = atomic_read(&devip->num_in_q);
+
+ mutex_lock(&sdebug_host_list_mutex);
+ block_unblock_all_queues(true);
if (qdepth > SDEBUG_CANQUEUE) {
qdepth = SDEBUG_CANQUEUE;
@@ -7421,11 +7327,12 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
if (qdepth != sdev->queue_depth)
scsi_change_queue_depth(sdev, qdepth);
- if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
- sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
- __func__, qdepth, num_in_q);
- }
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
+
+ if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
+ sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
+
return sdev->queue_depth;
}
@@ -7515,94 +7422,82 @@ static void sdebug_map_queues(struct Scsi_Host *shost)
}
}
-static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+struct sdebug_blk_mq_poll_data {
+ unsigned int queue_num;
+ int *num_entries;
+};
+
+/*
+ * We don't handle aborted commands here, but it does not seem possible to have
+ * aborted polled commands from schedule_resp()
+ */
+static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
- bool first;
- bool retiring = false;
- int num_entries = 0;
- unsigned int qc_idx = 0;
- unsigned long iflags;
- ktime_t kt_from_boot = ktime_get_boottime();
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct scsi_cmnd *scp;
- struct sdebug_dev_info *devip;
+ struct sdebug_blk_mq_poll_data *data = opaque;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
struct sdebug_defer *sd_dp;
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+ struct sdebug_queued_cmd *sqcp;
+ unsigned long flags;
+ int queue_num = data->queue_num;
+ ktime_t time;
- sqp = sdebug_q_arr + queue_num;
+ /* We're only interested in one queue for this iteration */
+ if (hwq != queue_num)
+ return true;
- spin_lock_irqsave(&sqp->qc_lock, iflags);
+ /* Subsequent checks would fail if this failed, but check anyway */
+ if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+ return true;
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (qc_idx >= sdebug_max_queue)
- goto unlock;
+ time = ktime_get_boottime();
- for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
- if (first) {
- first = false;
- if (!test_bit(qc_idx, sqp->in_use_bm))
- continue;
- } else {
- qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
- }
- if (qc_idx >= sdebug_max_queue)
- break;
+ spin_lock_irqsave(&sdsc->lock, flags);
+ sqcp = TO_QUEUED_CMD(cmd);
+ if (!sqcp) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- sqcp = &sqp->qc_arr[qc_idx];
- sd_dp = sqcp->sd_dp;
- if (unlikely(!sd_dp))
- continue;
- scp = sqcp->a_cmnd;
- if (unlikely(scp == NULL)) {
- pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
- queue_num, qc_idx, __func__);
- break;
- }
- if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
- if (kt_from_boot < sd_dp->cmpl_ts)
- continue;
+ sd_dp = &sqcp->sd_dp;
+ if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- } else /* ignoring non REQ_POLLED requests */
- continue;
- devip = (struct sdebug_dev_info *)scp->device->hostdata;
- if (likely(devip))
- atomic_dec(&devip->num_in_q);
- else
- pr_err("devip=NULL from %s\n", __func__);
- if (unlikely(atomic_read(&retired_max_queue) > 0))
- retiring = true;
-
- sqcp->a_cmnd = NULL;
- if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
- pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
- sqp, queue_num, qc_idx, __func__);
- break;
- }
- if (unlikely(retiring)) { /* user has reduced max_queue */
- int k, retval;
+ if (time < sd_dp->cmpl_ts) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- retval = atomic_read(&retired_max_queue);
- if (qc_idx >= retval) {
- pr_err("index %d too large\n", retval);
- break;
- }
- k = find_last_bit(sqp->in_use_bm, retval);
- if ((k < sdebug_max_queue) || (k == retval))
- atomic_set(&retired_max_queue, 0);
- else
- atomic_set(&retired_max_queue, k + 1);
- }
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- scsi_done(scp); /* callback to mid level */
- num_entries++;
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
- break;
+ ASSIGN_QUEUED_CMD(cmd, NULL);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+
+ if (sdebug_statistics) {
+ atomic_inc(&sdebug_completions);
+ if (raw_smp_processor_id() != sd_dp->issuing_cpu)
+ atomic_inc(&sdebug_miss_cpus);
}
-unlock:
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ sdebug_free_queued_cmd(sqcp);
+
+ scsi_done(cmd); /* callback to mid level */
+ (*data->num_entries)++;
+ return true;
+}
+
+static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct sdebug_blk_mq_poll_data data = {
+ .queue_num = queue_num,
+ .num_entries = &num_entries,
+ };
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
+ &data);
if (num_entries > 0)
atomic_add(num_entries, &sdeb_mq_poll_count);
@@ -7776,6 +7671,16 @@ err_out:
return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
+static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+
+ spin_lock_init(&sdsc->lock);
+
+ return 0;
+}
+
+
static struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
.write_info = scsi_debug_write_info,
@@ -7803,6 +7708,8 @@ static struct scsi_host_template sdebug_driver_template = {
.max_segment_size = -1U,
.module = THIS_MODULE,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct sdebug_scsi_cmd),
+ .init_cmd_priv = sdebug_init_cmd_priv,
};
static int sdebug_driver_probe(struct device *dev)
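Editor's note: with .cmd_size set, the midlayer reserves driver-private space alongside every struct scsi_cmnd, which is what lets schedule_resp() and the poll iterator use scsi_cmd_priv() instead of host_scribble. A hedged sketch of the retrieval side (the function name is hypothetical):

#include <scsi/scsi_cmnd.h>

static void example_io_path(struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	unsigned long flags;

	spin_lock_irqsave(&sdsc->lock, flags);
	/* attach or detach the deferred-completion state for this command */
	spin_unlock_irqrestore(&sdsc->lock, flags);
}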
@@ -7812,14 +7719,14 @@ static int sdebug_driver_probe(struct device *dev)
struct Scsi_Host *hpnt;
int hprot;
- sdbg_host = to_sdebug_host(dev);
+ sdbg_host = dev_to_sdebug_host(dev);
sdebug_driver_template.can_queue = sdebug_max_queue;
sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
if (!sdebug_clustering)
sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
- hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
+ hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
if (NULL == hpnt) {
pr_err("scsi_host_alloc failed\n");
error = -ENODEV;
@@ -7862,7 +7769,6 @@ static int sdebug_driver_probe(struct device *dev)
hpnt->nr_maps = 3;
sdbg_host->shost = hpnt;
- *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
hpnt->max_id = sdebug_num_tgts + 1;
else
@@ -7936,7 +7842,7 @@ static void sdebug_driver_remove(struct device *dev)
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
- sdbg_host = to_sdebug_host(dev);
+ sdbg_host = dev_to_sdebug_host(dev);
scsi_remove_host(sdbg_host->shost);
@@ -7950,15 +7856,8 @@ static void sdebug_driver_remove(struct device *dev)
scsi_host_put(sdbg_host->shost);
}
-static int pseudo_lld_bus_match(struct device *dev,
- struct device_driver *dev_driver)
-{
- return 1;
-}
-
static struct bus_type pseudo_lld_bus = {
.name = "pseudo",
- .match = pseudo_lld_bus_match,
.probe = sdebug_driver_probe,
.remove = sdebug_driver_remove,
.drv_groups = sdebug_drv_groups,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c7080454aea9..3fcaf10a9dfe 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -134,7 +134,7 @@ static struct {
{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
- {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
+ {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
@@ -188,6 +188,7 @@ static struct {
{"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
@@ -233,6 +234,7 @@ static struct {
{"SGI", "RAID5", "*", BLIST_SPARSELUN},
{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2aa2c2aee6e7..3ec8bfd4090f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -58,7 +58,7 @@
#define HOST_RESET_SETTLE_TIME (10)
static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
-static enum scsi_disposition scsi_try_to_abort_cmd(struct scsi_host_template *,
+static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
struct scsi_cmnd *);
void scsi_eh_wakeup(struct Scsi_Host *shost)
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(scsi_check_sense);
static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
- struct scsi_host_template *sht = sdev->host->hostt;
+ const struct scsi_host_template *sht = sdev->host->hostt;
struct scsi_device *tmp_sdev;
if (!sht->track_queue_depth ||
@@ -731,7 +731,7 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
static void scsi_handle_queue_full(struct scsi_device *sdev)
{
- struct scsi_host_template *sht = sdev->host->hostt;
+ const struct scsi_host_template *sht = sdev->host->hostt;
struct scsi_device *tmp_sdev;
if (!sht->track_queue_depth)
@@ -840,7 +840,7 @@ static enum scsi_disposition scsi_try_host_reset(struct scsi_cmnd *scmd)
unsigned long flags;
enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
- struct scsi_host_template *hostt = host->hostt;
+ const struct scsi_host_template *hostt = host->hostt;
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, host, "Snd Host RST\n"));
@@ -870,7 +870,7 @@ static enum scsi_disposition scsi_try_bus_reset(struct scsi_cmnd *scmd)
unsigned long flags;
enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
- struct scsi_host_template *hostt = host->hostt;
+ const struct scsi_host_template *hostt = host->hostt;
SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
"%s: Snd Bus RST\n", __func__));
@@ -912,7 +912,7 @@ static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd)
unsigned long flags;
enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
- struct scsi_host_template *hostt = host->hostt;
+ const struct scsi_host_template *hostt = host->hostt;
if (!hostt->eh_target_reset_handler)
return FAILED;
@@ -941,7 +941,7 @@ static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd)
static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
enum scsi_disposition rtn;
- struct scsi_host_template *hostt = scmd->device->host->hostt;
+ const struct scsi_host_template *hostt = scmd->device->host->hostt;
if (!hostt->eh_device_reset_handler)
return FAILED;
@@ -970,7 +970,7 @@ static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
* link down on FibreChannel)
*/
static enum scsi_disposition
-scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
+scsi_try_to_abort_cmd(const struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
{
if (!hostt->eh_abort_handler)
return FAILED;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 4e842d79de31..d217be323cc6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1057,6 +1057,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
else if (*bflags & BLIST_SKIP_VPD_PAGES)
sdev->skip_vpd_pages = 1;
+ if (*bflags & BLIST_NO_VPD_SIZE)
+ sdev->no_vpd_size = 1;
+
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure) {
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 7259704a7f52..7f0914ea168f 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -21,25 +21,11 @@ static struct ctl_table scsi_table[] = {
{ }
};
-static struct ctl_table scsi_dir_table[] = {
- { .procname = "scsi",
- .mode = 0555,
- .child = scsi_table },
- { }
-};
-
-static struct ctl_table scsi_root_table[] = {
- { .procname = "dev",
- .mode = 0555,
- .child = scsi_dir_table },
- { }
-};
-
static struct ctl_table_header *scsi_table_header;
int __init scsi_init_sysctl(void)
{
- scsi_table_header = register_sysctl_table(scsi_root_table);
+ scsi_table_header = register_sysctl("dev/scsi", scsi_table);
if (!scsi_table_header)
return -ENOMEM;
return 0;
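Editor's note: register_sysctl() accepts a slash-separated path, so the hand-built "dev" and "scsi" directory tables become unnecessary. A minimal sketch of the pattern (hypothetical names), including the matching teardown:

#include <linux/sysctl.h>

static struct ctl_table example_table[] = {
	/* one or more entries, then the empty terminator */
	{ }
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
	example_header = register_sysctl("dev/example", example_table);
	return example_header ? 0 : -ENOMEM;
}

static void example_sysctl_exit(void)
{
	unregister_sysctl_table(example_header);
}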
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ee28f73af4d4..603e8fcfcb8a 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -296,7 +296,7 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
- struct scsi_host_template *sht = shost->hostt;
+ const struct scsi_host_template *sht = shost->hostt;
int ret = -EINVAL;
int type;
@@ -1025,7 +1025,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
{
int depth, retval;
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
+ const struct scsi_host_template *sht = sdev->host->hostt;
if (!sht->change_queue_depth)
return -EINVAL;
@@ -1606,7 +1606,7 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
unsigned long flags;
struct Scsi_Host *shost = sdev->host;
- struct scsi_host_template *hostt = shost->hostt;
+ const struct scsi_host_template *hostt = shost->hostt;
struct scsi_target *starget = sdev->sdev_target;
device_initialize(&sdev->sdev_gendev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f12e9467ebb4..64ff2629eaf9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -905,7 +905,7 @@ fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf,
{
struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
struct fc_tlv_desc *tlv;
- u32 desc_cnt = 0, bytes_remain;
+ u32 bytes_remain;
u32 dtag;
enum fc_host_event_code event_code =
event_acknowledge ? FCH_EVT_LINK_FPIN_ACK : FCH_EVT_LINK_FPIN;
@@ -932,7 +932,6 @@ fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf,
fc_fpin_congn_stats_update(shost, tlv);
}
- desc_cnt++;
bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
tlv = fc_tlv_next_desc(tlv);
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3e7a69eeb8f0..5247abe4f266 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -588,7 +588,6 @@ ATTRIBUTE_GROUPS(sd_disk);
static struct class sd_disk_class = {
.name = "scsi_disk",
- .owner = THIS_MODULE,
.dev_release = scsi_disk_release,
.dev_groups = sd_disk_groups,
};
@@ -3062,8 +3061,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
}
if (sdkp->device->type == TYPE_ZBC) {
- /* Host-managed */
+ /*
+ * Host-managed: Per ZBC and ZAC specifications, writes in
+ * sequential write required zones of host-managed devices must
+ * be aligned to the device physical block size.
+ */
disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
+ blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
} else {
sdkp->zoned = zoned;
if (sdkp->zoned == 1) {
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6b3a02d4406c..22801c24ea19 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -965,14 +965,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
- /*
- * Per ZBC and ZAC specifications, writes in sequential write required
- * zones of host-managed devices must be aligned to the device physical
- * block size.
- */
- if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
- blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
-
sdkp->early_zone_info.nr_zones = nr_zones;
sdkp->early_zone_info.zone_blocks = zone_blocks;
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index b11a9162e73a..d7d0c35c58b8 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -509,9 +509,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
int i;
struct ses_component *scomp;
- if (!edev->component[0].scratch)
- return 0;
-
for (i = 0; i < edev->components; i++) {
scomp = edev->component[i].scratch;
if (scomp->addr != efd->addr)
@@ -602,8 +599,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
components++,
type_ptr[0],
name);
- else
+ else if (components < edev->components)
ecomp = &edev->component[components++];
+ else
+ ecomp = ERR_PTR(-EINVAL);
if (!IS_ERR(ecomp)) {
if (addl_desc_ptr) {
@@ -663,8 +662,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
}
}
-static int ses_intf_add(struct device *cdev,
- struct class_interface *intf)
+static int ses_intf_add(struct device *cdev)
{
struct scsi_device *sdev = to_scsi_device(cdev->parent);
struct scsi_device *tmp_sdev;
@@ -734,11 +732,6 @@ static int ses_intf_add(struct device *cdev,
components += type_ptr[1];
}
- if (components == 0) {
- sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
- goto err_free;
- }
-
ses_dev->page1 = buf;
ses_dev->page1_len = len;
buf = NULL;
@@ -780,9 +773,11 @@ static int ses_intf_add(struct device *cdev,
buf = NULL;
}
page2_not_supported:
- scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
- if (!scomp)
- goto err_free;
+ if (components > 0) {
+ scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
+ if (!scomp)
+ goto err_free;
+ }
edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
components, &ses_enclosure_callbacks);
@@ -869,8 +864,7 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
enclosure_unregister(edev);
}
-static void ses_intf_remove(struct device *cdev,
- struct class_interface *intf)
+static void ses_intf_remove(struct device *cdev)
{
struct scsi_device *sdev = to_scsi_device(cdev->parent);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index a91049213203..037f8c98a6d3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -96,8 +96,8 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
#define SG_SECTOR_SZ 512
-static int sg_add_device(struct device *, struct class_interface *);
-static void sg_remove_device(struct device *, struct class_interface *);
+static int sg_add_device(struct device *);
+static void sg_remove_device(struct device *);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
@@ -1488,7 +1488,7 @@ out_unlock:
}
static int
-sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
+sg_add_device(struct device *cl_dev)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = NULL;
@@ -1578,7 +1578,7 @@ sg_device_destroy(struct kref *kref)
}
static void
-sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
+sg_remove_device(struct device *cl_dev)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
@@ -1677,7 +1677,7 @@ init_sg(void)
SG_MAX_DEVS, "sg");
if (rc)
return rc;
- sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
+ sg_sysfs_class = class_create("scsi_generic");
if ( IS_ERR(sg_sysfs_class) ) {
rc = PTR_ERR(sg_sysfs_class);
goto err_out;
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 57d5dff62f63..88e2b5eb9caa 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -204,7 +204,7 @@ static inline void init_hpc_chain(struct ip22_hostdata *hdata)
* arguments not with pointers. So this is going to blow up beautifully
* on 64-bit systems with memory outside the compat address spaces.
*/
-static struct scsi_host_template sgiwd93_template = {
+static const struct scsi_host_template sgiwd93_template = {
.module = THIS_MODULE,
.proc_name = "SGIWD93",
.name = "SGI WD93",
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 973d240649ab..789460b0a342 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 228838eb3686..f960b5095d09 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -1108,6 +1108,7 @@ struct pqi_scsi_dev {
u8 volume_offline : 1;
u8 rescan : 1;
u8 ignore_device : 1;
+ u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
bool device_offline;
@@ -1147,7 +1148,7 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
- atomic_t raid_bypass_cnt;
+ unsigned int raid_bypass_cnt;
};
/* VPD inquiry pages */
@@ -1357,6 +1358,7 @@ struct pqi_ctrl_info {
u32 max_write_raid_5_6;
u32 max_write_raid_1_10_2drive;
u32 max_write_raid_1_10_3drive;
+ int numa_node;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 49a8f91810b6..3669affd114b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.20-035"
+#define DRIVER_VERSION "2.1.22-040"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 20
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 22
+#define DRIVER_REVISION 40
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -519,6 +519,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
writeb(status, ctrl_info->soft_reset_status);
}
+static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+ bool io_high_prio;
+ int priority_class;
+
+ io_high_prio = false;
+
+ if (device->ncq_prio_enable) {
+ priority_class =
+ IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+ if (priority_class == IOPRIO_CLASS_RT) {
+ /* Set NCQ priority for read/write commands. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_16:
+ case READ_16:
+ case WRITE_12:
+ case READ_12:
+ case WRITE_10:
+ case READ_10:
+ case WRITE_6:
+ case READ_6:
+ io_high_prio = true;
+ break;
+ }
+ }
+ }
+
+ return io_high_prio;
+}
+
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
size_t buffer_length, enum dma_data_direction data_direction)
@@ -578,10 +608,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb = request->cdb;
switch (cmd) {
- case TEST_UNIT_READY:
- request->data_direction = SOP_READ_FLAG;
- cdb[0] = TEST_UNIT_READY;
- break;
case INQUIRY:
request->data_direction = SOP_READ_FLAG;
cdb[0] = INQUIRY;
@@ -708,7 +734,8 @@ static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *
}
}
- pqi_reinit_io_request(io_request);
+ if (io_request)
+ pqi_reinit_io_request(io_request);
return io_request;
}
@@ -1588,6 +1615,7 @@ no_buffer:
#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
+#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
@@ -1636,6 +1664,8 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
PQI_DEVICE_NCQ_PRIO_SUPPORTED);
+ device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
+
return 0;
}
@@ -1681,7 +1711,7 @@ out:
/*
* Prevent adding drive to OS for some corner cases such as a drive
- * undergoing a sanitize operation. Some OSes will continue to poll
+ * undergoing a sanitize (erase) operation. Some OSes will continue to poll
* the drive until the sanitize completes, which can take hours,
* resulting in long bootup delays. Commands such as TUR, READ_CAP
* are allowed, but READ/WRITE cause check condition. So the OS
@@ -1689,73 +1719,9 @@ out:
* Note: devices that have completed sanitize must be re-enabled
* using the management utility.
*/
-static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{
- u8 scsi_status;
- int rc;
- enum dma_data_direction dir;
- char *buffer;
- int buffer_length = 64;
- size_t sense_data_length;
- struct scsi_sense_hdr sshdr;
- struct pqi_raid_path_request request;
- struct pqi_raid_error_info error_info;
- bool offline = false; /* Assume keep online */
-
- /* Do not check controllers. */
- if (pqi_is_hba_lunid(device->scsi3addr))
- return false;
-
- /* Do not check LVs. */
- if (pqi_is_logical_device(device))
- return false;
-
- buffer = kmalloc(buffer_length, GFP_KERNEL);
- if (!buffer)
- return false; /* Assume not offline */
-
- /* Check for SANITIZE in progress using TUR */
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &dir);
- if (rc)
- goto out; /* Assume not offline */
-
- memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
-
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
-
- if (rc)
- goto out; /* Assume not offline */
-
- scsi_status = error_info.status;
- sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
- if (sense_data_length == 0)
- sense_data_length =
- get_unaligned_le16(&error_info.response_data_length);
- if (sense_data_length) {
- if (sense_data_length > sizeof(error_info.data))
- sense_data_length = sizeof(error_info.data);
-
- /*
- * Check for sanitize in progress: asc:0x04, ascq: 0x1b
- */
- if (scsi_status == SAM_STAT_CHECK_CONDITION &&
- scsi_normalize_sense(error_info.data,
- sense_data_length, &sshdr) &&
- sshdr.sense_key == NOT_READY &&
- sshdr.asc == 0x04 &&
- sshdr.ascq == 0x1b) {
- device->device_offline = true;
- offline = true;
- goto out; /* Keep device offline */
- }
- }
-
-out:
- kfree(buffer);
- return offline;
+ return device->erase_in_progress;
}
static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
@@ -2499,10 +2465,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (!pqi_is_supported_device(device))
continue;
- /* Do not present disks that the OS cannot fully probe */
- if (pqi_keep_device_offline(ctrl_info, device))
- continue;
-
/* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
@@ -2525,6 +2487,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue;
}
+ /* Do not present disks that the OS cannot fully probe. */
+ if (pqi_keep_device_offline(device))
+ continue;
+
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
@@ -5504,15 +5470,19 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static int pqi_raid_submit_scsi_cmd_with_io_request(
- struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
- struct pqi_queue_group *queue_group)
+ struct pqi_queue_group *queue_group, bool io_high_prio)
{
int rc;
size_t cdb_length;
+ struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_raid_io_complete;
io_request->scmd = scmd;
@@ -5522,6 +5492,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
@@ -5587,14 +5558,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
- struct pqi_io_request *io_request;
+ bool io_high_prio;
- io_request = pqi_alloc_io_request(ctrl_info, scmd);
- if (!io_request)
- return SCSI_MLQUEUE_HOST_BUSY;
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
- return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
- device, scmd, queue_group);
+ return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
}
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
@@ -5639,44 +5607,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
-{
- bool io_high_prio;
- int priority_class;
-
- io_high_prio = false;
-
- if (device->ncq_prio_enable) {
- priority_class =
- IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
- if (priority_class == IOPRIO_CLASS_RT) {
- /* Set NCQ priority for read/write commands. */
- switch (scmd->cmnd[0]) {
- case WRITE_16:
- case READ_16:
- case WRITE_12:
- case READ_12:
- case WRITE_10:
- case READ_10:
- case WRITE_6:
- case READ_6:
- io_high_prio = true;
- break;
- }
- }
- }
-
- return io_high_prio;
-}
-
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
bool io_high_prio;
- io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
scmd->cmnd, scmd->cmd_len, queue_group, NULL,
@@ -5694,10 +5631,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request;
struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass;
@@ -5712,6 +5649,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ device = scmd->device->hostdata;
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
@@ -6052,7 +5990,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- atomic_inc(&device->raid_bypass_cnt);
+ device->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -7288,7 +7226,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- int raid_bypass_cnt;
+ unsigned int raid_bypass_cnt;
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -7304,7 +7242,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return -ENODEV;
}
- raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
+ raid_bypass_cnt = device->raid_bypass_cnt;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -7366,8 +7304,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return -ENODEV;
}
- if (!device->ncq_prio_support ||
- !device->is_physical_device) {
+ if (!device->ncq_prio_support) {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -EINVAL;
}
@@ -7379,6 +7316,18 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return strlen(buf);
}
+static ssize_t pqi_numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct scsi_device *sdev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
+}
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7388,6 +7337,7 @@ static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
+static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
@@ -7398,12 +7348,13 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_raid_level.attr,
&dev_attr_raid_bypass_cnt.attr,
&dev_attr_sas_ncq_prio_enable.attr,
+ &dev_attr_numa_node.attr,
NULL
};
ATTRIBUTE_GROUPS(pqi_sdev);
-static struct scsi_host_template pqi_driver_template = {
+static const struct scsi_host_template pqi_driver_template = {
.module = THIS_MODULE,
.name = DRIVER_NAME_SHORT,
.proc_name = DRIVER_NAME_SHORT,
@@ -7716,8 +7667,8 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
features_requested_iomem_addr +
(le16_to_cpu(firmware_features->num_elements) * 2) +
sizeof(__le16);
- writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
- host_max_known_feature_iomem_addr);
+ writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
+ writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
}
return pqi_config_table_update(ctrl_info,
@@ -8560,7 +8511,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0),
- sizeof(struct pqi_ctrl_registers));
+ pci_resource_len(ctrl_info->pci_dev, 0));
if (!ctrl_info->iomem_base) {
dev_err(&ctrl_info->pci_dev->dev,
"failed to map memory for controller registers\n");
@@ -9018,6 +8969,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
"failed to allocate controller info block\n");
return -ENOMEM;
}
+ ctrl_info->numa_node = node;
ctrl_info->pci_dev = pci_dev;
@@ -9929,6 +9881,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0804)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0805)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0806)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x5445)
},
{
@@ -9965,6 +9929,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54da)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54db)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54dc)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0b27)
},
{
@@ -10017,6 +9993,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1014, 0x0718)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10029,6 +10009,50 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1001)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1003)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1004)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1006)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1007)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1008)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1009)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 13e8c539010e..a981d0377948 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -92,25 +92,23 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
identify = &rphy->identify;
identify->sas_address = pqi_sas_port->sas_address;
+ identify->phy_identifier = pqi_sas_port->device->phy_id;
identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
identify->target_port_protocols = SAS_PROTOCOL_STP;
- if (pqi_sas_port->device) {
- identify->phy_identifier = pqi_sas_port->device->phy_id;
- switch (pqi_sas_port->device->device_type) {
- case SA_DEVICE_TYPE_SAS:
- case SA_DEVICE_TYPE_SES:
- case SA_DEVICE_TYPE_NVME:
- identify->target_port_protocols = SAS_PROTOCOL_SSP;
- break;
- case SA_DEVICE_TYPE_EXPANDER_SMP:
- identify->target_port_protocols = SAS_PROTOCOL_SMP;
- break;
- case SA_DEVICE_TYPE_SATA:
- default:
- break;
- }
+ switch (pqi_sas_port->device->device_type) {
+ case SA_DEVICE_TYPE_SAS:
+ case SA_DEVICE_TYPE_SES:
+ case SA_DEVICE_TYPE_NVME:
+ identify->target_port_protocols = SAS_PROTOCOL_SSP;
+ break;
+ case SA_DEVICE_TYPE_EXPANDER_SMP:
+ identify->target_port_protocols = SAS_PROTOCOL_SMP;
+ break;
+ case SA_DEVICE_TYPE_SATA:
+ default:
+ break;
}
return sas_rphy_add(rphy);
@@ -295,10 +293,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy);
if (rc)
- goto free_sas_port;
+ goto free_sas_rphy;
return 0;
+free_sas_rphy:
+ sas_rphy_free(rphy);
free_sas_port:
pqi_free_sas_port(pqi_sas_port);
device->sas_port = NULL;
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5811fb3c22a9..673437c7152b 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 9dcbae96a5c6..0c97626d87d4 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 174f7811fe50..cc824dcfe7da 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -100,7 +100,7 @@ snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
return sdev->queue_depth;
}
-static struct scsi_host_template snic_host_template = {
+static const struct scsi_host_template snic_host_template = {
.module = THIS_MODULE,
.name = SNIC_DRV_NAME,
.queuecommand = snic_queuecommand,
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 961af6fc21bc..c50ede326cc4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -487,7 +487,6 @@ snic_process_icmnd_cmpl_status(struct snic *snic,
struct scsi_cmnd *sc)
{
u8 scsi_stat = icmnd_cmpl->scsi_status;
- u64 xfer_len = 0;
int ret = 0;
/* Mark the IO as complete */
@@ -496,15 +495,11 @@ snic_process_icmnd_cmpl_status(struct snic *snic,
if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
sc->result = (DID_OK << 16) | scsi_stat;
- xfer_len = scsi_bufflen(sc);
-
/* Update SCSI Cmd with resid value */
scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
- if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
- xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
+ if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN)
atomic64_inc(&snic->s_stats.misc.io_under_run);
- }
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&snic->s_stats.misc.qfull);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 9e51dcd30bfd..12869e6d4ebd 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -590,20 +590,15 @@ static int sr_open(struct cdrom_device_info *cdi, int purpose)
{
struct scsi_cd *cd = cdi->handle;
struct scsi_device *sdev = cd->device;
- int retval;
/*
* If the device is in error recovery, wait until it is done.
* If the device is offline, then disallow any access to it.
*/
- retval = -ENXIO;
if (!scsi_block_when_processing_errors(sdev))
- goto error_out;
+ return -ENXIO;
return 0;
-
-error_out:
- return retval;
}
static void sr_release(struct cdrom_device_info *cdi)
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 8def242675ef..5b230e149c3d 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1472,7 +1472,7 @@ static int stex_biosparam(struct scsi_device *sdev,
return 0;
}
-static struct scsi_host_template driver_template = {
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = DRV_NAME,
.proc_name = DRV_NAME,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 33f568b7f54d..d9ce379c4d2e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -988,6 +988,22 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
}
/*
+ * Check for "Operating parameters have changed"
+ * due to Hyper-V changing the VHD/VHDX BlockSize
+ * when adding/removing a differencing disk. This
+ * causes discard_granularity to change, so do a
+ * rescan to pick up the new granularity. We don't
+ * want scsi_report_sense() to output a message
+ * that a sysadmin wouldn't know what to do with.
+ */
+ if ((asc == 0x3f) && (ascq != 0x03) &&
+ (ascq != 0x0e)) {
+ process_err_fn = storvsc_device_scan;
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
+
+ /*
* Otherwise, let upper layer deal with the
* error when sense message is present
*/
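
The added storvsc check keys off additional sense code 0x3f (the "operating conditions have changed" family) while excluding ASCQ 0x03 and 0x0e, which in SPC terms roughly correspond to INQUIRY data and reported-LUNs changes; anything else in that family schedules a device rescan and requeues the command. A small standalone sketch of that style of ASC/ASCQ dispatch follows; classify() and the action enum are illustrative only, not the storvsc functions.

#include <stdio.h>

enum action { ACT_NONE, ACT_RESCAN };

/* Classify a sense code the way the hunk above does: ASC 0x3f with any
 * ASCQ other than 0x03 or 0x0e forces a rescan; everything else is left
 * to the existing handling. */
static enum action classify(unsigned char asc, unsigned char ascq)
{
	if (asc == 0x3f && ascq != 0x03 && ascq != 0x0e)
		return ACT_RESCAN;
	return ACT_NONE;
}

int main(void)
{
	printf("%d\n", classify(0x3f, 0x00)); /* operating parameters changed -> rescan */
	printf("%d\n", classify(0x3f, 0x03)); /* INQUIRY data changed -> no rescan here */
	printf("%d\n", classify(0x29, 0x00)); /* power on / reset -> no rescan here */
	return 0;
}
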
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index d3489ac7ab28..30f67cbf4a7a 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -169,7 +169,7 @@ static const struct esp_driver_ops sun3x_esp_ops = {
static int esp_sun3x_probe(struct platform_device *dev)
{
- struct scsi_host_template *tpnt = &scsi_esp_template;
+ const struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
struct esp *esp;
struct resource *res;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 5dc38d35745b..d06e933191a2 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -146,7 +146,7 @@ static void esp_get_differential(struct esp *esp)
struct device_node *dp;
dp = op->dev.of_node;
- if (of_find_property(dp, "differential", NULL))
+ if (of_property_read_bool(dp, "differential"))
esp->flags |= ESP_FLAG_DIFFERENTIAL;
else
esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
@@ -451,7 +451,7 @@ static const struct esp_driver_ops sbus_esp_ops = {
static int esp_sbus_probe_one(struct platform_device *op,
struct platform_device *espdma, int hme)
{
- struct scsi_host_template *tpnt = &scsi_esp_template;
+ const struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
struct esp *esp;
int err;
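
The esp_get_differential() hunk above swaps of_find_property() for of_property_read_bool(), which states the intent directly: it asks whether a boolean device-tree property is present and returns a bool, instead of returning a property pointer that is only ever NULL-checked. The sketch below illustrates the idiom in userspace; the device_node struct and of_property_read_bool() here are simplified stand-ins, not the kernel's OF implementation.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a device-tree node: a flat list of present property names. */
struct device_node {
	const char *props[4];
};

/* Userspace stand-in for of_property_read_bool(): presence means true. */
static bool of_property_read_bool(const struct device_node *np,
				  const char *name)
{
	for (int i = 0; i < 4 && np->props[i]; i++)
		if (!strcmp(np->props[i], name))
			return true;
	return false;
}

int main(void)
{
	struct device_node dp = { .props = { "differential", NULL } };
	unsigned int flags = 0;

	/* Mirrors the hunk above: set or clear a flag from a boolean property. */
	if (of_property_read_bool(&dp, "differential"))
		flags |= 0x1;
	else
		flags &= ~0x1;

	printf("flags=%#x\n", flags);
	return 0;
}
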
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 2e2852bd5860..ee36a9c15d9c 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1224,7 +1224,7 @@ static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
* If all is OK, install interrupt handling and
* start the timer daemon.
*/
-static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
+static struct Scsi_Host *sym_attach(const struct scsi_host_template *tpnt, int unit,
struct sym_device *dev)
{
struct sym_data *sym_data;
@@ -1625,7 +1625,7 @@ static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
/*
* Driver host template.
*/
-static struct scsi_host_template sym2_template = {
+static const struct scsi_host_template sym2_template = {
.module = THIS_MODULE,
.name = "sym53c8xx",
.info = sym53c8xx_info,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c5558c45ab3a..58498da9869a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -746,7 +746,7 @@ static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
return SCSI_EH_RESET_TIMER;
}
-static struct scsi_host_template virtscsi_host_template = {
+static const struct scsi_host_template virtscsi_host_template = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index ff1b22077251..5a380eecfc75 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -878,7 +878,7 @@ fail_free_params:
return ret;
}
-static struct scsi_host_template wd719x_template = {
+static const struct scsi_host_template wd719x_template = {
.module = THIS_MODULE,
.name = "Western Digital 719x",
.cmd_size = sizeof(struct wd719x_scb),
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 71a3bb83984c..caae61aa2afe 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -770,7 +770,7 @@ static void scsifront_sdev_destroy(struct scsi_device *sdev)
}
}
-static struct scsi_host_template scsifront_sht = {
+static const struct scsi_host_template scsifront_sht = {
.module = THIS_MODULE,
.name = "Xen SCSI frontend driver",
.queuecommand = scsifront_queuecommand,
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index 928c8adf5cb3..56cae22a4242 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -713,7 +713,7 @@ MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);
static int zorro_esp_probe(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
- struct scsi_host_template *tpnt = &scsi_esp_template;
+ const struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
struct esp *esp;
const struct zorro_driver_data *zdd;