author    Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 11:23:28 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 11:23:28 -0800
commit    28bc6fb9596fe1e577d09fc17ee6e1bb051c6ba3 (patch)
tree      6293b282a960720fc5008e3e5fa4d096d974b2f1 /drivers/scsi
parent    0be600a5add76e8e8b9e1119f2a7426ff849aca8 (diff)
parent    a2390348c19d0819d525d375414a7cfdacb51a68 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual driver suspects: arcmsr,
  scsi_debug, mpt3sas, lpfc, cxlflash, qla2xxx, aacraid, megaraid_sas,
  hisi_sas. We also have a rework of the libsas hotplug handling to
  make it more robust, a slew of 32 bit time conversions and fixes, and
  a host of the usual minor updates and style changes. The biggest
  potential for regressions is the libsas hotplug changes, but so far
  they seem stable under testing"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (313 commits)
  scsi: qla2xxx: Fix logo flag for qlt_free_session_done()
  scsi: arcmsr: avoid do_gettimeofday
  scsi: core: Add VENDOR_SPECIFIC sense code definitions
  scsi: qedi: Drop cqe response during connection recovery
  scsi: fas216: fix sense buffer initialization
  scsi: ibmvfc: Remove unneeded semicolons
  scsi: hisi_sas: fix a bug in hisi_sas_dev_gone()
  scsi: hisi_sas: directly attached disk LED feature for v2 hw
  scsi: hisi_sas: devicetree: bindings: add LED feature for v2 hw
  scsi: megaraid_sas: NVMe passthrough command support
  scsi: megaraid: use ktime_get_real for firmware time
  scsi: fnic: use 64-bit timestamps
  scsi: qedf: Fix error return code in __qedf_probe()
  scsi: devinfo: fix format of the device list
  scsi: qla2xxx: Update driver version to 10.00.00.05-k
  scsi: qla2xxx: Add XCB counters to debugfs
  scsi: qla2xxx: Fix queue ID for async abort with Multiqueue
  scsi: qla2xxx: Fix warning for code intentation in __qla24xx_handle_gpdb_event()
  scsi: qla2xxx: Fix warning during port_name debug print
  scsi: qla2xxx: Fix warning in qla2x00_async_iocb_timeout()
  ...
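A theme running through many of the hunks below is the 32-bit time conversion work called out above: drivers drop do_gettimeofday()/struct timeval, whose 32-bit tv_sec overflows in January 2038 on 32-bit architectures, in favor of ktime_get_real_seconds(), which returns a 64-bit time64_t. A minimal userspace sketch of the arithmetic involved (illustrative only, not driver code; time(NULL) stands in for ktime_get_real_seconds()):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	int64_t now = (int64_t)time(NULL);  /* stand-in for ktime_get_real_seconds() */
	int32_t as_s32 = (int32_t)now;      /* old struct timeval.tv_sec on 32-bit */
	uint32_t as_u32 = (uint32_t)now;    /* e.g. the 3ware event->time_stamp_sec */

	printf("64-bit seconds:       %lld\n", (long long)now);
	printf("signed 32-bit copy:   %ld (overflows 2038-01-19)\n", (long)as_s32);
	printf("unsigned 32-bit copy: %lu (overflows in y2106)\n", (unsigned long)as_u32);
	return 0;
}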
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c  26
-rw-r--r--  drivers/scsi/3w-9xxx.h  2
-rw-r--r--  drivers/scsi/3w-sas.c  15
-rw-r--r--  drivers/scsi/aacraid/aachba.c  473
-rw-r--r--  drivers/scsi/aacraid/aacraid.h  54
-rw-r--r--  drivers/scsi/aacraid/commctrl.c  6
-rw-r--r--  drivers/scsi/aacraid/comminit.c  49
-rw-r--r--  drivers/scsi/aacraid/commsup.c  220
-rw-r--r--  drivers/scsi/aacraid/linit.c  26
-rw-r--r--  drivers/scsi/aacraid/sa.c  32
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h  567
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c  1318
-rw-r--r--  drivers/scsi/arm/fas216.c  2
-rw-r--r--  drivers/scsi/bfa/bfa_core.c  2
-rw-r--r--  drivers/scsi/bfa/bfa_cs.h  6
-rw-r--r--  drivers/scsi/bfa/bfa_defs_svc.h  3
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c  8
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c  3
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.h  4
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c  78
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c  62
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c  10
-rw-r--r--  drivers/scsi/bfa/bfa_port.c  15
-rw-r--r--  drivers/scsi/bfa/bfa_port.h  2
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c  51
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h  2
-rw-r--r--  drivers/scsi/bfa/bfad.c  23
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c  5
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c  10
-rw-r--r--  drivers/scsi/bfa/bfad_debugfs.c  8
-rw-r--r--  drivers/scsi/bfa/bfad_im.h  32
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c  4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c  60
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c  51
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c  19
-rw-r--r--  drivers/scsi/csiostor/csio_init.c  2
-rw-r--r--  drivers/scsi/csiostor/csio_init.h  1
-rw-r--r--  drivers/scsi/csiostor/csio_mb.c  6
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c  2
-rw-r--r--  drivers/scsi/cxlflash/Makefile  2
-rw-r--r--  drivers/scsi/cxlflash/backend.h  41
-rw-r--r--  drivers/scsi/cxlflash/common.h  8
-rw-r--r--  drivers/scsi/cxlflash/cxl_hw.c  168
-rw-r--r--  drivers/scsi/cxlflash/main.c  100
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c  64
-rw-r--r--  drivers/scsi/cxlflash/superpipe.h  4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c  37
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c  22
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c  4
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c  2
-rw-r--r--  drivers/scsi/fnic/fnic_stats.h  4
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c  58
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h  46
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c  267
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c  6
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c  194
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c  332
-rw-r--r--  drivers/scsi/hosts.c  6
-rw-r--r--  drivers/scsi/hpsa.c  18
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  34
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c  320
-rw-r--r--  drivers/scsi/ipr.c  4
-rw-r--r--  drivers/scsi/iscsi_tcp.c  2
-rw-r--r--  drivers/scsi/libiscsi.c  28
-rw-r--r--  drivers/scsi/libiscsi_tcp.c  9
-rw-r--r--  drivers/scsi/libsas/sas_ata.c  1
-rw-r--r--  drivers/scsi/libsas/sas_discover.c  34
-rw-r--r--  drivers/scsi/libsas/sas_event.c  86
-rw-r--r--  drivers/scsi/libsas/sas_expander.c  12
-rw-r--r--  drivers/scsi/libsas/sas_init.c  107
-rw-r--r--  drivers/scsi/libsas/sas_internal.h  7
-rw-r--r--  drivers/scsi/libsas/sas_phy.c  69
-rw-r--r--  drivers/scsi/libsas/sas_port.c  25
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c  20
-rw-r--r--  drivers/scsi/lpfc/lpfc.h  6
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c  59
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c  20
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  65
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  116
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  29
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h  6
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  294
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  88
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c  356
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h  17
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c  71
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h  10
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  193
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h  37
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c  171
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c  28
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c  99
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h  6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c  207
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h  29
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c  31
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c  332
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_warpdrive.c  33
-rw-r--r--  drivers/scsi/pmcraid.c  2
-rw-r--r--  drivers/scsi/ppa.c  4
-rw-r--r--  drivers/scsi/qedf/qedf_main.c  3
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c  2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c  46
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  7
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c  9
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  173
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c  39
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  32
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c  1497
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  1044
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c  65
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  56
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  151
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  140
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  528
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c  761
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h  4
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c  40
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c  5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c  5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c  21
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c  5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c  12
-rw-r--r--  drivers/scsi/scsi_common.c  14
-rw-r--r--  drivers/scsi/scsi_debug.c  724
-rw-r--r--  drivers/scsi/scsi_devinfo.c  39
-rw-r--r--  drivers/scsi/scsi_dh.c  5
-rw-r--r--  drivers/scsi/scsi_error.c  21
-rw-r--r--  drivers/scsi/scsi_lib.c  50
-rw-r--r--  drivers/scsi/scsi_priv.h  15
-rw-r--r--  drivers/scsi/scsi_sysfs.c  3
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  4
-rw-r--r--  drivers/scsi/sd.c  6
-rw-r--r--  drivers/scsi/ses.c  11
-rw-r--r--  drivers/scsi/smartpqi/Makefile  2
-rw-r--r--  drivers/scsi/st.c  2
-rw-r--r--  drivers/scsi/storvsc_drv.c  4
-rw-r--r--  drivers/scsi/ufs/ufshci.h  16
-rw-r--r--  drivers/scsi/wd719x.c  4
148 files changed, 8844 insertions, 4458 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 00e7968a1d70..b42c9c479d4b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -369,7 +369,6 @@ out:
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -392,8 +391,8 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -473,11 +472,10 @@ out:
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -499,9 +497,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
schedulertime = cpu_to_le32(schedulertime % 604800);
memcpy(param->data, &schedulertime, sizeof(u32));
@@ -648,8 +645,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
TW_Command_Full *full_command_packet;
TW_Compatibility_Info *tw_compat_info;
TW_Event *event;
- struct timeval current_time;
- u32 current_time_ms;
+ ktime_t current_time;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
void __user *argp = (void __user *)arg;
@@ -840,17 +836,17 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
break;
case TW_IOCTL_GET_LOCK:
tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
- do_gettimeofday(&current_time);
- current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+ current_time = ktime_get();
- if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+ if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
+ ktime_after(current_time, tw_dev->ioctl_time)) {
tw_dev->ioctl_sem_lock = 1;
- tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+ tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
tw_ioctl->driver_command.status = 0;
tw_lock->time_remaining_msec = tw_lock->timeout_msec;
} else {
tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
- tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+ tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
}
break;
case TW_IOCTL_RELEASE_LOCK:
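The twa_aen_sync_time() hunk above computes "seconds since last Sunday 12:00 AM" for the firmware scheduler. The constants make sense once you recall that the Unix epoch (1970-01-01) fell on a Thursday: subtracting three days rebases the counter to a Sunday-midnight origin, and the remainder modulo 604800 (7 * 86400 seconds per week) is the offset into the current week. div_u64_rem() is used because a plain 64-bit '%' is not available on all 32-bit kernels. A small userspace sketch of the same arithmetic (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	/* pretend the sys_tz adjustment has already been applied */
	int64_t local_time = (int64_t)time(NULL);
	uint32_t schedulertime =
		(uint32_t)((local_time - 3 * 86400) % 604800); /* ~div_u64_rem() */

	printf("seconds since last Sunday 00:00: %u\n", (unsigned)schedulertime);
	printf("day of week (0 = Sunday): %u\n", (unsigned)(schedulertime / 86400));
	return 0;
}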
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index b6c208cc474f..d88cd3499bd5 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -666,7 +666,7 @@ typedef struct TAG_TW_Device_Extension {
unsigned char event_queue_wrapped;
unsigned int error_sequence_id;
int ioctl_sem_lock;
- u32 ioctl_msec;
+ ktime_t ioctl_time;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
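The ioctl_msec -> ioctl_time change above is the companion to the 3w-9xxx.c hunk that replaced wall-clock millisecond comparisons: the driver now stores an absolute monotonic deadline (ktime_get() plus the requested timeout) and tests it with ktime_after(), so the lock no longer misbehaves when the system clock is stepped. A hedged userspace analogue, with CLOCK_MONOTONIC standing in for ktime_get():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t monotonic_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
	int64_t now = monotonic_ns();
	int64_t deadline = now + 10000 * 1000000LL; /* ~ktime_add_ms(now, 10000) */

	if (monotonic_ns() > deadline)              /* ~ktime_after() */
		printf("lock expired\n");
	else
		printf("time remaining: %lld ms\n",
		       (long long)((deadline - monotonic_ns()) / 1000000));
	return 0;
}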
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b150e131b2e7..cf9f2a09b47d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -221,7 +221,6 @@ out:
static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -240,8 +239,8 @@ static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -408,11 +407,10 @@ out:
static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -434,10 +432,9 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
- schedulertime = cpu_to_le32(schedulertime % 604800);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
+ schedulertime = cpu_to_le32(schedulertime);
memcpy(param->data, &schedulertime, sizeof(u32));
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index af3e4d3f9735..e7961cbd2c55 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -42,6 +42,8 @@
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -913,8 +915,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
memset(str, ' ', sizeof(*str));
if (sup_adap_info->adapter_type_text[0]) {
- char *cp = sup_adap_info->adapter_type_text;
int c;
+ char *cp;
+ char *cname = kmemdup(sup_adap_info->adapter_type_text,
+ sizeof(sup_adap_info->adapter_type_text),
+ GFP_ATOMIC);
+ if (!cname)
+ return;
+
+ cp = cname;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
else {
@@ -923,7 +932,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
- inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
+ inqstrcpy(cname, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@@ -931,14 +940,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
- c = 0;
- if (strlen(cp) > sizeof(str->pid)) {
- c = cp[sizeof(str->pid)];
+ if (strlen(cp) > sizeof(str->pid))
cp[sizeof(str->pid)] = '\0';
- }
inqstrcpy (cp, str->pid);
- if (c)
- cp[sizeof(str->pid)] = c;
+
+ kfree(cname);
} else {
struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
@@ -1660,87 +1666,309 @@ static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
(void *) cmd);
}
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+ struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
{
- struct fib *fibptr;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- struct aac_ciss_identify_pd *identify_resp;
- dma_addr_t addr;
- u32 vbus, vid;
- u16 fibsize, datasize;
- int rcode = -ENOMEM;
-
+ struct fib *fibptr;
+ dma_addr_t addr;
+ int rcode;
+ int fibsize;
+ struct aac_srb *srb;
+ struct aac_srb_reply *srb_reply;
+ struct sgmap64 *sg64;
+ u32 vbus;
+ u32 vid;
+
+ if (!dev->sa_firmware)
+ return 0;
+ /* allocate FIB */
fibptr = aac_fib_alloc(dev);
if (!fibptr)
- goto out;
+ return -ENOMEM;
- fibsize = sizeof(struct aac_srb) -
- sizeof(struct sgentry) + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_identify_pd);
+ aac_fib_init(fibptr);
+ fibptr->hw_fib_va->header.XferState &=
+ ~cpu_to_le32(FastResponseCapable);
- identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (!identify_resp)
- goto fib_free_ptr;
+ fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ sizeof(struct sgentry64);
- vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
- vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+ /* allocate DMA buffer for response */
+ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ rcode = -ENOMEM;
+ goto fib_error;
+ }
- aac_fib_init(fibptr);
+ srb = fib_data(fibptr);
+ memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ vbus = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_bus);
+ vid = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_target);
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = 0x26;
- srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
- srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+ /* set the common request fields */
+ srb->channel = cpu_to_le32(vbus);
+ srb->id = cpu_to_le32(vid);
+ srb->lun = 0;
+ srb->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srb->timeout = 0;
+ srb->retry_limit = 0;
+ srb->cdb_size = cpu_to_le32(16);
+ srb->count = cpu_to_le32(xfer_len);
+
+ sg64 = (struct sgmap64 *)&srb->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+ sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+ sg64->sg[0].count = cpu_to_le32(xfer_len);
- sg64 = (struct sgmap64 *)&srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
- sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
- sg64->sg[0].count = cpu_to_le32(datasize);
+ /*
+ * Copy the updated data for other dumping or other usage if needed
+ */
+ memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+ /* issue request to the controller */
+ rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+ 1, 1, NULL, NULL);
+
+ if (rcode == -ERESTARTSYS)
+ rcode = -ERESTART;
+
+ if (unlikely(rcode < 0))
+ goto bmic_error;
+
+ srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+ memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+ dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return rcode;
+}
+
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+
+ struct aac_ciss_identify_pd *identify_resp;
- rcode = aac_fib_send(ScsiPortCommand64,
- fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+ if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+ return;
+
+ identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+ if (identify_resp == NULL) {
+ dev->hba_map[bus][target].qd_limit = 32;
+ return;
+ }
if (identify_resp->current_queue_depth_limit <= 0 ||
- identify_resp->current_queue_depth_limit > 32)
+ identify_resp->current_queue_depth_limit > 255)
dev->hba_map[bus][target].qd_limit = 32;
else
dev->hba_map[bus][target].qd_limit =
identify_resp->current_queue_depth_limit;
+}
- dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+ struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb_unit srbu;
+ struct aac_srb *srbcmd;
+ struct aac_ciss_identify_pd *identify_reply;
- aac_fib_complete(fibptr);
+ datasize = sizeof(struct aac_ciss_identify_pd);
+ identify_reply = kmalloc(datasize, GFP_KERNEL);
+ if (!identify_reply)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = 0x26;
+ srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+ srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ *identify_resp = identify_reply;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(identify_reply);
+ goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+ kfree(dev->safw_phys_luns);
+ dev->safw_phys_luns = NULL;
+}
+
+/**
+ * aac_get_safw_ciss_luns() Process topology change
+ * @dev: aac_dev structure
+ *
+ * Execute a CISS REPORT PHYS LUNS and process the results into
+ * the current hba_map.
+ */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb *srbcmd;
+ struct aac_srb_unit srbu;
+ struct aac_ciss_phys_luns_resp *phys_luns;
+
+ datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+ phys_luns = kmalloc(datasize, GFP_KERNEL);
+ if (phys_luns == NULL)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+ srbcmd->cdb[1] = 2; /* extended reporting */
+ srbcmd->cdb[8] = (u8)(datasize >> 8);
+ srbcmd->cdb[9] = (u8)(datasize);
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ if (phys_luns->resp_flag != 2) {
+ rcode = -ENOMSG;
+ goto mem_free_all;
+ }
+
+ dev->safw_phys_luns = phys_luns;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(phys_luns);
+ goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
+ return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+ return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
+
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+ int bus, int target)
+{
+ kfree(dev->hba_map[bus][target].safw_identify_resp);
+ dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+ int lun_count)
+{
+ int luns;
+ int i;
+ u32 bus;
+ u32 target;
+
+ luns = aac_get_safw_phys_lun_count(dev);
+
+ if (luns < lun_count)
+ lun_count = luns;
+ else if (lun_count < 0)
+ lun_count = luns;
+
+ for (i = 0; i < lun_count; i++) {
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ aac_free_safw_identify_resp(dev, bus, target);
+ }
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+ int i;
+ int rcode = 0;
+ u32 lun_count;
+ u32 bus;
+ u32 target;
+ struct aac_ciss_identify_pd *identify_resp = NULL;
+
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ for (i = 0; i < lun_count; ++i) {
+
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ rcode = aac_issue_safw_bmic_identify(dev,
+ &identify_resp, bus, target);
+
+ if (unlikely(rcode < 0))
+ goto free_identify_resp;
+
+ dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+ }
-fib_free_ptr:
- aac_fib_free(fibptr);
out:
return rcode;
+free_identify_resp:
+ aac_free_safw_all_identify_resp(dev, i);
+ goto out;
}
/**
- * aac_update hba_map()- update current hba map with data from FW
+ * aac_set_safw_attr_all_targets- update current hba map with data from FW
* @dev: aac_dev structure
* @phys_luns: FW information from report phys luns
+ * @rescan: Indicates scan type
*
* Update our hba map with the information gathered from the FW
*/
-void aac_update_hba_map(struct aac_dev *dev,
- struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
{
/* ok and extended reporting */
u32 lun_count, nexus;
@@ -1748,24 +1976,21 @@ void aac_update_hba_map(struct aac_dev *dev,
u8 expose_flag, attribs;
u8 devtype;
- lun_count = ((phys_luns->list_length[0] << 24)
- + (phys_luns->list_length[1] << 16)
- + (phys_luns->list_length[2] << 8)
- + (phys_luns->list_length[3])) / 24;
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ dev->scan_counter++;
for (i = 0; i < lun_count; ++i) {
- bus = phys_luns->lun[i].level2[1] & 0x3f;
- target = phys_luns->lun[i].level2[0];
- expose_flag = phys_luns->lun[i].bus >> 6;
- attribs = phys_luns->lun[i].node_ident[9];
- nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+ expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+ attribs = aac_get_safw_phys_attribs(dev, i);
+ nexus = aac_get_safw_phys_nexus(dev, i);
if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
continue;
- dev->hba_map[bus][target].expose = expose_flag;
-
if (expose_flag != 0) {
devtype = AAC_DEVTYPE_RAID_MEMBER;
goto update_devtype;
@@ -1778,95 +2003,45 @@ void aac_update_hba_map(struct aac_dev *dev,
} else
devtype = AAC_DEVTYPE_ARC_RAW;
- if (devtype != AAC_DEVTYPE_NATIVE_RAW)
- goto update_devtype;
+ dev->hba_map[bus][target].scan_counter = dev->scan_counter;
- if (aac_issue_bmic_identify(dev, bus, target) < 0)
- dev->hba_map[bus][target].qd_limit = 32;
+ aac_set_safw_target_qd(dev, bus, target);
update_devtype:
- if (rescan == AAC_INIT)
- dev->hba_map[bus][target].devtype = devtype;
- else
- dev->hba_map[bus][target].new_devtype = devtype;
+ dev->hba_map[bus][target].devtype = devtype;
}
}
-/**
- * aac_report_phys_luns() Process topology change
- * @dev: aac_dev structure
- * @fibptr: fib pointer
- *
- * Execute a CISS REPORT PHYS LUNS and process the results into
- * the current hba_map.
- */
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+static int aac_setup_safw_targets(struct aac_dev *dev)
{
- int fibsize, datasize;
- struct aac_ciss_phys_luns_resp *phys_luns;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- dma_addr_t addr;
- u32 vbus, vid;
int rcode = 0;
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
- + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_phys_luns_resp)
- + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
-
- phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (phys_luns == NULL) {
- rcode = -ENOMEM;
- goto err_out;
- }
-
- vbus = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_bus);
- vid = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_target);
+ rcode = aac_get_containers(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- aac_fib_init(fibptr);
+ rcode = aac_get_safw_ciss_luns(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ rcode = aac_get_safw_attr_all_targets(dev);
+ if (unlikely(rcode < 0))
+ goto free_ciss_luns;
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
- srbcmd->cdb[1] = 2; /* extended reporting */
- srbcmd->cdb[8] = (u8)(datasize >> 8);
- srbcmd->cdb[9] = (u8)(datasize);
-
- sg64 = (struct sgmap64 *) &srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
- sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
- sg64->sg[0].count = cpu_to_le32(datasize);
-
- rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
- FsaNormal, 1, 1, NULL, NULL);
-
- /* analyse data */
- if (rcode >= 0 && phys_luns->resp_flag == 2) {
- /* ok and extended reporting */
- aac_update_hba_map(dev, phys_luns, rescan);
- }
+ aac_set_safw_attr_all_targets(dev);
- dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
-err_out:
+ aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+ aac_free_safw_ciss_luns(dev);
+out:
return rcode;
}
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+ return aac_setup_safw_targets(dev);
+}
+
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
@@ -1969,12 +2144,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
}
- if (!dev->sync_mode && dev->sa_firmware &&
- dev->supplement_adapter_info.virt_device_bus != 0xffff) {
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
- }
-
if (!dev->in_reset) {
char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -2739,14 +2908,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
} else { /* check for physical non-dasd devices */
bus = aac_logical_to_phys(scmd_channel(scsicmd));
- if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
- (dev->hba_map[bus][cid].expose
- == AAC_HIDE_DISK)){
- if (scsicmd->cmnd[0] == INQUIRY) {
- scsicmd->result = DID_NO_CONNECT << 16;
- goto scsi_done_ret;
- }
- }
if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
dev->hba_map[bus][cid].devtype
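Two patterns recur in the rewritten aachba.c helpers above: the response buffer is now mapped with dma_map_single() instead of being allocated coherently, and the resulting 64-bit bus address is split across the two little-endian 32-bit halves of an sgmap64 entry with upper_32_bits()/lower_32_bits(), replacing the older ((addr >> 16) >> 16) idiom. A minimal sketch of the split, with the two kernel helpers reimplemented locally for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t n) { return (uint32_t)(n >> 32); }
static uint32_t lower_32_bits(uint64_t n) { return (uint32_t)(n & 0xffffffffu); }

int main(void)
{
	uint64_t addr = 0x0000004512345678ull; /* hypothetical bus address */

	/* matches sg64->sg[0].addr[1] / addr[0] in the hunk above */
	printf("addr[1] (high) = 0x%08x\n", (unsigned)upper_32_bits(addr));
	printf("addr[0] (low)  = 0x%08x\n", (unsigned)lower_32_bits(addr));
	return 0;
}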
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d52265416da2..0095fcbd1c88 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <scsi/scsi_host.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -97,7 +98,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50834
+# define AAC_DRIVER_BUILD 50877
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -117,9 +118,13 @@ enum {
/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
#define AAC_MAX_BUSES 5
#define AAC_MAX_TARGETS 256
+#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define get_bus_number(x) (x/AAC_MAX_TARGETS)
+#define get_target_number(x) (x%AAC_MAX_TARGETS)
+
/* Thor AIF events */
#define SA_AIF_HOTPLUG (1<<1)
#define SA_AIF_HARDWARE (1<<2)
@@ -1334,17 +1339,17 @@ struct fib {
#define AAC_DEVTYPE_RAID_MEMBER 1
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_EXPOSE_DISK 0
-#define AAC_HIDE_DISK 3
+
+#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
u8 devtype; /* device type */
- u8 new_devtype;
u8 reset_state; /* 0 - no reset, 1..x - */
/* after xth TM LUN reset */
u16 qd_limit;
- u8 expose; /*checks if to expose or not*/
+ u32 scan_counter;
+ struct aac_ciss_identify_pd *safw_identify_resp;
};
/*
@@ -1560,6 +1565,7 @@ struct aac_dev
spinlock_t fib_lock;
struct mutex ioctl_mutex;
+ struct mutex scan_mutex;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
@@ -1605,6 +1611,7 @@ struct aac_dev
int maximum_num_channels;
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
+ struct delayed_work safw_rescan_work;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1668,9 +1675,11 @@ struct aac_dev
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
atomic_t msix_counter;
+ u32 scan_counter;
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+ struct aac_ciss_phys_luns_resp *safw_phys_luns;
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
@@ -2023,6 +2032,12 @@ struct aac_srb_reply
__le32 sense_data_size;
u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
};
+
+struct aac_srb_unit {
+ struct aac_srb srb;
+ struct aac_srb_reply srb_reply;
+};
+
/*
* SRB Flags
*/
@@ -2627,16 +2642,41 @@ static inline int aac_adapter_check_health(struct aac_dev *dev)
return (dev)->a_ops.adapter_check_health(dev);
}
+
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, safw_rescan_work);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+
+ aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+ if (dev->sa_firmware)
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
#define AAC_OWNER_ERROR_HANDLER 0x103
#define AAC_OWNER_FIRMWARE 0x106
+void aac_safw_rescan_worker(struct work_struct *work);
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
+int aac_setup_safw_adapter(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
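The AAC_BUS_TARGET_LOOP and get_bus_number()/get_target_number() macros added above let commsup.c walk the whole hba_map with a single flattened index instead of a nested bus/target loop: an index i in [0, BUSES*TARGETS) decomposes as bus = i / AAC_MAX_TARGETS and target = i % AAC_MAX_TARGETS. A self-contained illustration:

#include <stdio.h>

#define AAC_MAX_BUSES		5
#define AAC_MAX_TARGETS		256
#define AAC_BUS_TARGET_LOOP	(AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define get_bus_number(x)	((x)/AAC_MAX_TARGETS)
#define get_target_number(x)	((x)%AAC_MAX_TARGETS)

int main(void)
{
	int i = 700; /* arbitrary flattened index */

	/* 700 = 2 * 256 + 188, so this prints bus=2 target=188 */
	printf("i=%d -> bus=%d target=%d\n",
	       i, get_bus_number(i), get_target_number(i));
	return 0;
}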
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9ab0fa959d83..a2b3430072c7 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1052,9 +1052,13 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
return -EFAULT;
+ dev->adapter_shutdown = 1;
+
+ mutex_unlock(&dev->ioctl_mutex);
retval = aac_reset_adapter(dev, 0, reset.reset_type);
- return retval;
+ mutex_lock(&dev->ioctl_mutex);
+ return retval;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1bc623ad3faf..0dc7b5a4fea2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -42,6 +42,8 @@
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
#include "aacraid.h"
@@ -284,6 +286,38 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
+static void aac_wait_for_io_completion(struct aac_dev *aac)
+{
+ unsigned long flagv = 0;
+ int i = 0;
+
+ for (i = 60; i; --i) {
+ struct scsi_device *dev;
+ struct scsi_cmnd *command;
+ int active = 0;
+
+ __shost_for_each_device(dev, aac->scsi_host_ptr) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+ * We can exit If all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+}
+
/**
* aac_send_shutdown - shutdown an adapter
* @dev: Adapter to shutdown
@@ -295,12 +329,10 @@ int aac_send_shutdown(struct aac_dev * dev)
{
struct fib * fibctx;
struct aac_close *cmd;
- int status;
+ int status = 0;
- fibctx = aac_fib_alloc(dev);
- if (!fibctx)
- return -ENOMEM;
- aac_fib_init(fibctx);
+ if (aac_adapter_check_health(dev))
+ return status;
if (!dev->adapter_shutdown) {
mutex_lock(&dev->ioctl_mutex);
@@ -308,6 +340,13 @@ int aac_send_shutdown(struct aac_dev * dev)
mutex_unlock(&dev->ioctl_mutex);
}
+ aac_wait_for_io_completion(dev);
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return -ENOMEM;
+ aac_fib_init(fibctx);
+
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xfffffffe);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 80a8cb26cdea..84858d5c8257 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -1629,28 +1630,28 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
command->scsi_done(command);
}
/*
- * Any Device that was already marked offline needs to be cleaned up
+ * Any Device that was already marked offline needs to be marked
+ * running
*/
__shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev)) {
- sdev_printk(KERN_INFO, dev, "Removing offline device\n");
- scsi_remove_device(dev);
- scsi_device_put(dev);
- }
+ if (!scsi_device_online(dev))
+ scsi_device_set_state(dev, SDEV_RUNNING);
}
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
+
/*
* Issue bus rescan to catch any configuration that might have
* occurred
*/
- if (!retval) {
- dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
- scsi_scan_host(host);
+ if (!retval && !is_kdump_kernel()) {
+ dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
+ aac_schedule_safw_scan_worker(aac);
}
+
if (jafo) {
spin_lock_irq(host->host_lock);
}
@@ -1681,31 +1682,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
- if (forced < 2) for (retval = 60; retval; --retval) {
- struct scsi_device * dev;
- struct scsi_cmnd * command;
- int active = 0;
-
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
-
- }
- /*
- * We can exit If all the commands are complete
- */
- if (active == 0)
- break;
- ssleep(1);
- }
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1874,42 +1850,124 @@ out:
return BlinkLED;
}
+static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
+{
+ return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
+}
+
+static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
+ int bus,
+ int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static void aac_put_safw_scsi_device(struct scsi_device *sdev)
+{
+ if (sdev)
+ scsi_device_put(sdev);
+}
-static void aac_resolve_luns(struct aac_dev *dev)
+static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
- int bus, target, channel;
struct scsi_device *sdev;
- u8 devtype;
- u8 new_devtype;
- for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
- for (target = 0; target < AAC_MAX_TARGETS; target++) {
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ scsi_remove_device(sdev);
+ aac_put_safw_scsi_device(sdev);
+}
- if (bus == CONTAINER_CHANNEL)
- channel = CONTAINER_CHANNEL;
- else
- channel = aac_phys_to_logical(bus);
+static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
+ int bus, int target)
+{
+ return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
+}
- devtype = dev->hba_map[bus][target].devtype;
- new_devtype = dev->hba_map[bus][target].new_devtype;
+static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
+{
+ if (is_safw_raid_volume(dev, bus, target))
+ return dev->fsa_dev[target].valid;
+ else
+ return aac_is_safw_scan_count_equal(dev, bus, target);
+}
- sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
- target, 0);
+static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
+{
+ int is_exposed = 0;
+ struct scsi_device *sdev;
- if (!sdev && new_devtype)
- scsi_add_device(dev->scsi_host_ptr, channel,
- target, 0);
- else if (sdev && new_devtype != devtype)
- scsi_remove_device(sdev);
- else if (sdev && new_devtype == devtype)
- scsi_rescan_device(&sdev->sdev_gendev);
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ if (sdev)
+ is_exposed = 1;
+ aac_put_safw_scsi_device(sdev);
- if (sdev)
- scsi_device_put(sdev);
+ return is_exposed;
+}
- dev->hba_map[bus][target].devtype = new_devtype;
- }
+static int aac_update_safw_host_devices(struct aac_dev *dev)
+{
+ int i;
+ int bus;
+ int target;
+ int is_exposed = 0;
+ int rcode = 0;
+
+ rcode = aac_setup_safw_adapter(dev);
+ if (unlikely(rcode < 0)) {
+ goto out;
}
+
+ for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
+
+ bus = get_bus_number(i);
+ target = get_target_number(i);
+
+ is_exposed = aac_is_safw_device_exposed(dev, bus, target);
+
+ if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
+ aac_add_safw_device(dev, bus, target);
+ else if (!aac_is_safw_target_valid(dev, bus, target) &&
+ is_exposed)
+ aac_remove_safw_device(dev, bus, target);
+ }
+out:
+ return rcode;
+}
+
+static int aac_scan_safw_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ rcode = aac_update_safw_host_devices(dev);
+ if (rcode)
+ aac_schedule_safw_scan_worker(dev);
+
+ return rcode;
+}
+
+int aac_scan_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ mutex_lock(&dev->scan_mutex);
+ if (dev->sa_firmware)
+ rcode = aac_scan_safw_host(dev);
+ else
+ scsi_scan_host(dev->scsi_host_ptr);
+ mutex_unlock(&dev->scan_mutex);
+
+ return rcode;
}
/**
@@ -1922,10 +1980,8 @@ static void aac_resolve_luns(struct aac_dev *dev)
*/
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
- int i, bus, target, container, rcode = 0;
+ int i;
u32 events = 0;
- struct fib *fib;
- struct scsi_device *sdev;
if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
events = SA_AIF_HOTPLUG;
@@ -1947,44 +2003,8 @@ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
case SA_AIF_LDEV_CHANGE:
case SA_AIF_BPCFG_CHANGE:
- fib = aac_fib_alloc(dev);
- if (!fib) {
- pr_err("aac_handle_sa_aif: out of memory\n");
- return;
- }
- for (bus = 0; bus < AAC_MAX_BUSES; bus++)
- for (target = 0; target < AAC_MAX_TARGETS; target++)
- dev->hba_map[bus][target].new_devtype = 0;
-
- rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
-
- if (rcode != -ERESTARTSYS)
- aac_fib_free(fib);
-
- aac_resolve_luns(dev);
-
- if (events == SA_AIF_LDEV_CHANGE ||
- events == SA_AIF_BPCFG_CHANGE) {
- aac_get_containers(dev);
- for (container = 0; container <
- dev->maximum_num_containers; ++container) {
- sdev = scsi_device_lookup(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- if (dev->fsa_dev[container].valid && !sdev) {
- scsi_add_device(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- } else if (!dev->fsa_dev[container].valid &&
- sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
- scsi_device_put(sdev);
- }
- }
- }
+ aac_scan_host(dev);
+
break;
case SA_AIF_BPSTAT_CHANGE:
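The commsup.c rework above replaces the devtype/new_devtype double bookkeeping with a generation counter: every aac_scan_host() pass bumps dev->scan_counter and stamps each target it finds, so a physical target is considered valid exactly when its stamp equals the current generation (aac_is_safw_scan_count_equal()). A stripped-down sketch of the scheme (hypothetical types, not the driver's structures):

#include <stdio.h>

struct target { unsigned int scan_counter; };

static unsigned int scan_counter;

static int target_valid(const struct target *t)
{
	return t->scan_counter == scan_counter;
}

int main(void)
{
	struct target a = { 0 }, b = { 0 };

	scan_counter++;			/* rescan #1 sees only 'a' */
	a.scan_counter = scan_counter;
	printf("a=%d b=%d\n", target_valid(&a), target_valid(&b)); /* a=1 b=0 */

	scan_counter++;			/* rescan #2 sees only 'b' */
	b.scan_counter = scan_counter;
	printf("a=%d b=%d\n", target_valid(&a), target_valid(&b)); /* a=0 b=1 */
	return 0;
}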
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d55332de08f9..b3b931ab77eb 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -683,6 +683,9 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
u32 bus, cid;
int ret = FAILED;
+ if (aac_adapter_check_health(aac))
+ return ret;
+
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
@@ -690,7 +693,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
struct aac_hba_tm_req *tmf;
int status;
u64 address;
- __le32 managed_request_id;
pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
AAC_DRIVERNAME,
@@ -703,8 +705,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
(fib->callback_data == cmd)) {
found = 1;
- managed_request_id = ((struct aac_hba_cmd_req *)
- fib->hw_fib_va)->request_id;
break;
}
}
@@ -1375,18 +1375,15 @@ static ssize_t aac_store_reset_adapter(struct device *device,
const char *buf, size_t count)
{
int retval = -EACCES;
- int bled = 0;
- struct aac_dev *aac;
-
if (!capable(CAP_SYS_ADMIN))
return retval;
- aac = (struct aac_dev *)class_to_shost(device)->hostdata;
- bled = buf[0] == '!' ? 1:0;
- retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
+ retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
+ buf[0] == '!', IOP_HWSOFT_RESET);
if (retval >= 0)
retval = count;
+
return retval;
}
@@ -1689,6 +1686,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
+ mutex_init(&aac->scan_mutex);
+
+ INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1792,7 +1792,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_deinit;
- scsi_scan_host(shost);
+
+ aac_scan_host(aac);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -1877,6 +1878,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
+ aac_cancel_safw_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1935,6 +1937,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ aac_cancel_safw_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1992,6 +1995,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
+ aac_cancel_safw_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
@@ -2076,7 +2080,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
scsi_unblock_requests(aac->scsi_host_ptr);
- scsi_scan_host(aac->scsi_host_ptr);
+ aac_scan_host(aac);
pci_save_state(pdev);
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 553922fed524..882f40353b96 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -329,6 +329,22 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;
+ /*
+ * Fill in the function dispatch table.
+ */
+
+ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+ dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_start = aac_sa_start_adapter;
+ dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+
if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
@@ -363,22 +379,6 @@ int aac_sa_init(struct aac_dev *dev)
}
/*
- * Fill in the function dispatch table.
- */
-
- dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
- dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
- dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
- dev->a_ops.adapter_notify = aac_sa_notify_adapter;
- dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
- dev->a_ops.adapter_check_health = aac_sa_check_health;
- dev->a_ops.adapter_restart = aac_sa_restart_adapter;
- dev->a_ops.adapter_start = aac_sa_start_adapter;
- dev->a_ops.adapter_intr = aac_sa_intr;
- dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
- dev->a_ops.adapter_ioremap = aac_sa_ioremap;
-
- /*
* First clear out all interrupts. Then enable the one's that
* we can handle.
*/
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a254b32eba39..f375f3557c18 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,52 +45,57 @@
#include <linux/interrupt.h>
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
-#ifdef CONFIG_XEN
- #define ARCMSR_MAX_FREECCB_NUM 160
-#define ARCMSR_MAX_OUTSTANDING_CMD 155
-#else
- #define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_MAX_OUTSTANDING_CMD 255
-#endif
-#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126"
-#define ARCMSR_SCSI_INITIATOR_ID 255
-#define ARCMSR_MAX_XFER_SECTORS 512
-#define ARCMSR_MAX_XFER_SECTORS_B 4096
-#define ARCMSR_MAX_XFER_SECTORS_C 304
-#define ARCMSR_MAX_TARGETID 17
-#define ARCMSR_MAX_TARGETLUN 8
-#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
-#define ARCMSR_MAX_QBUFFER 4096
-#define ARCMSR_DEFAULT_SG_ENTRIES 38
-#define ARCMSR_MAX_HBB_POSTQUEUE 264
+#define ARCMSR_MAX_FREECCB_NUM 1024
+#define ARCMSR_MAX_OUTSTANDING_CMD 1024
+#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
+#define ARCMSR_MIN_OUTSTANDING_CMD 32
+#define ARCMSR_DRIVER_VERSION "v1.40.00.04-20171130"
+#define ARCMSR_SCSI_INITIATOR_ID 255
+#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
+#define ARCMSR_MAX_XFER_SECTORS_C 304
+#define ARCMSR_MAX_TARGETID 17
+#define ARCMSR_MAX_TARGETLUN 8
+#define ARCMSR_MAX_CMD_PERLUN 128
+#define ARCMSR_DEFAULT_CMD_PERLUN 32
+#define ARCMSR_MIN_CMD_PERLUN 1
+#define ARCMSR_MAX_QBUFFER 4096
+#define ARCMSR_DEFAULT_SG_ENTRIES 38
+#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
-#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
-#define ARCMSR_CDB_SG_PAGE_LENGTH 256
+#define ARCMSR_MAX_HBE_DONEQUEUE 512
+#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
+#define ARCMSR_CDB_SG_PAGE_LENGTH 256
#define ARCMST_NUM_MSIX_VECTORS 4
#ifndef PCI_DEVICE_ID_ARECA_1880
-#define PCI_DEVICE_ID_ARECA_1880 0x1880
- #endif
+#define PCI_DEVICE_ID_ARECA_1880 0x1880
+#endif
#ifndef PCI_DEVICE_ID_ARECA_1214
- #define PCI_DEVICE_ID_ARECA_1214 0x1214
+#define PCI_DEVICE_ID_ARECA_1214 0x1214
#endif
#ifndef PCI_DEVICE_ID_ARECA_1203
- #define PCI_DEVICE_ID_ARECA_1203 0x1203
+#define PCI_DEVICE_ID_ARECA_1203 0x1203
#endif
+#ifndef PCI_DEVICE_ID_ARECA_1884
+#define PCI_DEVICE_ID_ARECA_1884 0x1884
+#endif
+#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
+#define ARCMSR_MINUTES (1000 * 60 * 60)
/*
**********************************************************************************
**
**********************************************************************************
*/
-#define ARC_SUCCESS 0
-#define ARC_FAILURE 1
+#define ARC_SUCCESS 0
+#define ARC_FAILURE 1
/*
*******************************************************************************
** split 64bits dma addressing
*******************************************************************************
*/
-#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
-#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
+#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
+#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
/*
*******************************************************************************
** MESSAGE CONTROL CODE
@@ -130,7 +135,7 @@ struct CMD_MESSAGE_FIELD
#define FUNCTION_SAY_HELLO 0x0807
#define FUNCTION_SAY_GOODBYE 0x0808
#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
-#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
+#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
#define FUNCTION_HARDWARE_RESET 0x080B
/* ARECA IO CONTROL CODE*/
#define ARCMSR_MESSAGE_READ_RQBUFFER \
@@ -161,18 +166,18 @@ struct CMD_MESSAGE_FIELD
** structure for holding DMA address data
*************************************************************
*/
-#define IS_DMA64 (sizeof(dma_addr_t) == 8)
-#define IS_SG64_ADDR 0x01000000 /* bit24 */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+#define IS_SG64_ADDR 0x01000000 /* bit24 */
struct SG32ENTRY
{
- __le32 length;
- __le32 address;
+ __le32 length;
+ __le32 address;
}__attribute__ ((packed));
struct SG64ENTRY
{
- __le32 length;
- __le32 address;
- __le32 addresshigh;
+ __le32 length;
+ __le32 address;
+ __le32 addresshigh;
}__attribute__ ((packed));
/*
********************************************************************
@@ -191,50 +196,50 @@ struct QBUFFER
*/
struct FIRMWARE_INFO
{
- uint32_t signature; /*0, 00-03*/
- uint32_t request_len; /*1, 04-07*/
- uint32_t numbers_queue; /*2, 08-11*/
- uint32_t sdram_size; /*3, 12-15*/
- uint32_t ide_channels; /*4, 16-19*/
- char vendor[40]; /*5, 20-59*/
- char model[8]; /*15, 60-67*/
- char firmware_ver[16]; /*17, 68-83*/
- char device_map[16]; /*21, 84-99*/
- uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
- uint8_t cfgSerial[16]; /*26,104-119*/
- uint32_t cfgPicStatus; /*30,120-123*/
+ uint32_t signature; /*0, 00-03*/
+ uint32_t request_len; /*1, 04-07*/
+ uint32_t numbers_queue; /*2, 08-11*/
+ uint32_t sdram_size; /*3, 12-15*/
+ uint32_t ide_channels; /*4, 16-19*/
+ char vendor[40]; /*5, 20-59*/
+ char model[8]; /*15, 60-67*/
+ char firmware_ver[16]; /*17, 68-83*/
+ char device_map[16]; /*21, 84-99*/
+ uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
+ uint8_t cfgSerial[16]; /*26,104-119*/
+ uint32_t cfgPicStatus; /*30,120-123*/
};
/* signature of set and get firmware config */
-#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
-#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
+#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
+#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
/* message code of inbound message register */
-#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
-#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
-#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
-#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
-#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
-#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
-#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
-#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
-#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
+#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
+#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
+#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
+#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
+#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
+#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
+#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
+#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
+#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
/* doorbell interrupt generator */
-#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
-#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
-#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
-#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
+#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
+#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
+#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
+#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
/* ccb areca cdb flag */
-#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
-#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
+#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
+#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
/* outbound firmware ok */
-#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
+#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
/* ARC-1680 Bus Reset*/
-#define ARCMSR_ARC1680_BUS_RESET 0x00000003
+#define ARCMSR_ARC1680_BUS_RESET 0x00000003
/* ARC-1880 Bus Reset*/
-#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
-#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
+#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
+#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
/*
************************************************************************
@@ -277,9 +282,10 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
+#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
-#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@@ -288,7 +294,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
-#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
+#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
@@ -313,12 +319,12 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
/* Host Interrupt Status */
-#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
+#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
/*
** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
*/
-#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
+#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
/*
** Set if Outbound Doorbell register bits 30:1 have a non-zero
** value. This bit clears only when Outbound Doorbell bits
@@ -331,7 +337,7 @@ struct FIRMWARE_INFO
** Register (FIFO) is not empty. It clears when the Outbound
** Post List FIFO is empty.
*/
-#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
+#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
/*
** This bit indicates a SAS interrupt from a source external to
** the PCIe core. This bit is not maskable.
@@ -340,17 +346,17 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
/*inbound message 0 ready*/
-#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
/*more than 12 request completed in a time*/
#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
/*outbound DATA WRITE isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
+#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
/*outbound DATA READ isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
+#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
/*outbound message 0 ready*/
-#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
/*outbound message cmd isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
@@ -407,18 +413,43 @@ struct FIRMWARE_INFO
#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
/*
*******************************************************************************
+** SPEC. for Areca Type E adapter
+*******************************************************************************
+*/
+#define ARCMSR_SIGNATURE_1884 0x188417D3
+
+#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000
+
+#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
+#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
+#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009
+
+/* ARC-1884 doorbell sync */
+#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
+#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
+#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
+/*
+*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
*******************************************************************************
*/
struct ARCMSR_CDB
{
- uint8_t Bus;
- uint8_t TargetID;
- uint8_t LUN;
- uint8_t Function;
- uint8_t CdbLength;
- uint8_t sgcount;
- uint8_t Flags;
+ uint8_t Bus;
+ uint8_t TargetID;
+ uint8_t LUN;
+ uint8_t Function;
+ uint8_t CdbLength;
+ uint8_t sgcount;
+ uint8_t Flags;
#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
#define ARCMSR_CDB_FLAG_BIOS 0x02
#define ARCMSR_CDB_FLAG_WRITE 0x04
@@ -426,21 +457,21 @@ struct ARCMSR_CDB
#define ARCMSR_CDB_FLAG_HEADQ 0x08
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
- uint8_t msgPages;
- uint32_t msgContext;
- uint32_t DataLength;
- uint8_t Cdb[16];
- uint8_t DeviceStatus;
+ uint8_t msgPages;
+ uint32_t msgContext;
+ uint32_t DataLength;
+ uint8_t Cdb[16];
+ uint8_t DeviceStatus;
#define ARCMSR_DEV_CHECK_CONDITION 0x02
#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
#define ARCMSR_DEV_ABORTED 0xF1
#define ARCMSR_DEV_INIT_FAIL 0xF2
- uint8_t SenseData[15];
+ uint8_t SenseData[15];
union
{
- struct SG32ENTRY sg32entry[1];
- struct SG64ENTRY sg64entry[1];
+ struct SG32ENTRY sg32entry[1];
+ struct SG64ENTRY sg64entry[1];
} u;
};
/*
@@ -480,13 +511,13 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
- uint32_t __iomem *drv2iop_doorbell;
- uint32_t __iomem *drv2iop_doorbell_mask;
- uint32_t __iomem *iop2drv_doorbell;
- uint32_t __iomem *iop2drv_doorbell_mask;
- uint32_t __iomem *message_rwbuffer;
- uint32_t __iomem *message_wbuffer;
- uint32_t __iomem *message_rbuffer;
+ uint32_t __iomem *drv2iop_doorbell;
+ uint32_t __iomem *drv2iop_doorbell_mask;
+ uint32_t __iomem *iop2drv_doorbell;
+ uint32_t __iomem *iop2drv_doorbell_mask;
+ uint32_t __iomem *message_rwbuffer;
+ uint32_t __iomem *message_wbuffer;
+ uint32_t __iomem *message_rbuffer;
};
/*
*********************************************************************
@@ -506,7 +537,7 @@ struct MessageUnit_C{
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
- uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
@@ -518,12 +549,12 @@ struct MessageUnit_C{
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
- uint32_t inbound_free_list_index; /*0060 0063*/
- uint32_t inbound_post_list_index; /*0064 0067*/
- uint32_t outbound_free_list_index; /*0068 006B*/
- uint32_t outbound_post_list_index; /*006C 006F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t outbound_free_list_index; /*0068 006B*/
+ uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
- uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
@@ -531,7 +562,7 @@ struct MessageUnit_C{
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
- uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
@@ -553,10 +584,10 @@ struct MessageUnit_C{
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
- uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
- uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
@@ -614,115 +645,208 @@ struct MessageUnit_D {
u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
};
/*
+*********************************************************************
+** Messaging Unit (MU) of Type E processor (LSI)
+*********************************************************************
+*/
+struct MessageUnit_E{
+ uint32_t iobound_doorbell; /*0000 0003*/
+ uint32_t write_sequence_3xxx; /*0004 0007*/
+ uint32_t host_diagnostic_3xxx; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t reply_post_producer_index; /*0068 006B*/
+ uint32_t reply_post_consumer_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[34]; /*0100 0187*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t message_wbuffer[32]; /*2000 207F*/
+ uint32_t reserved3[32]; /*2080 20FF*/
+ uint32_t message_rbuffer[32]; /*2100 217F*/
+ uint32_t reserved4[32]; /*2180 21FF*/
+ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
+};
+
+typedef struct deliver_completeQ {
+ uint16_t cmdFlag;
+ uint16_t cmdSMID;
+ uint16_t cmdLMID; // reserved (0)
+ uint16_t cmdFlag2; // reserved (0)
+} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
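/*
** Editor's note: each Type E completion entry above is 8 bytes (two live
** 16-bit fields plus two reserved ones), and arcmsr_alloc_io_queue() in the
** .c hunk below sizes the reply ring from it. A user-space sketch of that
** arithmetic, with ARCMSR_MAX_HBE_DONEQUEUE assumed to be 512 (the real
** value is defined elsewhere in this header):
*/
#include <stdio.h>
#include <stdint.h>

struct deliver_completeQ_demo {
	uint16_t cmdFlag;
	uint16_t cmdSMID;
	uint16_t cmdLMID;	/* reserved (0) */
	uint16_t cmdFlag2;	/* reserved (0) */
};

#define DEMO_MAX_HBE_DONEQUEUE	512	/* assumption, not from this diff */
#define DEMO_ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint32_t completeQ_size = sizeof(struct deliver_completeQ_demo) *
				  DEMO_MAX_HBE_DONEQUEUE + 128;
	uint32_t roundup_ccbsize = DEMO_ROUNDUP(completeQ_size, 32);

	printf("allocate %u bytes, completionQ_entry = %u\n", roundup_ccbsize,
	       roundup_ccbsize / (uint32_t)sizeof(struct deliver_completeQ_demo));
	return 0;
}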
+/*
*******************************************************************************
** Adapter Control Block
*******************************************************************************
*/
struct AdapterControlBlock
{
- uint32_t adapter_type; /* adapter A,B..... */
- #define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
- #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
- #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
- #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
- u32 roundup_ccbsize;
- struct pci_dev * pdev;
- struct Scsi_Host * host;
- unsigned long vir2phy_offset;
+ uint32_t adapter_type; /* adapter A,B..... */
+#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
+#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
+#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
+#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
+#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
+ u32 roundup_ccbsize;
+ struct pci_dev * pdev;
+ struct Scsi_Host * host;
+ unsigned long vir2phy_offset;
/* Offset is used in making arc cdb physical to virtual calculations */
- uint32_t outbound_int_enable;
- uint32_t cdb_phyaddr_hi32;
- uint32_t reg_mu_acc_handle0;
- spinlock_t eh_lock;
- spinlock_t ccblist_lock;
- spinlock_t postq_lock;
- spinlock_t doneq_lock;
- spinlock_t rqbuffer_lock;
- spinlock_t wqbuffer_lock;
+ uint32_t outbound_int_enable;
+ uint32_t cdb_phyaddr_hi32;
+ uint32_t reg_mu_acc_handle0;
+ spinlock_t eh_lock;
+ spinlock_t ccblist_lock;
+ spinlock_t postq_lock;
+ spinlock_t doneq_lock;
+ spinlock_t rqbuffer_lock;
+ spinlock_t wqbuffer_lock;
union {
struct MessageUnit_A __iomem *pmuA;
struct MessageUnit_B *pmuB;
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
+ struct MessageUnit_E __iomem *pmuE;
};
/* message unit ATU inbound base address0 */
- void __iomem *mem_base0;
- void __iomem *mem_base1;
- uint32_t acb_flags;
+ void __iomem *mem_base0;
+ void __iomem *mem_base1;
+ uint32_t acb_flags;
u16 dev_id;
- uint8_t adapter_index;
- #define ACB_F_SCSISTOPADAPTER 0x0001
- #define ACB_F_MSG_STOP_BGRB 0x0002
- /* stop RAID background rebuild */
- #define ACB_F_MSG_START_BGRB 0x0004
- /* stop RAID background rebuild */
- #define ACB_F_IOPDATA_OVERFLOW 0x0008
- /* iop message data rqbuffer overflow */
- #define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
- /* message clear wqbuffer */
- #define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
- /* message clear rqbuffer */
- #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
- #define ACB_F_BUS_RESET 0x0080
- #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
+ uint8_t adapter_index;
+#define ACB_F_SCSISTOPADAPTER 0x0001
+#define ACB_F_MSG_STOP_BGRB 0x0002
+/* stop RAID background rebuild */
+#define ACB_F_MSG_START_BGRB 0x0004
+/* stop RAID background rebuild */
+#define ACB_F_IOPDATA_OVERFLOW 0x0008
+/* iop message data rqbuffer overflow */
+#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
+/* message clear wqbuffer */
+#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
+/* message clear rqbuffer */
+#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
+#define ACB_F_BUS_RESET 0x0080
+#define ACB_F_BUS_HANG_ON 0x0800 /* need hardware reset bus */
- #define ACB_F_IOP_INITED 0x0100
- /* iop init */
- #define ACB_F_ABORT 0x0200
- #define ACB_F_FIRMWARE_TRAP 0x0400
- struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
+#define ACB_F_IOP_INITED 0x0100
+/* iop init */
+#define ACB_F_ABORT 0x0200
+#define ACB_F_FIRMWARE_TRAP 0x0400
+#define ACB_F_MSG_GET_CONFIG 0x1000
+ struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
- struct list_head ccb_free_list;
+ struct list_head ccb_free_list;
/* head of free ccb list */
- atomic_t ccboutstandingcount;
+ atomic_t ccboutstandingcount;
/*The number of outstanding commands currently in the IOP,
waiting to be handled by the firmware*/
- void * dma_coherent;
+ void * dma_coherent;
/* dma_coherent used for memory free */
- dma_addr_t dma_coherent_handle;
+ dma_addr_t dma_coherent_handle;
/* dma_coherent_handle used for memory free */
- dma_addr_t dma_coherent_handle2;
- void *dma_coherent2;
- unsigned int uncache_size;
- uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
+ dma_addr_t dma_coherent_handle2;
+ void *dma_coherent2;
+ unsigned int uncache_size;
+ uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for read from 80331 */
- int32_t rqbuf_getIndex;
+ int32_t rqbuf_getIndex;
/* first of read buffer */
- int32_t rqbuf_putIndex;
+ int32_t rqbuf_putIndex;
/* last of read buffer */
- uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
+ uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for write to 80331 */
- int32_t wqbuf_getIndex;
+ int32_t wqbuf_getIndex;
/* first of write buffer */
- int32_t wqbuf_putIndex;
+ int32_t wqbuf_putIndex;
/* last of write buffer */
- uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
+ uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
/* id0 ..... id15, lun0...lun7 */
-#define ARECA_RAID_GONE 0x55
-#define ARECA_RAID_GOOD 0xaa
- uint32_t num_resets;
- uint32_t num_aborts;
- uint32_t signature;
- uint32_t firm_request_len;
- uint32_t firm_numbers_queue;
- uint32_t firm_sdram_size;
- uint32_t firm_hd_channels;
- uint32_t firm_cfg_version;
+#define ARECA_RAID_GONE 0x55
+#define ARECA_RAID_GOOD 0xaa
+ uint32_t num_resets;
+ uint32_t num_aborts;
+ uint32_t signature;
+ uint32_t firm_request_len;
+ uint32_t firm_numbers_queue;
+ uint32_t firm_sdram_size;
+ uint32_t firm_hd_channels;
+ uint32_t firm_cfg_version;
char firm_model[12];
char firm_version[20];
char device_map[20]; /*21,84-99*/
- struct work_struct arcmsr_do_message_isr_bh;
- struct timer_list eternal_timer;
+ struct work_struct arcmsr_do_message_isr_bh;
+ struct timer_list eternal_timer;
unsigned short fw_flag;
- #define FW_NORMAL 0x0000
- #define FW_BOG 0x0001
- #define FW_DEADLOCK 0x0010
- atomic_t rq_map_token;
- atomic_t ante_token_value;
- uint32_t maxOutstanding;
- int vector_count;
+#define FW_NORMAL 0x0000
+#define FW_BOG 0x0001
+#define FW_DEADLOCK 0x0010
+ atomic_t rq_map_token;
+ atomic_t ante_token_value;
+ uint32_t maxOutstanding;
+ int vector_count;
+ uint32_t maxFreeCCB;
+ struct timer_list refresh_timer;
+ uint32_t doneq_index;
+ uint32_t ccbsize;
+ uint32_t in_doorbell;
+ uint32_t out_doorbell;
+ uint32_t completionQ_entry;
+ pCompletion_Q pCompletionQ;
};/* HW_DEVICE_EXTENSION */
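/*
** Editor's note: ACB_ADAPTER_TYPE_* switches here from one-hot flags
** (0x01/0x02/0x04/0x08) to consecutive integers (0..4). Later hunks rely on
** that ordering: "acb->adapter_type > ACB_ADAPTER_TYPE_B" replaces the old
** bit-mask test and automatically covers the new Type E. A standalone
** sketch of why:
*/
#include <stdio.h>

#define DEMO_TYPE_A 0	/* was 0x01 */
#define DEMO_TYPE_B 1	/* was 0x02 */
#define DEMO_TYPE_C 2	/* was 0x04 */
#define DEMO_TYPE_D 3	/* was 0x08 */
#define DEMO_TYPE_E 4	/* new in this patch */

int main(void)
{
	int t;

	for (t = DEMO_TYPE_A; t <= DEMO_TYPE_E; t++)
		/* old code: t & (0x04 | 0x08) would miss Type E;
		 * the ordered compare catches every post-B type */
		printf("type %d uses DWORD ioctl path: %d\n", t, t > DEMO_TYPE_B);
	return 0;
}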
/*
*******************************************************************************
@@ -732,29 +856,30 @@ struct AdapterControlBlock
*/
struct CommandControlBlock{
/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
- struct list_head list; /*x32: 8byte, x64: 16byte*/
- struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
- struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
- uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
- uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
- uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
- #define CCB_FLAG_READ 0x0000
- #define CCB_FLAG_WRITE 0x0001
- #define CCB_FLAG_ERROR 0x0002
- #define CCB_FLAG_FLUSHCACHE 0x0004
- #define CCB_FLAG_MASTER_ABORTED 0x0008
- uint16_t startdone; /*x32:2byte,x32:2byte*/
- #define ARCMSR_CCB_DONE 0x0000
- #define ARCMSR_CCB_START 0x55AA
- #define ARCMSR_CCB_ABORTED 0xAA55
- #define ARCMSR_CCB_ILLEGAL 0xFFFF
- #if BITS_PER_LONG == 64
+ struct list_head list; /*x32: 8byte, x64: 16byte*/
+ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
+ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
+ uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
+ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
+ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
+#define CCB_FLAG_READ 0x0000
+#define CCB_FLAG_WRITE 0x0001
+#define CCB_FLAG_ERROR 0x0002
+#define CCB_FLAG_FLUSHCACHE 0x0004
+#define CCB_FLAG_MASTER_ABORTED 0x0008
+ uint16_t startdone; /*x32:2byte,x64:2byte*/
+#define ARCMSR_CCB_DONE 0x0000
+#define ARCMSR_CCB_START 0x55AA
+#define ARCMSR_CCB_ABORTED 0xAA55
+#define ARCMSR_CCB_ILLEGAL 0xFFFF
+ uint32_t smid;
+#if BITS_PER_LONG == 64
/* ======================512+64 bytes======================== */
- uint32_t reserved[5]; /*24 byte*/
- #else
+ uint32_t reserved[4]; /*16 byte*/
+#else
/* ======================512+32 bytes======================== */
- uint32_t reserved; /*8 byte*/
- #endif
+ // uint32_t reserved; /*4 byte*/
+#endif
/* ======================================================= */
struct ARCMSR_CDB arcmsr_cdb;
};
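/*
** Editor's note: the new "smid" above is preassigned in
** arcmsr_alloc_ccb_pool() as (u32)i << 16, so the pool index rides in the
** upper half-word of the value later written to inbound_queueport_low (the
** low bits carry the CDB size in 64-byte units). The Type E completion path
** then indexes acb->pccb_pool[cmdSMID] directly. A user-space encode/decode
** sketch:
*/
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t i = 5;			/* CCB pool index */
	uint32_t arc_cdb_size = 0x124;	/* example size in bytes */
	uint32_t smid = i << 16;	/* as in arcmsr_alloc_ccb_pool() */
	uint32_t post = smid | ((arc_cdb_size - 1) >> 6);	/* arcmsr_post_ccb() */
	uint16_t cmdSMID = post >> 16;	/* what the IOP hands back */

	printf("post=0x%08x -> pccb_pool[%u]\n", post, cmdSMID);
	return 0;
}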
@@ -788,13 +913,13 @@ struct SENSE_DATA
** Outbound Interrupt Status Register - OISR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
-#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
-#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
+#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
+#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
(ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
|ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
|ARCMSR_MU_OUTBOUND_DOORBELL_INT \
@@ -805,13 +930,13 @@ struct SENSE_DATA
** Outbound Interrupt Mask Register - OIMR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
-#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
-#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
+#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
+#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 21f6421536a0..75e828bd30e3 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,6 +75,26 @@ MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
+static int msix_enable = 1;
+module_param(msix_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
+
+static int msi_enable = 1;
+module_param(msi_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
+
+static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+module_param(host_can_queue, int, S_IRUGO);
+MODULE_PARM_DESC(host_can_queue, "adapter queue depth (32 ~ 1024), default is 128");
+
+static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+module_param(cmd_per_lun, int, S_IRUGO);
+MODULE_PARM_DESC(cmd_per_lun, "device queue depth (1 ~ 128), default is 32");
+
+static int set_date_time = 0;
+module_param(set_date_time, int, S_IRUGO);
+MODULE_PARM_DESC(set_date_time, "send date and time to iop (0 ~ 1), set_date_time=1 (enable), default (=0) is disabled");
+
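/*
** Editor's note: the new parameters are not range-checked at load time;
** arcmsr_probe() below silently falls back to the defaults for out-of-range
** values rather than clamping to the nearest bound. A user-space sketch of
** that policy (bounds 32/1024/128 as stated in the parameter description):
*/
#include <stdio.h>

static int fallback_if_out_of_range(int v, int lo, int hi, int def)
{
	return (v < lo || v > hi) ? def : v;	/* fall back, do not clamp */
}

int main(void)
{
	printf("%d\n", fallback_if_out_of_range(4096, 32, 1024, 128));	/* -> 128 */
	printf("%d\n", fallback_if_out_of_range(256, 32, 1024, 128));	/* -> 256 */
	return 0;
}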
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
@@ -102,19 +122,19 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
+static void arcmsr_set_iop_datetime(struct timer_list *);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -127,15 +147,15 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.name = "Areca SAS/SATA RAID driver",
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
- .eh_abort_handler = arcmsr_abort,
+ .eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
- .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
- .this_id = ARCMSR_SCSI_INITIATOR_ID,
- .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
- .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
- .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
+ .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
+ .this_id = ARCMSR_SCSI_INITIATOR_ID,
+ .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
+ .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
+ .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
@@ -184,13 +204,15 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
.driver_data = ACB_ADAPTER_TYPE_C},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
+ .driver_data = ACB_ADAPTER_TYPE_E},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
- .id_table = arcmsr_device_id_table,
+ .id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
.suspend = arcmsr_suspend,
@@ -206,7 +228,8 @@ static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
- case ACB_ADAPTER_TYPE_D: {
+ case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E: {
dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
@@ -271,6 +294,20 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
acb->mem_base0 = mem_base0;
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ acb->pmuE = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!acb->pmuE) {
+ pr_notice("arcmsr%d: memory mapping region fail \n",
+ acb->host->host_no);
+ return false;
+ }
+ writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ break;
+ }
}
return true;
}
@@ -295,6 +332,9 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
iounmap(acb->mem_base0);
break;
+ case ACB_ADAPTER_TYPE_E:
+ iounmap(acb->pmuE);
+ break;
}
}
@@ -408,6 +448,24 @@ static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
return false;
}
+static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ int i;
+ uint32_t read_doorbell;
+ struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
+
+ for (i = 0; i < 2000; i++) {
+ read_doorbell = readl(&phbcmu->iobound_doorbell);
+ if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(0, &phbcmu->host_int_status); /*clear interrupt*/
+ pACB->in_doorbell = read_doorbell;
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+ return false;
+}
+
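/*
** Editor's note: unlike Types A-D, the Type E doorbell carries no level
** state; each side toggles a bit and the peer detects the edge by XOR-ing
** the fresh register value against its cached copy (pACB->in_doorbell /
** pACB->out_doorbell), exactly as the wait routine above does. A minimal
** user-space model of the handshake:
*/
#include <stdio.h>
#include <stdint.h>

#define DEMO_MESSAGE_CMD_DONE 0x00000008	/* ARCMSR_HBEMU_*_MESSAGE_CMD_DONE */

int main(void)
{
	uint32_t hw_doorbell = 0;	/* stands in for the MMIO register */
	uint32_t last_seen = 0;		/* driver's cached in_doorbell */

	hw_doorbell ^= DEMO_MESSAGE_CMD_DONE;	/* IOP toggles the bit */
	if ((hw_doorbell ^ last_seen) & DEMO_MESSAGE_CMD_DONE) {
		last_seen = hw_doorbell;	/* edge seen: consume it */
		printf("message done\n");
	}
	hw_doorbell ^= DEMO_MESSAGE_CMD_DONE;	/* toggling again is a new event */
	printf("pending: %d\n",
	       !!((hw_doorbell ^ last_seen) & DEMO_MESSAGE_CMD_DONE));
	return 0;
}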
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -475,6 +533,24 @@ static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
} while (retry_count != 0);
}
+static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
+{
+ int retry_count = 30;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ do {
+ if (arcmsr_hbaE_wait_msgint_ready(pACB))
+ break;
+ retry_count--;
+ pr_notice("arcmsr%d: wait 'flush adapter "
+ "cache' timeout, retry count down = %d\n",
+ pACB->host->host_no, retry_count);
+ } while (retry_count != 0);
+}
+
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -495,6 +571,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_flush_cache(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_flush_cache(acb);
+ break;
}
}
@@ -577,6 +656,23 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ uint32_t completeQ_size;
+ completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
+ acb->roundup_ccbsize = roundup(completeQ_size, 32);
+ dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent) {
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ acb->pCompletionQ = dma_coherent;
+ acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ }
+ break;
default:
break;
}
@@ -610,7 +706,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->max_sectors = max_xfer_len/512;
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
- acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+ acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -619,9 +715,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->dma_coherent = dma_coherent;
acb->dma_coherent_handle = dma_coherent_handle;
memset(dma_coherent, 0, acb->uncache_size);
+ acb->ccbsize = roundup_ccbsize;
ccb_tmp = dma_coherent;
acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
- for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
+ for(i = 0; i < acb->maxFreeCCB; i++){
cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
@@ -630,11 +727,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E:
ccb_tmp->cdb_phyaddr = cdb_phyaddr;
break;
}
acb->pccb_pool[i] = ccb_tmp;
ccb_tmp->acb = acb;
+ ccb_tmp->smid = (u32)i << 16;
INIT_LIST_HEAD(&ccb_tmp->list);
list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
@@ -654,6 +753,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
struct scsi_device *psdev;
char diff, temp;
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -683,6 +783,13 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
}
atomic_inc(&acb->rq_map_token);
if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
@@ -723,17 +830,26 @@ arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
unsigned long flags;
int nvec, i;
+ if (msix_enable == 0)
+ goto msi_int0;
nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
PCI_IRQ_MSIX);
if (nvec > 0) {
pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
flags = 0;
} else {
- nvec = pci_alloc_irq_vectors(pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+msi_int0:
+ if (msi_enable == 1) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1) {
+ dev_info(&pdev->dev, "msi enabled\n");
+ goto msi_int1;
+ }
+ }
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
if (nvec < 1)
return FAILED;
-
+msi_int1:
flags = IRQF_SHARED;
}
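/*
** Editor's note: the reworked allocation tries MSI-X first, then MSI, then
** legacy INTx, with the new msix_enable/msi_enable parameters able to veto
** the first two tiers; MSI is deliberately limited to a single vector. A
** user-space model of the selection logic (the *_ok flags stand in for what
** pci_alloc_irq_vectors() would report):
*/
#include <stdio.h>

static const char *pick_irq_mode(int msix_enable, int msi_enable,
				 int msix_ok, int msi_ok)
{
	if (msix_enable && msix_ok)
		return "msi-x";		/* flags = 0 */
	if (msi_enable && msi_ok)
		return "msi";		/* flags = IRQF_SHARED */
	return "legacy intx";		/* flags = IRQF_SHARED */
}

int main(void)
{
	printf("%s\n", pick_irq_mode(1, 1, 1, 1));	/* msi-x */
	printf("%s\n", pick_irq_mode(0, 1, 1, 1));	/* msi */
	printf("%s\n", pick_irq_mode(0, 0, 1, 1));	/* legacy intx */
	return 0;
}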
@@ -755,6 +871,24 @@ out_free_irq:
return FAILED;
}
+static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
+{
+ INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&pacb->rq_map_token, 16);
+ atomic_set(&pacb->ante_token_value, 16);
+ pacb->fw_flag = FW_NORMAL;
+ timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
+ pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ add_timer(&pacb->eternal_timer);
+}
+
+static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
+{
+ timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
+ pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ add_timer(&pacb->refresh_timer);
+}
+
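/*
** Editor's note: timer_setup() (4.15-era API) hands the callback the timer
** itself, and from_timer() -- a container_of() wrapper -- recovers the
** enclosing structure, here the AdapterControlBlock holding eternal_timer /
** refresh_timer. A user-space model of that recovery, with demo_* names
** standing in for the kernel types:
*/
#include <stdio.h>
#include <stddef.h>

struct demo_timer { int dummy; };
struct demo_acb {
	int host_no;
	struct demo_timer eternal_timer;
};

/* container_of(), as in <linux/kernel.h> */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void demo_callback(struct demo_timer *t)
{
	struct demo_acb *acb = demo_container_of(t, struct demo_acb, eternal_timer);

	printf("timer fired for arcmsr%d\n", acb->host_no);
}

int main(void)
{
	struct demo_acb acb = { .host_no = 0 };

	demo_callback(&acb.eternal_timer);	/* what the timer core does */
	return 0;
}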
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -789,8 +923,12 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_lun = ARCMSR_MAX_TARGETLUN;
host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
host->max_cmd_len = 16; /* 16-byte CDBs are needed for 64-bit LBA, i.e. volumes over 2 TB */
- host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
- host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
+ host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+ host->can_queue = host_can_queue; /* max simultaneous cmds */
+ if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
+ cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+ host->cmd_per_lun = cmd_per_lun;
host->this_id = ARCMSR_SCSI_INITIATOR_ID;
host->unique_id = (bus << 8) | dev_fun;
pci_set_drvdata(pdev, host);
@@ -833,18 +971,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto scsi_host_remove;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
scsi_scan_host(host);
return 0;
out_free_sysfs:
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
del_timer_sync(&acb->eternal_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
@@ -887,6 +1023,8 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -924,13 +1062,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
return 0;
controller_stop:
arcmsr_stop_adapter_bgrb(acb);
@@ -998,6 +1132,21 @@ static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
return true;
}
+static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'abort all outstanding "
+ "command' timeout\n", pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
@@ -1020,6 +1169,9 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtnval = arcmsr_hbaD_abort_allcmd(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtnval = arcmsr_hbaE_abort_allcmd(acb);
+ break;
}
return rtnval;
}
@@ -1050,7 +1202,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
struct scsi_cmnd *pcmd = ccb->pcmd;
struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = DID_OK << 16;
+ pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (sensebuffer) {
int sense_data_length =
sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
@@ -1059,6 +1211,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
+ pcmd->result |= (DRIVER_SENSE << 24);
}
}
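/*
** Editor's note: the fix above packs a full SCSI result word rather than
** bare DID_OK: driver byte in bits 31-24, host byte in bits 23-16, SAM
** status in the low byte. Kernels of this era store status codes shifted
** right by one, so CHECK_CONDITION is 0x01 and (CHECK_CONDITION << 1)
** yields SAM's 0x02. A user-space sketch of the composition:
*/
#include <stdio.h>
#include <stdint.h>

#define DEMO_DID_OK		0x00	/* host byte */
#define DEMO_CHECK_CONDITION	0x01	/* pre-shifted, as in old <scsi/scsi.h> */
#define DEMO_DRIVER_SENSE	0x08	/* driver byte: sense buffer is valid */

int main(void)
{
	uint32_t result = (DEMO_DID_OK << 16) | (DEMO_CHECK_CONDITION << 1);

	result |= (DEMO_DRIVER_SENSE << 24);	/* only once sense data is copied */
	printf("result = 0x%08x\n", result);	/* 0x08000002 */
	return 0;
}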
@@ -1092,6 +1245,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ orig_mask = readl(&reg->host_int_mask);
+ writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
+ readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
+ }
+ break;
}
return orig_mask;
}
@@ -1196,7 +1356,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
- && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ && (i++ < acb->maxOutstanding)) {
pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1226,7 +1386,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
- while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
/*need to do*/
flag_ccb = readl(&reg->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
@@ -1280,6 +1440,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
pmu->doneq_index = 0x40FF;
}
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_postqueue_isr(acb);
+ break;
}
}
@@ -1293,13 +1456,15 @@ static void arcmsr_remove(struct pci_dev *pdev)
scsi_remove_host(host);
flush_work(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
- for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
+ for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
@@ -1311,7 +1476,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -1335,6 +1500,8 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
flush_work(&acb->arcmsr_do_message_isr_bh);
@@ -1396,6 +1563,13 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
writel(intmask_org | mask, reg->pcief0_int_enable);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
+ writel(intmask_org & mask, &reg->host_int_mask);
+ break;
+ }
}
}
@@ -1527,6 +1701,16 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
spin_unlock_irqrestore(&acb->postq_lock, flags);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *pmu = acb->pmuE;
+ u32 ccb_post_stamp, arc_cdb_size;
+
+ arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
+ ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
+ writel(0, &pmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+ break;
+ }
}
}
@@ -1580,6 +1764,20 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
"timeout\n", pACB->host->host_no);
}
+static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ "timeout\n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -1599,6 +1797,9 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_stop_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_stop_bgrb(acb);
+ break;
}
}
@@ -1633,6 +1834,12 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1673,6 +1880,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1702,6 +1915,11 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -1732,6 +1950,11 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
+ }
+ break;
}
return pqbuffer;
}
@@ -1785,7 +2008,7 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
uint8_t __iomem *iop_data;
uint32_t iop_len;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = readl(&prbuffer->data_len);
@@ -1871,7 +2094,7 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
arcmsr_write_ioctldata2iop_in_DWORD(acb);
return;
}
@@ -1968,6 +2191,33 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
+static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell, in_doorbell, tmp;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
+ do {
+ writel(0, &reg->host_int_status); /* clear interrupt */
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
+ arcmsr_iop2drv_data_read_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ arcmsr_hbaE_message_isr(pACB);
+ }
+ tmp = in_doorbell;
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = tmp ^ in_doorbell;
+ } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
+ | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
+ pACB->in_doorbell = in_doorbell;
+}
+
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t flag_ccb;
@@ -2077,6 +2327,33 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t doneq_index;
+ uint16_t cmdSMID;
+ int error;
+ struct MessageUnit_E __iomem *pmu;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ pmu = acb->pmuE;
+ while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ ccb = acb->pccb_pool[cmdSMID];
+ error = (acb->pCompletionQ[doneq_index].cmdFlag
+ & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ }
+ acb->doneq_index = doneq_index;
+ writel(doneq_index, &pmu->reply_post_consumer_index);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
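/*
** Editor's note: the Type E done queue above is a single-producer ring in
** host memory; the IOP advances reply_post_producer_index as it DMAs
** entries in, and the driver chases it with doneq_index (wrapping at
** completionQ_entry), publishing its progress back through
** reply_post_consumer_index. A user-space model of the index walk:
*/
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t entries = 68;		/* acb->completionQ_entry, example */
	uint32_t producer = 3;		/* low word of reply_post_producer_index */
	uint32_t consumer = 66;		/* acb->doneq_index, about to wrap */

	while (producer != consumer) {
		printf("drain slot %u\n", consumer);	/* pCompletionQ[consumer] */
		if (++consumer >= entries)
			consumer = 0;	/* ring wraparound */
	}
	/* then: writel(consumer, &pmu->reply_post_consumer_index) */
	return 0;
}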
/*
**********************************************************************************
** Handle a message interrupt
@@ -2090,7 +2367,8 @@ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_A __iomem *reg = acb->pmuA;
/*clear interrupt and message state*/
writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
@@ -2098,7 +2376,8 @@ static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
@@ -2114,7 +2393,8 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_C __iomem *reg = acb->pmuC;
/*clear interrupt and message state*/
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
@@ -2123,7 +2403,17 @@ static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
readl(reg->outbound_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ writel(0, &reg->host_int_status);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
@@ -2229,6 +2519,31 @@ static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
return IRQ_HANDLED;
}
+static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ host_interrupt_status = readl(&pmu->host_int_status) &
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU ioctl transfer doorbell interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
+ arcmsr_hbaE_doorbell_isr(pACB);
+ }
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
+ arcmsr_hbaE_postqueue_isr(pACB);
+ }
+ host_interrupt_status = readl(&pmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -2242,6 +2557,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
return arcmsr_hbaC_handle_isr(acb);
case ACB_ADAPTER_TYPE_D:
return arcmsr_hbaD_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_E:
+ return arcmsr_hbaE_handle_isr(acb);
default:
return IRQ_NONE;
}
@@ -2636,74 +2953,66 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
- char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
- char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
int count;
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
- printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
- miscellaneous data' timeout \n", acb->host->host_no);
- return false;
- }
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
+ uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
+ uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
+ uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
+ uint32_t *firm_model = &rwbuffer[15];
+ uint32_t *firm_version = &rwbuffer[17];
+ uint32_t *device_map = &rwbuffer[21];
+
+ count = 2;
+ while (count) {
+ *acb_firm_model = readl(firm_model);
acb_firm_model++;
- iop_firm_model++;
+ firm_model++;
count--;
}
-
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
+ count = 4;
+ while (count) {
+ *acb_firm_version = readl(firm_version);
acb_firm_version++;
- iop_firm_version++;
+ firm_version++;
count--;
}
-
- count=16;
- while(count){
- *acb_device_map = readb(iop_device_map);
+ count = 4;
+ while (count) {
+ *acb_device_map = readl(device_map);
acb_device_map++;
- iop_device_map++;
+ device_map++;
count--;
}
+ pACB->signature = readl(&rwbuffer[0]);
+ pACB->firm_request_len = readl(&rwbuffer[1]);
+ pACB->firm_numbers_queue = readl(&rwbuffer[2]);
+ pACB->firm_sdram_size = readl(&rwbuffer[3]);
+ pACB->firm_hd_channels = readl(&rwbuffer[4]);
+ pACB->firm_cfg_version = readl(&rwbuffer[25]);
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
- acb->signature = readl(&reg->message_rwbuffer[0]);
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ pACB->host->host_no,
+ pACB->firm_model,
+ pACB->firm_version);
+}
+
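/*
** Editor's note: the shared helper above replaces the per-byte readb()
** loops with readl() over the message rwbuffer, so the loop counts drop
** from 8/16/16 bytes to 2/4/4 32-bit words and one routine serves all five
** adapter types. A user-space sketch of the word-wise copy (plain array
** reads stand in for readl()):
*/
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t rwbuffer[26] = {0};	/* models message_rwbuffer */
	union { char s[12]; uint32_t w[3]; } firm_model = { .s = {0} };
	uint32_t *dst = firm_model.w;
	uint32_t *src = &rwbuffer[15];	/* model lives in words 15-16 */
	int count = 2;			/* 2 words == 8 bytes */

	memcpy(&rwbuffer[15], "ARC-1884", 8);
	while (count--)
		*dst++ = *src++;	/* readl() in the driver */
	printf("model: %s\n", firm_model.s);
	return 0;
}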
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ arcmsr_wait_firmware_ready(acb);
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
+ miscellaneous data' timeout \n", acb->host->host_no);
+ return false;
+ }
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- /*firm_model,15,60-67*/
- char __iomem *iop_firm_version;
- /*firm_version,17,68-83*/
- char __iomem *iop_device_map;
- /*firm_version,21,84-99*/
- int count;
-
- iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
- iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
- iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
arcmsr_wait_firmware_ready(acb);
writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
@@ -2717,127 +3026,43 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
miscellaneous data' timeout \n", acb->host->host_no);
return false;
}
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
-
- count = 16;
- while(count){
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
- }
-
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
-
- acb->signature = readl(&reg->message_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- /*firm_ide_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*firm_ide_channels,4,16-19*/
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
- uint32_t intmask_org, Index, firmware_state = 0;
+ uint32_t intmask_org;
struct MessageUnit_C __iomem *reg = pACB->pmuC;
- char *acb_firm_model = pACB->firm_model;
- char *acb_firm_version = pACB->firm_version;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
- char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
- int count;
+
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
/* wait firmware ready */
- do {
- firmware_state = readl(&reg->outbound_msgaddr1);
- } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
+ arcmsr_wait_firmware_ready(pACB);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
/* wait message ready */
- for (Index = 0; Index < 2000; Index++) {
- if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
- writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
- break;
- }
- udelay(10);
- } /*max 1 seconds*/
- if (Index >= 2000) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- pACB->host->host_no,
- pACB->firm_model,
- pACB->firm_version);
- pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
- pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
- pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
- pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
- pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*all interrupt service will be enable at arcmsr_iop_init*/
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- char __iomem *iop_firm_version;
- char __iomem *iop_device_map;
- u32 count;
struct MessageUnit_D *reg = acb->pmuD;
- iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
- iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
- iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
if (readl(acb->pmuD->outbound_doorbell) &
ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
acb->pmuD->outbound_doorbell);/*clear interrupt*/
}
+ arcmsr_wait_firmware_ready(acb);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
/* wait message ready */
@@ -2846,42 +3071,33 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
"miscellaneous data timeout\n", acb->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
+ arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
+ return true;
+}
+
+static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+ uint32_t intmask_org;
+
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ arcmsr_wait_firmware_ready(pACB);
+ mdelay(20);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ /* wait message ready */
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait get adapter firmware "
+ "miscellaneous data timeout\n", pACB->host->host_no);
+ return false;
}
- acb->signature = readl(&reg->msgcode_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
- /*firm_hd_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
@@ -2902,14 +3118,20 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_get_config(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_get_config(acb);
+ break;
default:
break;
}
- if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
- acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
+ acb->maxOutstanding = acb->firm_numbers_queue - 1;
+ if (acb->host->can_queue >= acb->firm_numbers_queue)
+ acb->host->can_queue = acb->maxOutstanding;
else
- acb->maxOutstanding = acb->firm_numbers_queue - 1;
- acb->host->can_queue = acb->maxOutstanding;
+ acb->maxOutstanding = acb->host->can_queue;
+ acb->maxFreeCCB = acb->host->can_queue;
+ if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
+ acb->maxFreeCCB += 64;
return rtn;
}
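The rewritten tail of arcmsr_get_firmware_spec() clamps the host's can_queue to one below the firmware queue depth and then sizes the CCB pool from it, padding by 64 while below ARCMSR_MAX_FREECCB_NUM. A hedged sketch of that arithmetic with illustrative constants (the kernel's actual ARCMSR_MAX_FREECCB_NUM may differ):

#include <stdio.h>

#define ARCMSR_MAX_FREECCB_NUM 320	/* illustrative value only */

int main(void)
{
	int firm_numbers_queue = 256;	/* depth reported by firmware */
	int can_queue = 128;		/* host template default */
	int maxOutstanding, maxFreeCCB;

	maxOutstanding = firm_numbers_queue - 1;
	if (can_queue >= firm_numbers_queue)
		can_queue = maxOutstanding;
	else
		maxOutstanding = can_queue;
	maxFreeCCB = can_queue;
	if (maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
		maxFreeCCB += 64;	/* pad the pool below the cap */
	printf("maxOutstanding=%d can_queue=%d maxFreeCCB=%d\n",
	       maxOutstanding, can_queue, maxFreeCCB);
	return 0;
}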
@@ -3166,6 +3388,75 @@ polling_hbaD_ccb_retry:
return rtn;
}
+static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ bool error;
+ uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
+ uint16_t cmdSMID;
+ unsigned long flags;
+ int rtn;
+ struct CommandControlBlock *pCCB;
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ polling_hbaE_ccb_retry:
+ poll_count++;
+ while (1) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
+ doneq_index) {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 40) {
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbaE_ccb_retry;
+ }
+ }
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ acb->doneq_index = doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ pCCB = acb->pccb_pool[cmdSMID];
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ /* check if command done with no error*/
+ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ pr_notice("arcmsr%d: scsi id = %d "
+ "lun = %d ccb = '0x%p' poll command "
+ "abort successfully\n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ pr_notice("arcmsr%d: polling an illegal "
+ "ccb command done ccb = '0x%p' "
+ "ccboutstandingcount = %d\n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (acb->pCompletionQ[doneq_index].cmdFlag &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ writel(doneq_index, &reg->reply_post_consumer_index);
+ return rtn;
+}
+
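The new type E poll loop is a classic single-consumer completion ring: the IOP publishes a producer index, and the driver chases it with its own consumer index, wrapping at completionQ_entry. A self-contained sketch of the consumer walk over an ordinary array (no MMIO or locking; sizes and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_ENTRIES 8

struct completion { uint16_t cmdSMID; };

int main(void)
{
	struct completion q[QUEUE_ENTRIES];
	uint32_t producer = 5, consumer = 6;	/* pretend the ring wrapped */
	uint32_t i;

	for (i = 0; i < QUEUE_ENTRIES; i++)
		q[i].cmdSMID = (uint16_t)(100 + i);

	/* the driver compares against a readl()ed producer index instead */
	while (consumer != producer) {
		printf("complete SMID %u at slot %u\n",
		       q[consumer].cmdSMID, consumer);
		if (++consumer >= QUEUE_ENTRIES)
			consumer = 0;		/* wrap, as the driver does */
	}
	return 0;
}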
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
@@ -3188,10 +3479,95 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
+ break;
}
return rtn;
}
+static void arcmsr_set_iop_datetime(struct timer_list *t)
+{
+ struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
+ unsigned int next_time;
+ struct tm tm;
+
+ union {
+ struct {
+ uint16_t signature;
+ uint8_t year;
+ uint8_t month;
+ uint8_t date;
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ } a;
+ struct {
+ uint32_t msg_time[2];
+ } b;
+ } datetime;
+
+ time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
+
+ datetime.a.signature = 0x55AA;
+ datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
+ datetime.a.month = tm.tm_mon;
+ datetime.a.date = tm.tm_mday;
+ datetime.a.hour = tm.tm_hour;
+ datetime.a.minute = tm.tm_min;
+ datetime.a.second = tm.tm_sec;
+
+ switch (pacb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = pacb->pmuA;
+ writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_B *reg = pacb->pmuB;
+ rwbuffer = reg->message_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = pacb->pmuC;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_D *reg = pacb->pmuD;
+ rwbuffer = reg->msgcode_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = pacb->pmuE;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ }
+ if (sys_tz.tz_minuteswest)
+ next_time = ARCMSR_HOURS;
+ else
+ next_time = ARCMSR_MINUTES;
+ mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
+}
+
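arcmsr_set_iop_datetime() leans on a union so the 16-bit signature plus six byte-wide date fields occupy exactly the two 32-bit words that get written to the message buffer. A runnable user-space sketch of the same packing; the 0x55AA signature and field layout follow the patch, but here the word contents depend on host endianness, whereas the driver publishes them through writel():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	union {
		struct {
			uint16_t signature;
			uint8_t year, month, date, hour, minute, second;
		} a;				/* 8 bytes, naturally packed */
		uint32_t msg_time[2];		/* the two words actually sent */
	} dt;
	time_t now = time(NULL);
	struct tm tm;

	localtime_r(&now, &tm);
	dt.a.signature = 0x55AA;
	dt.a.year = (uint8_t)(tm.tm_year - 100);	/* base 2000, as in the patch */
	dt.a.month = (uint8_t)tm.tm_mon;
	dt.a.date = (uint8_t)tm.tm_mday;
	dt.a.hour = (uint8_t)tm.tm_hour;
	dt.a.minute = (uint8_t)tm.tm_min;
	dt.a.second = (uint8_t)tm.tm_sec;
	printf("msg_time[0]=0x%08x msg_time[1]=0x%08x\n",
	       dt.msg_time[0], dt.msg_time[1]);
	return 0;
}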
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
@@ -3208,6 +3584,10 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
dma_coherent_handle = acb->dma_coherent_handle2;
break;
+ case ACB_ADAPTER_TYPE_E:
+ dma_coherent_handle = acb->dma_coherent_handle +
+ offsetof(struct CommandControlBlock, arcmsr_cdb);
+ break;
default:
dma_coherent_handle = acb->dma_coherent_handle;
break;
@@ -3316,6 +3696,29 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
+ writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
+ writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
+ dma_coherent_handle = acb->dma_coherent_handle2;
+ cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
+ cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
+ writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout \n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
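One detail worth noting in the type E branch above: the high half of the 64-bit DMA handle is taken as (handle >> 16) >> 16 rather than >> 32, so the expression stays well defined even when dma_addr_t is only 32 bits wide. A tiny sketch of the split using a plain uint64_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t handle = 0x0000000123456000ULL;	/* illustrative address */
	uint32_t lo = (uint32_t)(handle & 0xffffffff);
	uint32_t hi = (uint32_t)((handle >> 16) >> 16);	/* safe even for a
							   32-bit handle type */

	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}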
@@ -3356,83 +3759,22 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
}
break;
- }
-}
-
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_B *reg = acb->pmuB;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_C __iomem *reg = acb->pmuC;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ break;
}
- return;
}
-static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct MessageUnit_D *reg = acb->pmuD;
-
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
- ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
- ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ (acb->acb_flags & ACB_F_BUS_RESET) ||
+ (acb->acb_flags & ACB_F_ABORT)) {
mod_timer(&acb->eternal_timer,
jiffies + msecs_to_jiffies(6 * HZ));
} else {
@@ -3448,32 +3790,40 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
msecs_to_jiffies(6 * HZ));
return;
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
- reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies +
- msecs_to_jiffies(6 * HZ));
- }
-}
-
-static void arcmsr_request_device_map(struct timer_list *t)
-{
- struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
- switch (acb->adapter_type) {
+ switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_hbaA_request_device_map(acb);
- }
- break;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ break;
+ }
case ACB_ADAPTER_TYPE_B: {
- arcmsr_hbaB_request_device_map(acb);
- }
- break;
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
+ break;
+ }
case ACB_ADAPTER_TYPE_C: {
- arcmsr_hbaC_request_device_map(acb);
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ default:
+ return;
}
- break;
- case ACB_ADAPTER_TYPE_D:
- arcmsr_hbaD_request_device_map(acb);
- break;
+ acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
}
@@ -3524,6 +3874,20 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
}
}
+static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &pmu->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'start adapter "
+ "background rebulid' timeout \n", pACB->host->host_no);
+ }
+}
+
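The type E doorbell works edge-style: the driver keeps a cached copy of the outbound word, XOR-flips the message bit, and writes the whole word back, so every ring changes the bit's level exactly once. A self-contained sketch of the toggle pattern, with a plain variable standing in for the iobound_doorbell register and an illustrative bit value:

#include <stdint.h>
#include <stdio.h>

#define MESSAGE_CMD_DONE 0x02000000u	/* illustrative bit position */

static uint32_t iobound_doorbell;	/* stands in for the MMIO register */

static void ring_doorbell(uint32_t *cached)
{
	*cached ^= MESSAGE_CMD_DONE;	/* flip the level... */
	iobound_doorbell = *cached;	/* ...and publish the whole word */
}

int main(void)
{
	uint32_t out_doorbell = 0;
	int i;

	for (i = 0; i < 3; i++) {
		ring_doorbell(&out_doorbell);
		printf("write %d: doorbell=0x%08x\n", i, iobound_doorbell);
	}
	return 0;
}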
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -3539,6 +3903,9 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_start_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_start_bgrb(acb);
+ break;
}
}
@@ -3558,10 +3925,19 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
- /*clear interrupt and message state*/
- writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ uint32_t outbound_doorbell, i;
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(reg->iop2drv_doorbell);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
+ } else
+ break;
+ }
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -3607,6 +3983,27 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ uint32_t i, tmp;
+
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ tmp = acb->in_doorbell;
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ if ((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
}
}
@@ -3658,6 +4055,19 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
writel(0xD, &pmuC->write_sequence);
} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+ } else if (acb->dev_id == 0x1884) {
+ struct MessageUnit_E __iomem *pmuE = acb->pmuE;
+ do {
+ count++;
+ writel(0x4, &pmuE->write_sequence_3xxx);
+ writel(0xB, &pmuE->write_sequence_3xxx);
+ writel(0x2, &pmuE->write_sequence_3xxx);
+ writel(0x7, &pmuE->write_sequence_3xxx);
+ writel(0xD, &pmuE->write_sequence_3xxx);
+ mdelay(10);
+ } while (((readl(&pmuE->host_diagnostic_3xxx) &
+ ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
+ writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
} else if ((acb->dev_id == 0x1214)) {
writel(0x20, pmuD->reset_request);
} else {
@@ -3671,6 +4081,45 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
msleep(1000);
return;
}
+
+static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
+{
+ bool rtn = true;
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ rtn = ((readl(&reg->outbound_msgaddr1) &
+ ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ rtn = ((readl(reg->iop2drv_doorbell) &
+ ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
+ true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ rtn = (readl(&reg->host_diagnostic_3xxx) &
+ ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
+ }
+ break;
+ }
+ return rtn;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
@@ -3703,7 +4152,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
rtnval = arcmsr_abort_allcmd(acb);
/* clear all outbound posted Q */
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
scsi_dma_unmap(ccb->pcmd);
@@ -3725,197 +4174,55 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterControlBlock *acb;
- uint32_t intmask_org, outbound_doorbell;
int retry_count = 0;
int rtn = FAILED;
acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
- printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
+ pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
+ " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
acb->num_resets++;
- switch(acb->adapter_type){
- case ACB_ADAPTER_TYPE_A:{
- if (acb->acb_flags & ACB_F_BUS_RESET){
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_A __iomem *reg;
- reg = acb->pmuA;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep_again:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep_again;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
- writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_B:{
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = FAILED;
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_C:{
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_C __iomem *reg;
- reg = acb->pmuC;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- arcmsr_clear_doorbell_queue_buffer(acb);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_D: {
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- pr_notice("arcmsr: there is an bus reset"
- " eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags
- & ACB_F_BUS_RESET) == 0, 220 * HZ);
- if (timeout)
- return SUCCESS;
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_D *reg;
- reg = acb->pmuD;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
- nap:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(reg->sample_at_reset) & 0x80) != 0) {
- pr_err("arcmsr%d: waiting for "
- "hw bus reset return, retry=%d\n",
- acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- pr_err("arcmsr%d: waiting for hw bus"
- " reset return, "
- "RETRY TERMINATED!!\n",
- acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto nap;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- arcmsr_clear_doorbell_queue_buffer(acb);
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- pr_err("arcmsr: scsi bus reset "
- "eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags
+ & ACB_F_BUS_RESET) == 0, 220 * HZ);
+ if (timeout)
+ return SUCCESS;
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+wait_reset_done:
+ ssleep(ARCMSR_SLEEPTIME);
+ if (arcmsr_reset_in_progress(acb)) {
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ pr_notice("arcmsr%d: waiting for hw bus reset"
+ " return, RETRY TERMINATED!!\n",
+ acb->host->host_no);
+ return FAILED;
}
- break;
+ retry_count++;
+ goto wait_reset_done;
}
+ arcmsr_iop_init(acb);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ pr_notice("arcmsr: scsi bus reset eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
}
return rtn;
}
@@ -3953,7 +4260,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
}
intmask_org = arcmsr_disable_outbound_ints(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -3999,6 +4306,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
case PCI_DEVICE_ID_ARECA_1880:
+ case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
default:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index f4775ca70bab..27bda2b05de6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* have valid data in the sense buffer that could
* confuse the higher levels.
*/
- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
/*
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 3e1caec82554..10a63be92544 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1957,7 +1957,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
};
- *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
+ *npciids = ARRAY_SIZE(__pciids);
*pciids = __pciids;
}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index df6760ca0911..9685efc59b16 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -35,10 +35,10 @@
#define BFA_TRC_TS(_trcm) \
({ \
- struct timeval tv; \
+ struct timespec64 ts; \
\
- do_gettimeofday(&tv); \
- (tv.tv_sec*1000000+tv.tv_usec); \
+ ktime_get_ts64(&ts); \
+ (ts.tv_sec * 1000000 + ts.tv_nsec / 1000); \
})
#ifndef BFA_TRC_TS
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e81707f938cb..3d0c96a5c873 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1455,7 +1455,8 @@ struct bfa_aen_entry_s {
enum bfa_aen_category aen_category;
u32 aen_type;
union bfa_aen_data_u aen_data;
- struct timeval aen_tv;
+ u64 aen_tv_sec;
+ u64 aen_tv_usec;
u32 seq_num;
u32 bfad_num;
};
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b8dadc9cc993..d3b00a475aeb 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- rspnid->spn_len = (u8) strlen((char *)name);
- strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
+ strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- rsnn_nn->snn_len = (u8) strlen((char *)name);
- strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+ strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 5f53b3276234..2c85f5b1f9c1 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -468,7 +468,7 @@ bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
}
bfa_status_t
-bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
struct bfa_itnim_s *itnim;
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
@@ -1478,6 +1478,7 @@ bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
return BFA_STATUS_IOPROFILE_OFF;
itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ /* unsigned 32-bit time_t overflow here in y2106 */
itnim->ioprofile.io_profile_start_time =
bfa_io_profile_start_time(itnim->bfa);
itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index e93921dec347..ec8f863540ae 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -136,7 +136,7 @@ struct bfa_fcpim_s {
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
bfa_boolean_t ioredirect;
bfa_boolean_t io_profile;
- u32 io_profile_start_time;
+ time64_t io_profile_start_time;
bfa_fcpim_profile_t profile_comp;
bfa_fcpim_profile_t profile_start;
};
@@ -310,7 +310,7 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 4aa61e20e82d..932feb0ed4da 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcpy(port_cfg->sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname, driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/*
* Host OS Info :
@@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_OS_STR_LEN);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
} else {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Append host OS Patch Info */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_patch,
- BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_patch,
+ BFA_SYMNAME_MAXLEN);
}
/* null terminate */
@@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->node_sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcpy(port_cfg->node_sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->node_sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 638c0a2857f7..b4f2c1d8742e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strncpy(hba_attr->driver_version, (char *)driver_info->version,
+ strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strncpy(hba_attr->os_name, driver_info->host_os_name,
+ strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
- strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
- strncat(hba_attr->os_name, driver_info->host_os_patch,
- sizeof(driver_info->host_os_patch));
+ strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(hba_attr->os_name));
+ strlcat(hba_attr->os_name, driver_info->host_os_patch,
+ sizeof(hba_attr->os_name));
}
/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strncpy(hba_attr->node_sym_name.symname,
+ strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
+ strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
+ strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strncpy(port_attr->port_sym_name.symname,
- (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+ strlcpy(port_attr->port_sym_name.symname,
+ bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strncpy((char *)psymbl,
- (char *) &
- (bfa_fcs_lport_get_psym_name
+ strlcpy(symbl,
+ (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs)))] = 0;
- strncat((char *)psymbl,
- (char *) &(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
+
+ strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
@@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
- u8 *psymbl = &symbl[0];
int len;
/* Avoid sending RSPN in the following states. */
@@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs)))] = 0;
+ sizeof(symbl));
- strncat((char *)psymbl,
+ strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
- bfa_fcs_lport_get_fcid(port), 0, psymbl);
+ bfa_fcs_lport_get_fcid(port), 0, symbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 256f4afaccf9..16d3aeb0e572 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1809,13 +1809,12 @@ static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
- struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
- do_gettimeofday(&tv);
- enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -1826,6 +1825,9 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
+ disable_req.clscode = cpu_to_be16(ioc->clscode);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -2803,7 +2805,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index da1721e0d167..079bc77f4102 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -96,14 +96,11 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_busy = BFA_FALSE;
if (status == BFA_STATUS_OK) {
- struct timeval tv;
-
memcpy(port->stats, port->stats_dma.kva,
sizeof(union bfa_port_stats_u));
bfa_port_stats_swap(port, port->stats);
- do_gettimeofday(&tv);
- port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
+ port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time;
}
if (port->stats_cbfn) {
@@ -124,16 +121,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
- struct timeval tv;
-
port->stats_status = status;
port->stats_busy = BFA_FALSE;
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
if (port->stats_cbfn) {
port->stats_cbfn(port->stats_cbarg, status);
@@ -471,8 +465,6 @@ void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod)
{
- struct timeval tv;
-
WARN_ON(!port);
port->dev = dev;
@@ -494,8 +486,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
bfa_trc(port, 0);
}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 26dc1bf14c85..0c3b200243ca 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -36,7 +36,7 @@ struct bfa_port_s {
bfa_port_stats_cbfn_t stats_cbfn;
void *stats_cbarg;
bfa_status_t stats_status;
- u32 stats_reset_time;
+ time64_t stats_reset_time;
union bfa_port_stats_u *stats;
struct bfa_dma_s stats_dma;
bfa_boolean_t endis_pending;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index e640223bab3c..6fc34fb20f00 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -288,18 +288,6 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
return 0;
}
-static u64
-bfa_get_log_time(void)
-{
- u64 system_time = 0;
- struct timeval tv;
- do_gettimeofday(&tv);
-
- /* We are interested in seconds only. */
- system_time = tv.tv_sec;
- return system_time;
-}
-
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
@@ -320,7 +308,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
- pl_recp->tv = bfa_get_log_time();
+ pl_recp->tv = ktime_get_real_seconds();
BFA_PL_LOG_REC_INCR(plog->tail);
if (plog->head == plog->tail)
@@ -350,8 +338,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strncpy(lp.log_entry.string_log, log_str,
- BFA_PL_STRING_LOG_SZ - 1);
+ strlcpy(lp.log_entry.string_log, log_str,
+ BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}
@@ -3047,7 +3035,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
struct bfa_fcport_ln_s *ln = &fcport->ln;
- struct timeval tv;
fcport->bfa = bfa;
ln->fcport = fcport;
@@ -3060,8 +3047,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
fcport->stats_dma_ready = BFA_FALSE;
/*
@@ -3295,9 +3281,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
union bfa_fcport_stats_u *ret;
if (complete) {
- struct timeval tv;
- if (fcport->stats_status == BFA_STATUS_OK)
- do_gettimeofday(&tv);
+ time64_t time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
bfa_q_deq(&fcport->stats_pending_q, &qe);
@@ -3312,7 +3296,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
bfa_fcport_fcoe_stats_swap(&ret->fcoe,
&fcport->stats->fcoe);
ret->fcoe.secs_reset =
- tv.tv_sec - fcport->stats_reset_time;
+ time - fcport->stats_reset_time;
}
}
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
@@ -3373,13 +3357,10 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
struct list_head *qe, *qen;
if (complete) {
- struct timeval tv;
-
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
bfa_q_deq(&fcport->statsclr_pending_q, &qe);
cb = (struct bfa_cb_pending_q_s *)qe;
@@ -6148,13 +6129,13 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
/*
* D-port
*/
-#define bfa_dport_result_start(__dport, __mode) do { \
- (__dport)->result.start_time = bfa_get_log_time(); \
- (__dport)->result.status = DPORT_TEST_ST_INPRG; \
- (__dport)->result.mode = (__mode); \
- (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
- (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
- (__dport)->result.lpcnt = (__dport)->lpcnt; \
+#define bfa_dport_result_start(__dport, __mode) do { \
+ (__dport)->result.start_time = ktime_get_real_seconds(); \
+ (__dport)->result.status = DPORT_TEST_ST_INPRG; \
+ (__dport)->result.mode = (__mode); \
+ (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
+ (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
+ (__dport)->result.lpcnt = (__dport)->lpcnt; \
} while (0)
static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
@@ -6588,7 +6569,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
switch (dport->i2hmsg.scn.state) {
case BFI_DPORT_SCN_TESTCOMP:
- dport->result.end_time = bfa_get_log_time();
+ dport->result.end_time = ktime_get_real_seconds();
bfa_trc(dport->bfa, dport->result.end_time);
dport->result.status = msg->info.testcomp.status;
@@ -6635,7 +6616,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
case BFI_DPORT_SCN_SUBTESTSTART:
subtesttype = msg->info.teststart.type;
dport->result.subtest[subtesttype].start_time =
- bfa_get_log_time();
+ ktime_get_real_seconds();
dport->result.subtest[subtesttype].status =
DPORT_TEST_ST_INPRG;
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index ea2278bc78a8..7e8fb6231d49 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -505,7 +505,7 @@ struct bfa_fcport_s {
struct list_head stats_pending_q;
struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
- u32 stats_reset_time; /* stats reset time stamp */
+ time64_t stats_reset_time; /* stats reset time stamp */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cf0466686804..bd7e6a6fc1f1 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -610,13 +610,12 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
- kva_elem->kva = vmalloc(kva_elem->mem_len);
+ kva_elem->kva = vzalloc(kva_elem->mem_len);
if (kva_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
- memset(kva_elem->kva, 0, kva_elem->mem_len);
}
/* Iterate through the DMA meminfo queue */
@@ -981,20 +980,20 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strncpy(driver_info.version, BFAD_DRIVER_VERSION,
- sizeof(driver_info.version) - 1);
+ strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ sizeof(driver_info.version));
if (host_name)
- strncpy(driver_info.host_machine_name, host_name,
- sizeof(driver_info.host_machine_name) - 1);
+ strlcpy(driver_info.host_machine_name, host_name,
+ sizeof(driver_info.host_machine_name));
if (os_name)
- strncpy(driver_info.host_os_name, os_name,
- sizeof(driver_info.host_os_name) - 1);
+ strlcpy(driver_info.host_os_name, os_name,
+ sizeof(driver_info.host_os_name));
if (os_patch)
- strncpy(driver_info.host_os_patch, os_patch,
- sizeof(driver_info.host_os_patch) - 1);
+ strlcpy(driver_info.host_os_patch, os_patch,
+ sizeof(driver_info.host_os_patch));
- strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name) - 1);
+ strlcpy(driver_info.os_device_name, bfad->pci_name,
+ sizeof(driver_info.os_device_name));
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
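
The strncpy() calls being removed here needed the explicit "sizeof(...) - 1" because strncpy() does not NUL-terminate when the source fills the buffer; strlcpy() always terminates and takes the full destination size. A sketch of the difference (copy_version() and VERSION_LEN are illustrative names):

#include <linux/string.h>

#define VERSION_LEN 10	/* illustrative buffer size */

static void copy_version(char dst[VERSION_LEN], const char *src)
{
	/*
	 * strlcpy() copies at most VERSION_LEN - 1 bytes and always
	 * writes a trailing NUL, so no "- 1" adjustment is needed.
	 */
	strlcpy(dst, src, VERSION_LEN);
}
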
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 13db3b7bc873..d4d276c757ea 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -487,7 +487,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
- struct bfad_port_s *port;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
@@ -502,8 +501,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
return 0;
}
- port = im_port->port;
-
vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
@@ -843,7 +840,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2fa195adc7a..3976e787ba64 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strncpy(iocmd->attr.port_symname.symname,
+ strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
- sizeof(port_attr.port_cfg.sym_name.symname));
+ sizeof(iocmd->attr.port_symname.symname));
iocmd->status = BFA_STATUS_OK;
return 0;
@@ -2094,13 +2094,11 @@ bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_profile_s *iocmd =
(struct bfa_bsg_fcpim_profile_s *)cmd;
- struct timeval tv;
unsigned long flags;
- do_gettimeofday(&tv);
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
- iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 05f523971348..349cfe7d055e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -81,7 +81,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
@@ -89,8 +89,6 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
fw_debug->debug_buffer,
@@ -125,7 +123,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
@@ -133,8 +131,6 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
fw_debug->debug_buffer,
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 06ce4ba2b7bc..af66275570c3 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,16 +141,28 @@ struct bfad_im_s {
} while (0)
/* post fc_host vendor event */
-#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
- do_gettimeofday(&(_entry)->aen_tv); \
- (_entry)->bfad_num = (_drv)->inst_no; \
- (_entry)->seq_num = (_cnt); \
- (_entry)->aen_category = (_cat); \
- (_entry)->aen_type = (_evt); \
- if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
- queue_work((_drv)->im->drv_workq, \
- &(_drv)->im->aen_im_notify_work); \
-} while (0)
+static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
+ struct bfad_s *drv, int cnt,
+ enum bfa_aen_category cat,
+ enum bfa_ioc_aen_event evt)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ /*
+ * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
+ * architectures, or in 2038 if user space interprets it
+ * as 'signed'.
+ */
+ entry->aen_tv_sec = ts.tv_sec;
+ entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ entry->bfad_num = drv->inst_no;
+ entry->seq_num = cnt;
+ entry->aen_category = cat;
+ entry->aen_type = evt;
+ if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
+ queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
+}
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
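
Turning bfad_im_post_vendor_event() from a macro into an inline function is what allows do_gettimeofday() to go: the timestamp is now taken with ktime_get_real_ts64() and split into seconds and microseconds explicitly. That conversion step in isolation (struct aen_stamp is a hypothetical container mirroring the aen_tv_sec/aen_tv_usec pair):

#include <linux/ktime.h>
#include <linux/time64.h>
#include <linux/types.h>

struct aen_stamp {	/* hypothetical, mirrors aen_tv_sec/aen_tv_usec */
	u32 sec;
	u32 usec;
};

static void aen_stamp_now(struct aen_stamp *st)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	st->sec = ts.tv_sec;			/* u32: wraps in y2106 */
	st->usec = ts.tv_nsec / NSEC_PER_USEC;	/* ns -> us */
}
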
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e6b9de7d41ac..65de1d0578a1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1552,7 +1552,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
rc = bnx2fc_shost_config(lport, parent);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
interface->netdev->name);
goto lp_config_err;
}
@@ -1560,7 +1560,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
/* Initialize the libfc library */
rc = bnx2fc_libfc_config(lport);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ printk(KERN_ERR PFX "Couldn't configure libfc\n");
goto shost_err;
}
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 26de61d65a4d..e8ae4d671d23 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,16 +1857,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
goto out;
}
- memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
/*
* Allocate task_ctx which is an array of pointers pointing to
@@ -1895,16 +1894,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
goto out3;
}
- memset(hba->task_ctx[i], 0, PAGE_SIZE);
addr = (u64)hba->task_ctx_dma[i];
task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
task_ctx_bdt->lo = cpu_to_le32((u32)addr);
@@ -2033,28 +2031,23 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] =
- dma_alloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_segments[i], 0,
- BNX2FC_HASH_TBL_CHUNK_SIZE);
}
- hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
pbl = hba->hash_tbl_pbl;
for (i = 0; i < segment_count; ++i) {
@@ -2111,27 +2104,26 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
+ mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl, 0x00, mem_size);
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
addr = (unsigned long) hba->t2_hash_tbl_dma +
((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@@ -2148,16 +2140,14 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->stats_buffer, 0x00, PAGE_SIZE);
return 0;
}
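
Every dma_alloc_coherent() + memset() pair in this series collapses into a single dma_zalloc_coherent() call, which returns an already-zeroed coherent buffer. The resulting allocation pattern, sketched (alloc_fw_table() is an illustrative name):

#include <linux/dma-mapping.h>

static void *alloc_fw_table(struct device *dev, size_t size,
			    dma_addr_t *dma_handle)
{
	/* Zeroed on success, NULL on failure; no follow-up memset(). */
	return dma_zalloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}
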
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a8ae1a019eea..e3d1c7c440c8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,56 +672,52 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->sq, 0, tgt->sq_mem_size);
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->cq, 0, tgt->cq_mem_size);
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->rq, 0, tgt->rq_mem_size);
tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -739,44 +735,43 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
- &tgt->xferq_dma, GFP_KERNEL);
+ tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->xferq, 0, tgt->xferq_mem_size);
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
- &tgt->confq_dma, GFP_KERNEL);
+ tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -792,15 +787,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
/* Allocate and map LCQ */
@@ -808,15 +802,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->lcq, 0, tgt->lcq_mem_size);
tgt->conn_db->rq_prod = 0x8000;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e0640e0f259f..8f03a869ac98 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -547,12 +547,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
- if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_wqe->lun[0];
- /* 57710 requires LUN field to be swapped */
- nopout_wqe->lun[0] = nopout_wqe->lun[1];
- nopout_wqe->lun[1] = tmp;
- }
+ /* 57710 requires LUN field to be swapped */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);
nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
@@ -1073,15 +1070,14 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
ep->qp.sq_first_qe = ep->qp.sq_virt;
ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
@@ -1110,14 +1106,13 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
ep->qp.cq_first_qe = ep->qp.cq_virt;
ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
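
The bnx2i LUN fix-up above uses the generic swap() helper from <linux/kernel.h>, which expands to the usual temporary-variable exchange and replaces three open-coded statements with one:

#include <linux/kernel.h>	/* swap() */
#include <linux/types.h>

static void swap_lun_words(u32 *lun)
{
	swap(lun[0], lun[1]);	/* same effect as the removed tmp shuffle */
}
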
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cb1711a5d7a3..ed2dae657964 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1258,7 +1258,7 @@ module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
-MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 96b31e5af91e..20244254325a 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -48,7 +48,6 @@
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
-#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0-ko"
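
The csiostor change inlines the license string, presumably because source-scanning tools look for a bare literal inside MODULE_LICENSE() and an indirection through a driver-local #define hides it from them. The resulting idiom:

#include <linux/module.h>

MODULE_DESCRIPTION("example module");	/* illustrative */
MODULE_LICENSE("Dual BSD/GPL");		/* bare literal, no #define */
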
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 931b1d8f9f3e..5f4e0a787bd1 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1216,7 +1216,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Queue mbox cmd, if another mbox cmd is active */
if (mbp->mb_cbfn == NULL) {
rv = -EBUSY;
- csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
hw->pfn, *((uint8_t *)mbp->mb));
goto error_out;
@@ -1244,14 +1244,14 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
rv = owner ? -EBUSY : -ETIMEDOUT;
csio_dbg(hw,
- "Couldnt own Mailbox %x op:0x%x "
+ "Couldn't own Mailbox %x op:0x%x "
"owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb), owner);
goto error_out;
} else {
if (mbm->mcurrent == NULL) {
csio_err(hw,
- "Couldnt own Mailbox %x "
+ "Couldn't own Mailbox %x "
"op:0x%x owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb),
owner);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ce1336414e0a..3f3af5e74a07 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1914,7 +1914,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (task->sc) {
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
} else {
- task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
if (!task->hdr) {
__kfree_skb(tdata->skb);
tdata->skb = NULL;
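
The libcxgbi switch to GFP_ATOMIC suggests cxgbi_conn_alloc_pdu() can be reached from a context that must not sleep; GFP_ATOMIC never blocks but fails more readily, which is why the error path immediately frees the skb. The allocation side, sketched (alloc_hdr_atomic() is an illustrative name):

#include <linux/slab.h>

static void *alloc_hdr_atomic(size_t len)
{
	/* No sleeping allowed here, so callers must tolerate NULL. */
	return kzalloc(len, GFP_ATOMIC);
}
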
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 9e39866d473b..7ec3f6b55dde 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
new file mode 100644
index 000000000000..339e42b03c49
--- /dev/null
+++ b/drivers/scsi/cxlflash/backend.h
@@ -0,0 +1,41 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+
+struct cxlflash_backend_ops {
+ struct module *module;
+ void __iomem * (*psa_map)(void *);
+ void (*psa_unmap)(void __iomem *);
+ int (*process_element)(void *);
+ int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
+ void (*unmap_afu_irq)(void *, int, void *);
+ int (*start_context)(void *);
+ int (*stop_context)(void *);
+ int (*afu_reset)(void *);
+ void (*set_master)(void *);
+ void * (*get_context)(struct pci_dev *, void *);
+ void * (*dev_context_init)(struct pci_dev *, void *);
+ int (*release_context)(void *);
+ void (*perst_reloads_same_image)(void *, bool);
+ ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
+ int (*allocate_afu_irqs)(void *, int);
+ void (*free_afu_irqs)(void *);
+ void * (*create_afu)(struct pci_dev *);
+ struct file * (*get_fd)(void *, struct file_operations *, int *);
+ void * (*fops_get_context)(struct file *);
+ int (*start_work)(void *, u64);
+ int (*fd_mmap)(struct file *, struct vm_area_struct *);
+ int (*fd_release)(struct inode *, struct file *);
+};
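
backend.h turns every CXL service the driver consumes into a function pointer trafficking in opaque void * cookies: callers bind to the table rather than to <misc/cxl.h>, so an alternate transport implementation can be slotted in later without touching them. A pared-down sketch of that dispatch shape (backend_ops and get_ctxid() are illustrative, not the driver's names):

struct backend_ops {
	int (*process_element)(void *ctx_cookie);
};

static int get_ctxid(const struct backend_ops *ops, void *ctx_cookie)
{
	/* The caller never learns the concrete context type. */
	return ops->process_element(ctx_cookie);
}
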
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6d95e8e147e0..102fd26ca886 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include "backend.h"
+
extern const struct file_operations cxlflash_cxl_fops;
#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg {
struct afu *afu;
+ const struct cxlflash_backend_ops *ops;
struct pci_dev *dev;
struct pci_device_id *dev_id;
struct Scsi_Host *host;
@@ -129,7 +132,7 @@ struct cxlflash_cfg {
int lr_port;
atomic_t scan_host_needed;
- struct cxl_afu *cxl_afu;
+ void *afu_cookie;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@@ -203,8 +206,7 @@ struct hwq {
* fields after this point
*/
struct afu *afu;
- struct cxl_context *ctx;
- struct cxl_ioctl_start_work work;
+ void *ctx_cookie;
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
new file mode 100644
index 000000000000..db1cadad5c5d
--- /dev/null
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -0,0 +1,168 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <misc/cxl.h>
+
+#include "backend.h"
+
+/*
+ * The following routines map the cxlflash backend operations to existing CXL
+ * kernel API function and are largely simple shims that provide an abstraction
+ * for converting generic context and AFU cookies into cxl_context or cxl_afu
+ * pointers.
+ */
+
+static void __iomem *cxlflash_psa_map(void *ctx_cookie)
+{
+ return cxl_psa_map(ctx_cookie);
+}
+
+static void cxlflash_psa_unmap(void __iomem *addr)
+{
+ cxl_psa_unmap(addr);
+}
+
+static int cxlflash_process_element(void *ctx_cookie)
+{
+ return cxl_process_element(ctx_cookie);
+}
+
+static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
+ irq_handler_t handler, void *cookie, char *name)
+{
+ return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
+}
+
+static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
+{
+ cxl_unmap_afu_irq(ctx_cookie, num, cookie);
+}
+
+static int cxlflash_start_context(void *ctx_cookie)
+{
+ return cxl_start_context(ctx_cookie, 0, NULL);
+}
+
+static int cxlflash_stop_context(void *ctx_cookie)
+{
+ return cxl_stop_context(ctx_cookie);
+}
+
+static int cxlflash_afu_reset(void *ctx_cookie)
+{
+ return cxl_afu_reset(ctx_cookie);
+}
+
+static void cxlflash_set_master(void *ctx_cookie)
+{
+ cxl_set_master(ctx_cookie);
+}
+
+static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_get_context(dev);
+}
+
+static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_dev_context_init(dev);
+}
+
+static int cxlflash_release_context(void *ctx_cookie)
+{
+ return cxl_release_context(ctx_cookie);
+}
+
+static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
+{
+ cxl_perst_reloads_same_image(afu_cookie, image);
+}
+
+static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
+ void *buf, size_t count)
+{
+ return cxl_read_adapter_vpd(dev, buf, count);
+}
+
+static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
+{
+ return cxl_allocate_afu_irqs(ctx_cookie, num);
+}
+
+static void cxlflash_free_afu_irqs(void *ctx_cookie)
+{
+ cxl_free_afu_irqs(ctx_cookie);
+}
+
+static void *cxlflash_create_afu(struct pci_dev *dev)
+{
+ return cxl_pci_to_afu(dev);
+}
+
+static struct file *cxlflash_get_fd(void *ctx_cookie,
+ struct file_operations *fops, int *fd)
+{
+ return cxl_get_fd(ctx_cookie, fops, fd);
+}
+
+static void *cxlflash_fops_get_context(struct file *file)
+{
+ return cxl_fops_get_context(file);
+}
+
+static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
+{
+ struct cxl_ioctl_start_work work = { 0 };
+
+ work.num_interrupts = irqs;
+ work.flags = CXL_START_WORK_NUM_IRQS;
+
+ return cxl_start_work(ctx_cookie, &work);
+}
+
+static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ return cxl_fd_mmap(file, vm);
+}
+
+static int cxlflash_fd_release(struct inode *inode, struct file *file)
+{
+ return cxl_fd_release(inode, file);
+}
+
+const struct cxlflash_backend_ops cxlflash_cxl_ops = {
+ .module = THIS_MODULE,
+ .psa_map = cxlflash_psa_map,
+ .psa_unmap = cxlflash_psa_unmap,
+ .process_element = cxlflash_process_element,
+ .map_afu_irq = cxlflash_map_afu_irq,
+ .unmap_afu_irq = cxlflash_unmap_afu_irq,
+ .start_context = cxlflash_start_context,
+ .stop_context = cxlflash_stop_context,
+ .afu_reset = cxlflash_afu_reset,
+ .set_master = cxlflash_set_master,
+ .get_context = cxlflash_get_context,
+ .dev_context_init = cxlflash_dev_context_init,
+ .release_context = cxlflash_release_context,
+ .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
+ .read_adapter_vpd = cxlflash_read_adapter_vpd,
+ .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
+ .free_afu_irqs = cxlflash_free_afu_irqs,
+ .create_afu = cxlflash_create_afu,
+ .get_fd = cxlflash_get_fd,
+ .fops_get_context = cxlflash_fops_get_context,
+ .start_work = cxlflash_start_work,
+ .fd_mmap = cxlflash_fd_mmap,
+ .fd_release = cxlflash_fd_release,
+};
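
cxl_hw.c is the lone concrete backend so far; the main.c hunks that follow bind it during probe, after which every former cxl_*() call goes through cfg->ops. The binding step, sketched under the assumption of the probe-path cfg and pdev shown below:

#include "common.h"	/* struct cxlflash_cfg, cxlflash_cxl_ops */

static void bind_cxl_backend(struct cxlflash_cfg *cfg, struct pci_dev *pdev)
{
	cfg->ops = &cxlflash_cxl_ops;
	cfg->afu_cookie = cfg->ops->create_afu(pdev);
}
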
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 38b3a9c84fd1..d8fe7ab870b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
cmd->parent = afu;
cmd->hwq_index = hwq_index;
+ cmd->sa.ioasc = 0;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
@@ -710,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}
if (likely(afu->afu_map)) {
- cxl_psa_unmap((void __iomem *)afu->afu_map);
+ cfg->ops->psa_unmap(afu->afu_map);
afu->afu_map = NULL;
}
}
@@ -738,7 +739,7 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
@@ -747,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
case UNMAP_THREE:
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
- cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
case UNMAP_TWO:
- cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
case UNMAP_ONE:
- cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
case FREE_IRQ:
- cxl_free_afu_irqs(hwq->ctx);
+ cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */
case UNDO_NOOP:
/* No action required */
@@ -782,15 +783,15 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
- WARN_ON(cxl_stop_context(hwq->ctx));
+ WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
if (index != PRIMARY_HWQ)
- WARN_ON(cxl_release_context(hwq->ctx));
- hwq->ctx = NULL;
+ WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
+ hwq->ctx_cookie = NULL;
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
@@ -1598,27 +1599,6 @@ out:
}
/**
- * start_context() - starts the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Return: A success or failure value from CXL services.
- */
-static int start_context(struct cxlflash_cfg *cfg, u32 index)
-{
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
-
- rc = cxl_start_context(hwq->ctx,
- hwq->work.work_element_descriptor,
- NULL);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
* read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host.
* @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
@@ -1640,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
- vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
+ vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@@ -1732,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map;
struct hwq *hwq;
+ void *cookie;
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1746,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Copy frequently used fields into hwq */
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
+ cookie = hwq->ctx_cookie;
- hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
+ hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
@@ -1925,13 +1907,13 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
struct hwq *hwq)
{
struct device *dev = &cfg->dev->dev;
- struct cxl_context *ctx = hwq->ctx;
+ void *ctx = hwq->ctx_cookie;
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2;
- rc = cxl_allocate_afu_irqs(ctx, num_irqs);
+ rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
@@ -1939,16 +1921,16 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
+ "SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
level = FREE_IRQ;
goto out;
}
- rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
+ rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
+ "SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
level = UNMAP_ONE;
@@ -1959,8 +1941,8 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
if (!is_primary_hwq)
goto out;
- rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
+ "SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
level = UNMAP_TWO;
@@ -1979,7 +1961,7 @@ out:
*/
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
- struct cxl_context *ctx;
+ void *ctx;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
@@ -1990,23 +1972,23 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
- ctx = cxl_get_context(cfg->dev);
+ ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else
- ctx = cxl_dev_context_init(cfg->dev);
- if (unlikely(!ctx)) {
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+ if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
}
- WARN_ON(hwq->ctx);
- hwq->ctx = ctx;
+ WARN_ON(hwq->ctx_cookie);
+ hwq->ctx_cookie = ctx;
/* Set it up as a master with the CXL */
- cxl_set_master(ctx);
+ cfg->ops->set_master(ctx);
/* Reset AFU when initializing primary context */
if (index == PRIMARY_HWQ) {
- rc = cxl_afu_reset(ctx);
+ rc = cfg->ops->afu_reset(ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: AFU reset failed rc=%d\n",
__func__, rc);
@@ -2020,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
goto err2;
}
- /* This performs the equivalent of the CXL_IOCTL_START_WORK.
- * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
- * element (pe) that is embedded in the context (ctx)
- */
- rc = start_context(cfg, index);
+ /* Finally, activate the context by starting it */
+ rc = cfg->ops->start_context(hwq->ctx_cookie);
if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE;
@@ -2037,9 +2016,9 @@ out:
err2:
term_intr(cfg, level, index);
if (index != PRIMARY_HWQ)
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
err1:
- hwq->ctx = NULL;
+ hwq->ctx_cookie = NULL;
goto out;
}
@@ -2094,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
struct hwq *hwq;
int i;
- cxl_perst_reloads_same_image(cfg->cxl_afu, true);
+ cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
@@ -2108,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Map the entire MMIO space of the AFU using the first context */
hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cxl_psa_map(hwq->ctx);
+ afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
if (!afu->afu_map) {
- dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
+ dev_err(dev, "%s: psa_map failed\n", __func__);
rc = -ENOMEM;
goto err1;
}
@@ -3670,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
+ cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;
/*
@@ -3701,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- cfg->cxl_afu = cxl_pci_to_afu(pdev);
+ cfg->afu_cookie = cfg->ops->create_afu(pdev);
rc = init_pci(cfg);
if (rc) {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 170fff5aeff6..2fe79df5c73c 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -810,20 +810,22 @@ err:
* init_context() - initializes a previously allocated context
* @ctxi: Previously allocated context
* @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained CXL context reference.
+ * @ctx: Previously obtained context cookie.
* @ctxid: Previously obtained process element associated with CXL context.
* @file: Previously obtained file associated with CXL context.
* @perms: User-specified permissions.
+ * @irqs: User-specified number of interrupts.
*/
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid, struct file *file,
- u32 perms)
+ void *ctx, int ctxid, struct file *file, u32 perms,
+ u64 irqs)
{
struct afu *afu = cfg->afu;
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+ ctxi->irqs = irqs;
ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
@@ -976,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
*/
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct dk_cxlflash_detach detach = { { 0 }, 0 };
@@ -986,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1014,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
- cxl_fd_release(inode, file);
+ cfg->ops->fd_release(inode, file);
out:
dev_dbg(dev, "%s: returning\n", __func__);
return 0;
@@ -1089,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
@@ -1099,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
int rc = 0;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1162,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
*/
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
int rc = 0;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1188,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
- rc = cxl_fd_mmap(file, vma);
+ rc = cfg->ops->fd_mmap(file, vma);
if (likely(!rc)) {
/* Insert ourself in the mmap fault handler path */
ctxi->cxl_mmap_vmops = vma->vm_ops;
@@ -1307,23 +1309,23 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
- struct cxl_ioctl_start_work *work;
struct ctx_info *ctxi = NULL;
struct lun_access *lun_access = NULL;
int rc = 0;
u32 perms;
int ctxid = -1;
+ u64 irqs = attach->num_interrupts;
u64 flags = 0UL;
u64 rctxid = 0UL;
struct file *file = NULL;
- struct cxl_context *ctx = NULL;
+ void *ctx = NULL;
int fd = -1;
- if (attach->num_interrupts > 4) {
+ if (irqs > 4) {
dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, attach->num_interrupts);
+ __func__, irqs);
rc = -EINVAL;
goto out;
}
@@ -1394,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1402,25 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- work = &ctxi->work;
- work->num_interrupts = attach->num_interrupts;
- work->flags = CXL_START_WORK_NUM_IRQS;
-
- rc = cxl_start_work(ctx, work);
+ rc = cfg->ops->start_work(ctx, irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1431,7 +1429,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
/* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms);
+ init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
@@ -1479,8 +1477,8 @@ out:
err:
/* Cleanup CXL context; okay to 'stop' even if it was not started */
if (!IS_ERR_OR_NULL(ctx)) {
- cxl_stop_context(ctx);
- cxl_release_context(ctx);
+ cfg->ops->stop_context(ctx);
+ cfg->ops->release_context(ctx);
ctx = NULL;
}
@@ -1529,10 +1527,10 @@ static int recover_context(struct cxlflash_cfg *cfg,
int fd = -1;
int ctxid = -1;
struct file *file;
- struct cxl_context *ctx;
+ void *ctx;
struct afu *afu = cfg->afu;
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1540,21 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_start_work(ctx, &ctxi->work);
+ rc = cfg->ops->start_work(ctx, ctxi->irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err1;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err2;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1601,9 +1599,9 @@ err3:
fput(file);
put_unused_fd(fd);
err2:
- cxl_stop_context(ctx);
+ cfg->ops->stop_context(ctx);
err1:
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
goto out;
}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 0b5976829913..35c3cbf83fb5 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -96,15 +96,15 @@ struct ctx_info {
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
- struct cxl_ioctl_start_work work;
u64 ctxid;
+ u64 irqs; /* Number of interrupts requested for context */
pid_t pid;
bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
struct kref kref;
- struct cxl_context *ctx;
+ void *ctx;
struct cxlflash_cfg *cfg;
struct list_head luns; /* LUNs attached to this context */
const struct vm_operations_struct *cxl_mmap_vmops;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fd22dc6ab5d9..022e421c2185 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80
+#define TPGS_SUPPORT_ALL 0xdf
#define RTPG_FMT_MASK 0x70
#define RTPG_FMT_EXT_HDR 0x10
@@ -81,6 +82,7 @@ struct alua_port_group {
int tpgs;
int state;
int pref;
+ int valid_states;
unsigned flags; /* used for optimizing STPG */
unsigned char transition_tmo;
unsigned long expiry;
@@ -243,6 +245,7 @@ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
pg->group_id = group_id;
pg->tpgs = tpgs;
pg->state = SCSI_ACCESS_STATE_OPTIMAL;
+ pg->valid_states = TPGS_SUPPORT_ALL;
if (optimize_stpg)
pg->flags |= ALUA_OPTIMIZE_STPG;
kref_init(&pg->kref);
@@ -516,7 +519,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
- int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
+ int len, k, off, bufflen = ALUA_RTPG_SIZE;
unsigned char *desc, *buff;
unsigned err, retval;
unsigned int tpg_desc_tbl_off;
@@ -541,6 +544,22 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
if (retval) {
+ /*
+ * Some (broken) implementations have a habit of returning
+ * an error during things like firmware update etc.
+ * But if the target only supports active/optimized there's
+ * not much we can do anyway, since we cannot switch to
+ * another path.
+ * So ignore any errors to avoid spurious failures during
+ * path failover.
+ */
+ if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: ignoring rtpg result %d\n",
+ ALUA_DH_NAME, retval);
+ kfree(buff);
+ return SCSI_DH_OK;
+ }
if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
@@ -652,7 +671,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_unlock();
}
if (tmp_pg == pg)
- valid_states = desc[1];
+ tmp_pg->valid_states = desc[1];
spin_unlock_irqrestore(&tmp_pg->lock, flags);
}
kref_put(&tmp_pg->kref, release_port_group);
@@ -665,13 +684,13 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
pg->pref ? "preferred" : "non-preferred",
- valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
- valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
- valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
- valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
- valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
- valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
- valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+ pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
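
The ALUA rework caches the RTPG "supported states" byte in pg->valid_states, seeded with TPGS_SUPPORT_ALL (0xdf, every defined state bit), and consults it when a later RTPG fails: if the mask shows nothing beyond active/optimized, there is no other state to fail over to, so the error is swallowed. The bit test in isolation:

#include <linux/types.h>

#define TPGS_SUPPORT_OPTIMIZED	0x01
#define TPGS_SUPPORT_ALL	0xdf

/* True when active/optimized is the only state the target reports. */
static bool ao_only(int valid_states)
{
	return (valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0;
}
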
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 5e3d909cfc53..6d3e1cb4fea6 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -108,24 +108,6 @@ void fnic_debugfs_terminate(void)
}
/*
- * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
- * Or Open fc_trace_enable file for fc_trace
- * @inode: The inode pointer.
- * @file: The file pointer to attach the trace enable/disable flag.
- *
- * Description:
- * This routine opens a debugsfs file trace_enable or fc_trace_enable.
- *
- * Returns:
- * This function returns zero if successful.
- */
-static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/*
* fnic_trace_ctrl_read -
* Read trace_enable ,fc_trace_enable
* or fc_trace_clear debugfs file
@@ -220,7 +202,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
static const struct file_operations fnic_trace_ctrl_fops = {
.owner = THIS_MODULE,
- .open = fnic_trace_ctrl_open,
+ .open = simple_open,
.read = fnic_trace_ctrl_read,
.write = fnic_trace_ctrl_write,
};
@@ -632,7 +614,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
- getnstimeofday(&stats->stats_timestamps.last_reset_time);
+ ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
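
simple_open() from fs/libfs.c does exactly what the deleted fnic_trace_ctrl_open() did, so the file_operations can point at the generic helper. Its entire body, for reference:

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
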
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 999fc7547560..c7bf316d8e83 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -442,15 +442,13 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kmalloc(sizeof(*vlan),
- GFP_ATOMIC);
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
- memset(vlan, 0, sizeof(struct fcoe_vlan));
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 242e2ee494a1..8cbd3c9f0b4c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -906,7 +906,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
- "hdr status = %s tag = 0x%x sc = 0x%p"
+ "hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
fnic_fcpio_status_to_str(hdr_status),
id, sc,
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index e007feedbf72..9daa6ada6fa0 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -18,8 +18,8 @@
#define _FNIC_STATS_H_
struct stats_timestamps {
- struct timespec last_reset_time;
- struct timespec last_read_time;
+ struct timespec64 last_reset_time;
+ struct timespec64 last_read_time;
};
struct io_path_stats {
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4826f596cb31..abddde11982b 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -111,7 +111,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
int len = 0;
unsigned long flags;
char str[KSYM_SYMBOL_LEN];
- struct timespec val;
+ struct timespec64 val;
fnic_trace_data_t *tbp;
spin_lock_irqsave(&fnic_trace_lock, flags);
@@ -129,10 +129,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -140,8 +140,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -171,10 +171,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -182,8 +182,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -217,29 +217,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
{
int len = 0;
int buf_size = debug->buf_size;
- struct timespec val1, val2;
+ struct timespec64 val1, val2;
- getnstimeofday(&val1);
+ ktime_get_real_ts64(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Current time : [%ld:%ld]\n"
- "Last stats reset time: [%ld:%ld]\n"
- "Last stats read time: [%ld:%ld]\n"
- "delta since last reset: [%ld:%ld]\n"
- "delta since last read: [%ld:%ld]\n",
- val1.tv_sec, val1.tv_nsec,
- stats->stats_timestamps.last_reset_time.tv_sec,
+ "Current time : [%lld:%ld]\n"
+ "Last stats reset time: [%lld:%09ld]\n"
+ "Last stats read time: [%lld:%ld]\n"
+ "delta since last reset: [%lld:%ld]\n"
+ "delta since last read: [%lld:%ld]\n",
+ (s64)val1.tv_sec, val1.tv_nsec,
+ (s64)stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
- stats->stats_timestamps.last_read_time.tv_sec,
+ (s64)stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
stats->stats_timestamps.last_read_time = val1;
@@ -403,12 +403,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
- jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
- jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+ jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Last ISR time: %llu (%8lu.%8lu)\n"
- "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Last ISR time: %llu (%8llu.%09lu)\n"
+ "Last ACK time: %llu (%8llu.%09lu)\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
@@ -425,9 +425,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
- val1.tv_sec, val1.tv_nsec,
+ (s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
- val2.tv_sec, val2.tv_nsec,
+ (s64)val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
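
The fnic hunks above are a textbook y2038 conversion: struct timespec becomes struct timespec64, getnstimeofday() becomes ktime_get_real_ts64(), jiffies_to_timespec()/timespec_sub() become their 64-bit counterparts, and tv_sec is printed through an explicit (u64)/(s64) cast with %llu/%lld, while %09lu zero-pads tv_nsec to nine digits so the fraction reads correctly. A minimal sketch of the same pattern, assuming a caller-supplied buffer; fmt_trace_time() is a hypothetical helper, not from the patch:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>

static int fmt_trace_time(char *buf, size_t len, unsigned long stamp_jiffies)
{
	struct timespec64 now, stamp;

	ktime_get_real_ts64(&now);			/* was getnstimeofday() */
	jiffies_to_timespec64(stamp_jiffies, &stamp);	/* was jiffies_to_timespec() */

	/* tv_sec is 64-bit on all architectures now: cast for %lld, and
	 * zero-pad tv_nsec so 1.5e8 ns prints as .150000000, not .15. */
	return snprintf(buf, len, "now %lld.%09ld stamp %lld.%09ld\n",
			(s64)now.tv_sec, now.tv_nsec,
			(s64)stamp.tv_sec, stamp.tv_nsec);
}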
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 83357b0367d8..e7fd2877c19c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -99,12 +99,43 @@ struct hisi_sas_hw_error {
const struct hisi_sas_hw_error *sub;
};
+struct hisi_sas_rst {
+ struct hisi_hba *hisi_hba;
+ struct completion *completion;
+ struct work_struct work;
+ bool done;
+};
+
+#define HISI_SAS_RST_WORK_INIT(r, c) \
+ { .hisi_hba = hisi_hba, \
+ .completion = &c, \
+ .work = __WORK_INITIALIZER(r.work, \
+ hisi_sas_sync_rst_work_handler), \
+ .done = false, \
+ }
+
+#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
+ DECLARE_COMPLETION_ONSTACK(c); \
+ DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
+ struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
+
+enum hisi_sas_bit_err_type {
+ HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
+ HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
+};
+
+enum hisi_sas_phy_event {
+ HISI_PHYE_PHY_UP = 0U,
+ HISI_PHYE_LINK_RESET,
+ HISI_PHYES_NUM,
+};
+
struct hisi_sas_phy {
+ struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
- struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
u64 frame_rcvd_size;
@@ -205,13 +236,16 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*free_device)(struct hisi_hba *hisi_hba,
+ void (*clear_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
+ void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
+ int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data);
int max_command_entries;
int complete_hdr_size;
};
@@ -225,6 +259,7 @@ struct hisi_hba {
struct device *dev;
void __iomem *regs;
+ void __iomem *sgpio_regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
@@ -409,7 +444,8 @@ extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
+extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
+ int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
@@ -425,5 +461,9 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
+extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event);
+extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
#endif
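
The HISI_SAS_DECLARE_RST_WORK_ON_STACK() and hisi_sas_sync_rst_work_handler() additions above turn the asynchronous controller reset into a synchronous call: the caller declares an on-stack completion plus work item, queues the work, and sleeps until the handler records success and signals the completion (used by hisi_sas_clear_nexus_ha() and the v3 slot_reset callback further down). A minimal generic sketch of that on-stack work-plus-completion pattern, with hypothetical names, assuming a sleepable process context:

#include <linux/completion.h>
#include <linux/workqueue.h>

struct sync_work {
	struct work_struct work;
	struct completion *completion;
	bool done;
};

static void sync_work_fn(struct work_struct *work)
{
	struct sync_work *sw = container_of(work, struct sync_work, work);

	/* ... perform the real operation, e.g. a controller reset ... */
	sw->done = true;		/* record success for the waiter */
	complete(sw->completion);	/* wake the queueing thread */
}

static bool run_on_wq_and_wait(struct workqueue_struct *wq)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct sync_work sw = { .completion = &done, .done = false };

	INIT_WORK_ONSTACK(&sw.work, sync_work_fn);
	queue_work(wq, &sw.work);
	wait_for_completion(&done);	/* stack objects must live until here */
	destroy_work_on_stack(&sw.work);
	return sw.done;
}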
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5f503cb09508..2d4dbed03ee3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -22,10 +22,12 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
+static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
-u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
+u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
- switch (cmd) {
+ switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_RECV:
@@ -77,10 +79,26 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_ZAC_MGMT_OUT:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
default:
+ {
+ if (fis->command == ATA_CMD_SET_MAX) {
+ switch (fis->features) {
+ case ATA_SET_MAX_PASSWD:
+ case ATA_SET_MAX_LOCK:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_SET_MAX_PASSWD_DMA:
+ case ATA_SET_MAX_UNLOCK_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ default:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ }
+ }
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return HISI_SAS_SATA_PROTOCOL_PIO;
}
+ }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
@@ -192,7 +210,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter, slot->n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
if (sas_dev)
@@ -431,7 +450,8 @@ err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
if (n_elem)
- dma_unmap_sg(dev, task->scatter, n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
prep_out:
return rc;
@@ -578,6 +598,9 @@ static int hisi_sas_dev_found(struct domain_device *device)
}
}
+ dev_info(dev, "dev[%d:%x] found\n",
+ sas_dev->device_id, sas_dev->dev_type);
+
return 0;
}
@@ -617,7 +640,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
static void hisi_sas_phyup_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
- container_of(work, struct hisi_sas_phy, phyup_ws);
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -626,10 +649,37 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
+static void hisi_sas_linkreset_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
+}
+
+static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
+ [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
+ [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+};
+
+bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+
+ if (WARN_ON(event >= HISI_PHYES_NUM))
+ return false;
+
+ return queue_work(hisi_hba->wq, &phy->works[event]);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
+
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
phy->hisi_hba = hisi_hba;
phy->port = NULL;
@@ -647,7 +697,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
sas_phy->lldd_phy = phy;
- INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
+ for (i = 0; i < HISI_PHYES_NUM; i++)
+ INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -702,7 +753,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
-static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
+void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
struct hisi_sas_device *sas_dev;
struct domain_device *device;
@@ -719,6 +770,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
hisi_sas_release_task(hisi_hba, device);
}
}
+EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
struct domain_device *device)
@@ -733,17 +785,21 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- dev_info(dev, "found dev[%d:%x] is gone\n",
+ dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
- hisi_sas_internal_task_abort(hisi_hba, device,
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- hisi_sas_dereg_device(hisi_hba, device);
+ hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
- device->lldd_dev = NULL;
- memset(sas_dev, 0, sizeof(*sas_dev));
+ hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ device->lldd_dev = NULL;
+ }
+
+ if (hisi_hba->hw->free_device)
+ hisi_hba->hw->free_device(sas_dev);
sas_dev->dev_type = SAS_PHY_UNUSED;
}
@@ -859,12 +915,13 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- dev_err(dev, "abort tmf: TMF task timeout\n");
+ dev_err(dev, "abort tmf: TMF task timeout and not done\n");
if (slot)
slot->task = NULL;
goto ex_err;
- }
+ } else
+ dev_err(dev, "abort tmf: TMF task timeout\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -985,27 +1042,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
sizeof(ssp_task), tmf);
}
-static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
- struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
+static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
- struct hisi_sas_device *sas_dev;
- struct domain_device *device;
+ u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
int i;
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
- sas_dev = &hisi_hba->devices[i];
- device = sas_dev->sas_device;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
+ struct asd_sas_port *sas_port;
+ struct hisi_sas_port *port;
+ struct hisi_sas_phy *phy = NULL;
+ struct asd_sas_phy *sas_phy;
+
if ((sas_dev->dev_type == SAS_PHY_UNUSED)
- || !device || (device->port != sas_port))
+ || !device || !device->port)
continue;
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
+ sas_port = device->port;
+ port = to_hisi_sas_port(sas_port);
+
+ list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
+ if (state & BIT(sas_phy->id)) {
+ phy = sas_phy->lldd_phy;
+ break;
+ }
+
+ if (phy) {
+ port->id = phy->port_id;
- /* Update linkrate of directly attached device. */
- if (!device->parent)
- device->linkrate = linkrate;
+ /* Update linkrate of directly attached device. */
+ if (!device->parent)
+ device->linkrate = phy->sas_phy.linkrate;
- hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ } else
+ port->id = 0xff;
}
}
@@ -1020,21 +1092,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
bool do_port_check = !!(_sas_port != sas_port);
if (!sas_phy->phy->enabled)
continue;
/* Report PHY state change to libsas */
- if (state & (1 << phy_no)) {
- if (do_port_check && sas_port) {
+ if (state & BIT(phy_no)) {
+ if (do_port_check && sas_port && sas_port->port_dev) {
struct domain_device *dev = sas_port->port_dev;
_sas_port = sas_port;
- port->id = phy->port_id;
- hisi_sas_refresh_port_id(hisi_hba,
- sas_port, sas_phy->linkrate);
if (DEV_IS_EXPANDER(dev->dev_type))
sas_ha->notify_port_event(sas_phy,
@@ -1045,8 +1113,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
hisi_sas_phy_down(hisi_hba, phy_no, 0);
}
-
- drain_workqueue(hisi_hba->shost->work_q);
}
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
@@ -1063,7 +1129,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
return -1;
- dev_dbg(dev, "controller resetting...\n");
+ dev_info(dev, "controller resetting...\n");
old_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1072,6 +1138,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (rc) {
dev_warn(dev, "controller reset failed (%d)\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
goto out;
}
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1083,15 +1150,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
- drain_workqueue(hisi_hba->wq);
- drain_workqueue(shost->work_q);
+ hisi_sas_refresh_port_id(hisi_hba);
+ scsi_unblock_requests(shost);
state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_dbg(dev, "controller reset complete\n");
+ dev_info(dev, "controller reset complete\n");
out:
- scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
return rc;
@@ -1134,6 +1200,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
+ if (rc2 < 0) {
+ dev_err(dev, "abort task: internal abort (%d)\n", rc2);
+ return TMF_RESP_FUNC_FAILED;
+ }
+
/*
* If the TMF finds that the IO is not in the device and also
* the internal abort does not succeed, then it is safe to
@@ -1151,8 +1222,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task: internal abort failed\n");
+ goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
@@ -1163,7 +1238,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
+ if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
+ task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1178,12 +1254,29 @@ out:
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
tmf_task.tmf = TMF_ABORT_TASK_SET;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+
return rc;
}
@@ -1213,20 +1306,25 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- unsigned long flags;
+ struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
if (sas_dev->dev_status != HISI_SAS_DEV_EH)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
- hisi_sas_internal_task_abort(hisi_hba, device,
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
+ if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_task(hisi_hba, device);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1249,8 +1347,10 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then hardreset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- if (rc == TMF_RESP_FUNC_FAILED)
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
phy = sas_get_local_phy(device);
@@ -1266,6 +1366,14 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
+ goto out;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
+
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1283,8 +1391,14 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- return hisi_sas_controller_reset(hisi_hba);
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return TMF_RESP_FUNC_COMPLETE;
+
+ return TMF_RESP_FUNC_FAILED;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1441,8 +1555,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int res;
+	/*
+	 * If the hw ops do not implement prep_abort, this hardware either
+	 * does not support an internal abort or does not need one. Return
+	 * TMF_RESP_FUNC_FAILED so the remaining error-handling steps carry
+	 * on as though the internal abort had been executed and had
+	 * completed on the CQ.
+	 */
if (!hisi_hba->hw->prep_abort)
- return -EOPNOTSUPP;
+ return TMF_RESP_FUNC_FAILED;
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
@@ -1473,9 +1593,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (slot)
slot->task = NULL;
- dev_err(dev, "internal task abort: timeout.\n");
+ dev_err(dev, "internal task abort: timeout and not done.\n");
+ res = -EIO;
goto exit;
- }
+ } else
+ dev_err(dev, "internal task abort: timeout.\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -1507,6 +1629,22 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
+static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
+{
+}
+
+static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ if (!hisi_hba->hw->write_gpio)
+ return -EOPNOTSUPP;
+
+ return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
+ reg_index, reg_count, write_data);
+}
+
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
phy->phy_attached = 0;
@@ -1561,6 +1699,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
+static struct device_attribute *host_attrs[] = {
+ &dev_attr_phy_event_threshold,
+ NULL,
+};
+
static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -1580,6 +1723,7 @@ static struct scsi_host_template _hisi_sas_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
@@ -1597,6 +1741,8 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
+ .lldd_port_deformed = hisi_sas_port_deformed,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -1657,6 +1803,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
cq->hisi_hba = hisi_hba;
/* Delivery queue structure */
+ spin_lock_init(&dq->lock);
dq->id = i;
dq->hisi_hba = hisi_hba;
@@ -1803,6 +1950,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
+void hisi_sas_sync_rst_work_handler(struct work_struct *work)
+{
+ struct hisi_sas_rst *rst =
+ container_of(work, struct hisi_sas_rst, work);
+
+ if (!hisi_sas_controller_reset(rst->hisi_hba))
+ rst->done = true;
+ complete(rst->completion);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
+
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -1909,6 +2067,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hisi_hba->sgpio_regs))
+ goto err_out;
+ }
+
if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index dc6eca8d6afd..679e76f58a0a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void free_device_v1_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
@@ -1482,7 +1482,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1850,7 +1850,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
.sl_notify = sl_notify_v1_hw,
- .free_device = free_device_v1_hw,
+ .clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
.get_free_slot = get_free_slot_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 5d3467fd728d..4ccb61e2ae5c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -240,7 +240,12 @@
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -952,7 +957,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -963,10 +968,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
sas_dev->completion = &completion;
- /* SoC bug workaround */
- if (dev_is_sata(sas_dev->sas_device))
- clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
-
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
@@ -981,6 +982,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
}
}
+static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
+{
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ /* SoC bug workaround */
+ if (dev_is_sata(sas_dev->sas_device))
+ clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
+}
+
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
int i, reset_val;
@@ -1177,8 +1187,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe);
hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -2356,6 +2366,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -2400,6 +2411,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
>> CMPLT_HDR_ERR_PHASE_OFF;
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
/* Analyse error happens on which phase TX or RX */
if (ERR_ON_TX_PHASE(err_phase))
@@ -2407,6 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
else if (ERR_ON_RX_PHASE(err_phase))
slot_err_v2_hw(hisi_hba, task, slot, 2);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -2456,7 +2478,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -2517,7 +2539,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -2687,7 +2709,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (!timer_pending(&hisi_hba->timer))
set_link_timer_quirk(hisi_hba);
}
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -2713,10 +2735,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
@@ -2813,6 +2837,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}
+static const struct hisi_sas_hw_error port_ecc_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF),
+ .msg = "dmac_tx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF),
+ .msg = "dmac_rx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+};
+
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -2829,40 +2880,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
while (irq_msk) {
- if (irq_msk & (1 << phy_no)) {
- u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
-
- if (irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error!\
- (0x%x)",
- dev_name(dev), irq_value1);
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if ((irq_msk & (1 << phy_no)) && irq_value1) {
+ int i;
- if (irq_value2)
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
+ for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_ecc_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
- if (irq_value0) {
- if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
- phy_bcast_v2_hw(phy_no, hisi_hba);
+ if ((irq_msk & (1 << phy_no)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT0, irq_value0
- & (~CHL_INT0_HOTPLUG_TOUT_MSK)
- & (~CHL_INT0_SL_PHY_ENABLE_MSK)
- & (~CHL_INT0_NOT_RDY_MSK));
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
}
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+ }
+
+ if ((irq_msk & (1 << phy_no)) && irq_value0) {
+ if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ phy_bcast_v2_hw(phy_no, hisi_hba);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
}
irq_msk &= ~(1 << phy_no);
phy_no++;
@@ -2906,7 +2972,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_warn(dev, ecc_error->msg, irq_value, val);
+ dev_err(dev, ecc_error->msg, irq_value, val);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
@@ -3015,12 +3081,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
for (; sub->msk || sub->msg; sub++) {
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -3206,7 +3272,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.device_type = SAS_SATA_DEV;
phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
@@ -3392,7 +3458,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
udelay(10);
if (cnt++ > 10) {
- dev_info(dev, "wait axi bus state to idle timeout!\n");
+ dev_err(dev, "wait axi bus state to idle timeout!\n");
return -1;
}
}
@@ -3408,6 +3474,44 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
return 0;
}
+static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct device *dev = hisi_hba->dev;
+ int phy_no, count;
+
+ if (!hisi_hba->sgpio_regs)
+ return -EOPNOTSUPP;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX:
+ count = reg_count * 4;
+ count = min(count, hisi_hba->n_phy);
+
+ for (phy_no = 0; phy_no < count; phy_no++) {
+ /*
+ * GPIO_TX[n] register has the highest numbered drive
+ * of the four in the first byte and the lowest
+ * numbered drive in the fourth byte.
+ * See SFF-8485 Rev. 0.7 Table 24.
+ */
+ void __iomem *reg_addr = hisi_hba->sgpio_regs +
+ reg_index * 4 + phy_no;
+ int data_idx = phy_no + 3 - (phy_no % 4) * 2;
+
+ writeb(write_data[data_idx], reg_addr);
+ }
+
+ break;
+ default:
+ dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
+ reg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
@@ -3415,6 +3519,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.alloc_dev = alloc_dev_quirk_v2_hw,
.sl_notify = sl_notify_v2_hw,
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+ .clear_itct = clear_itct_v2_hw,
.free_device = free_device_v2_hw,
.prep_smp = prep_smp_v2_hw,
.prep_ssp = prep_ssp_v2_hw,
@@ -3434,6 +3539,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
+ .write_gpio = write_gpio_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
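
write_gpio_v2_hw() above encodes the SFF-8485 byte order: each 32-bit GPIO_TX register carries four drives, with the highest-numbered drive of the group in the lowest-addressed byte, which is what the data_idx expression phy_no + 3 - (phy_no % 4) * 2 reverses. A stand-alone check of that mapping, written as plain user-space C purely for illustration:

#include <assert.h>

int main(void)
{
	/* Expected SFF-8485 order: bytes reversed within each group of 4. */
	const int expected[8] = { 3, 2, 1, 0, 7, 6, 5, 4 };
	int phy_no;

	for (phy_no = 0; phy_no < 8; phy_no++) {
		int data_idx = phy_no + 3 - (phy_no % 4) * 2;

		assert(data_idx == expected[phy_no]);
	}
	return 0;
}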
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 19b1f2ffec17..a1f18689729a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -140,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
@@ -165,6 +166,8 @@
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -204,6 +207,13 @@
#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+/* RAS registers need init */
+#define RAS_BASE (0x6000)
+#define SAS_RAS_INTR0 (RAS_BASE)
+#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
+#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
+#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
+
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
@@ -422,7 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -496,6 +506,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
upper_32_bits(hisi_hba->initial_fis_dma));
+
+ /* RAS registers init */
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -588,7 +602,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v3_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -1033,7 +1047,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
@@ -1138,7 +1152,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct dev_to_host_fis *fis;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
sas_phy->oob_mode = SATA_OOB_MODE;
@@ -1181,7 +1195,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->port_id = port_id;
phy->phy_attached = 1;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -1322,7 +1336,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if (!(irq_value1 & error->irq_msk))
continue;
- dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
error->msg, phy_no, irq_value1);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1331,9 +1345,31 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1, irq_value1);
}
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
+ if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+
+ }
+
+ if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba,
+ phy_no, STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+ }
+
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
+ }
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
@@ -1432,12 +1468,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1542,6 +1578,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -1583,7 +1620,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
/* check for erroneous completion */
if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
+
slot_err_v3_hw(hisi_hba, task, slot);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -1628,7 +1676,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -1653,9 +1701,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
- struct hisi_sas_itct *itct;
struct hisi_sas_complete_v3_hdr *complete_queue;
- u32 rd_point = cq->rd_point, wr_point, dev_id;
+ u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
@@ -1671,38 +1718,11 @@ static void cq_tasklet_v3_hw(unsigned long val)
complete_hdr = &complete_queue[rd_point];
- /* Check for NCQ completion */
- if (complete_hdr->act) {
- u32 act_tmp = complete_hdr->act;
- int ncq_tag_count = ffs(act_tmp);
-
- dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
- CMPLT_HDR_DEV_ID_OFF;
- itct = &hisi_hba->itct[dev_id];
-
- /* The NCQ tags are held in the itct header */
- while (ncq_tag_count) {
- __le64 *ncq_tag = &itct->qw4_15[0];
-
- ncq_tag_count -= 1;
- iptt = (ncq_tag[ncq_tag_count / 5]
- >> (ncq_tag_count % 5) * 12) & 0xfff;
-
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
-
- act_tmp &= ~(1 << ncq_tag_count);
- ncq_tag_count = ffs(act_tmp);
- }
- } else {
- iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
- }
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@@ -1951,7 +1971,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
- .free_device = free_device_v3_hw,
+ .clear_itct = clear_itct_v3_hw,
.sl_notify = sl_notify_v3_hw,
.prep_ssp = prep_ssp_v3_hw,
.prep_smp = prep_smp_v3_hw,
@@ -2157,21 +2177,243 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
scsi_host_put(shost);
}
+static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
+ { .irq_msk = BIT(19), .msg = "HILINK_INT" },
+ { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
+ { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
+ { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
+ { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
+};
+
+static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
+ { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
+ { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
+ { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
+ { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
+ { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
+ { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
+ { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
+ { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
+ { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
+ { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
+ { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
+ { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
+ { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
+ { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
+};
+
+static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *ras_error;
+ bool need_reset = false;
+ u32 irq_value;
+ int i;
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
+ ras_error = &sas_ras_intr0_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
+ ras_error = &sas_ras_intr1_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
+
+ return need_reset;
+}
+
+static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+
+ dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (process_non_fatal_error_v3_hw(hisi_hba))
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+
+ dev_info(dev, "PCI error: slot reset callback!!\n");
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
enum {
/* instances of the controller */
hip08,
};
+static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 device_state, status;
+ int rc;
+ u32 reg_val;
+ unsigned long flags;
+
+ if (!pdev->pm_cap) {
+ dev_err(dev, "PCI PM not supported\n");
+ return -ENODEV;
+ }
+
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_block_requests(shost);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ flush_workqueue(hisi_hba->wq);
+ /* disable DQ/PHY/bus */
+ interrupt_disable_v3_hw(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
+
+ hisi_sas_stop_phys(hisi_hba);
+
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= 0x1;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
+
+ /* wait until bus idle */
+ rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
+ AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+ if (rc) {
+ dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ return rc;
+ }
+
+ hisi_sas_init_mem(hisi_hba);
+
+ device_state = pci_choose_state(pdev, state);
+ dev_warn(dev, "entering operating state [D%d]\n",
+ device_state);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_tasks(hisi_hba);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ sas_suspend_ha(sha);
+ return 0;
+}
+
+static int hisi_sas_v3_resume(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct device *dev = hisi_hba->dev;
+ unsigned int rc;
+ u32 device_state = pdev->current_state;
+
+ dev_warn(dev, "resuming from operating state [D%d]\n",
+ device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc)
+ dev_err(dev, "enable device failed during resume (%d)\n", rc);
+
+ pci_set_master(pdev);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+ sas_prep_resume_ha(sha);
+ init_reg_v3_hw(hisi_hba);
+ hisi_hba->hw->phys_init(hisi_hba);
+ sas_resume_ha(sha);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+
+ return 0;
+}
+
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
};
+static const struct pci_error_handlers hisi_sas_err_handler = {
+ .error_detected = hisi_sas_error_detected_v3_hw,
+ .mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
+ .slot_reset = hisi_sas_slot_reset_v3_hw,
+};
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
+ .suspend = hisi_sas_v3_suspend,
+ .resume = hisi_sas_v3_resume,
+ .err_handler = &hisi_sas_err_handler,
};
module_pci_driver(sas_v3_pci_driver);
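
With the hunks above the v3 driver now participates in PCI error recovery: error_detected decides between disconnecting (permanent failure), requesting a slot reset (a non-fatal RAS error was latched), or recovering in place, and slot_reset reuses the same synchronous reset work as clear_nexus_ha. A skeleton of that contract with hypothetical names; the real callbacks are the _v3_hw functions in this patch:

#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* link is gone: unbind */

	/* A latched non-fatal error: ask the core for a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	/* Reinitialize the device; RECOVERED resumes normal traffic. */
	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.slot_reset	= demo_slot_reset,
};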
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fe3a0da3ec97..57bf43e34863 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
+ /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ rcu_barrier();
+
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->work_q)
destroy_workqueue(shost->work_q);
+ destroy_rcu_head(&shost->rcu);
+
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_rcu_head(&shost->rcu);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
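
The hosts.c hunks pair init_rcu_head() at allocation with rcu_barrier() plus destroy_rcu_head() at release, guaranteeing that any callback queued via call_rcu(&shost->rcu, ...) has finished before the host structure is freed. A minimal sketch of the same lifecycle on a hypothetical object:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	struct rcu_head rcu;	/* passed to call_rcu() while the object lives */
	/* ... payload ... */
};

static struct demo_obj *demo_obj_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		init_rcu_head(&obj->rcu);	/* mirrors the scsi_host_alloc() hunk */
	return obj;
}

static void demo_obj_release(struct demo_obj *obj)
{
	/* Wait for all pending call_rcu(&obj->rcu, ...) callbacks to finish
	 * before tearing down, exactly as scsi_host_dev_release() now does. */
	rcu_barrier();
	destroy_rcu_head(&obj->rcu);
	kfree(obj);
}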
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 287e5eb0723f..87b260e403ec 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3518,7 +3518,7 @@ out:
if (rc != IO_OK)
hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
- "Error, could not get enclosure information\n");
+ "Error, could not get enclosure information");
}
static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
@@ -4619,21 +4619,13 @@ sglist_finished:
return 0;
}
-#define BUFLEN 128
static inline void warn_zero_length_transfer(struct ctlr_info *h,
u8 *cdb, int cdb_len,
const char *func)
{
- char buf[BUFLEN];
- int outlen;
- int i;
-
- outlen = scnprintf(buf, BUFLEN,
- "%s: Blocking zero-length request: CDB:", func);
- for (i = 0; i < cdb_len; i++)
- outlen += scnprintf(buf+outlen, BUFLEN - outlen,
- "%02hhx", cdb[i]);
- dev_warn(&h->pdev->dev, "%s\n", buf);
+ dev_warn(&h->pdev->dev,
+ "%s: Blocking zero-length request: CDB:%*phN\n",
+ func, cdb_len, cdb);
}
#define IO_ACCEL_INELIGIBLE 1
@@ -8223,8 +8215,6 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
if (!device)
continue;
- if (!device->scsi3addr)
- continue;
if (!hpsa_vpd_page_supported(h, device->scsi3addr,
HPSA_VPD_LV_IOACCEL_STATUS))
continue;
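
Two simplifications in hpsa: the zero-length-transfer warning now uses the kernel printf extension %*phN, which prints a byte buffer as contiguous hex digits (field width = byte count), replacing a 128-byte stack buffer and a manual scnprintf() loop; and the scsi3addr check is dropped, apparently because the member is an embedded array rather than a pointer, so the NULL test could never fire. A short sketch of the %*phN idiom with hypothetical names:

#include <linux/device.h>

static void demo_warn_zero_len(struct device *dev, const u8 *cdb, int cdb_len)
{
	/* %*phN: print cdb_len bytes of cdb as contiguous hex,
	 * e.g. "CDB:2a0000001000" -- no stack buffer, no loop. */
	dev_warn(dev, "Blocking zero-length request: CDB:%*phN\n",
		 cdb_len, cdb);
}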
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 0d2f7eb3acb6..b1b1d3a3b173 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -181,7 +181,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
break;
default:
break;
- };
+ }
}
/**
@@ -220,7 +220,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
default:
break;
- };
+ }
}
#else
@@ -464,7 +464,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
default:
vhost->state = state;
break;
- };
+ }
return rc;
}
@@ -500,7 +500,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
default:
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
@@ -515,7 +515,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -526,7 +526,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
}
/**
@@ -1601,7 +1601,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
case IBMVFC_ACTIVE:
result = 0;
break;
- };
+ }
return result;
}
@@ -1856,7 +1856,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
break;
default:
return -ENOTSUPP;
- };
+ }
if (port_id == -1)
return -EINVAL;
@@ -2661,7 +2661,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
- };
+ }
break;
case IBMVFC_AE_LINK_UP:
@@ -2715,7 +2715,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
- };
+ }
}
/**
@@ -3351,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
rsp->status, rsp->error, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3451,7 +3451,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3522,7 +3522,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
- };
+ }
if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
@@ -3626,7 +3626,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3838,7 +3838,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -4236,7 +4236,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
- };
+ }
return 1;
}
@@ -4464,7 +4464,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
default:
break;
- };
+ }
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
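Nearly every ibmvfc hunk above deletes a single character: the stray semicolon after a closing switch brace. The trailing ';' is a legal empty statement in C, so nothing was misbehaving, but it is redundant and style checkers flag it. A minimal illustration:

int main(void)
{
	int state = 0;

	switch (state) {
	default:
		break;
	}	/* preferred: the brace alone ends the statement */

	switch (state) {
	default:
		break;
	};	/* legal, but the ';' is a redundant empty statement */

	return 0;
}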
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2799a6b08f73..c3a76af9f5fa 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -122,7 +122,7 @@ static bool connection_broken(struct scsi_info *vscsi)
cpu_to_be64(buffer[MSG_HI]),
cpu_to_be64(buffer[MSG_LOW]));
- pr_debug("connection_broken: rc %ld\n", h_return_code);
+ dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
if (h_return_code == H_CLOSED)
rc = true;
@@ -210,7 +210,7 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
}
} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
- pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
+ dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
return rc;
}
@@ -291,9 +291,9 @@ static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
ibmvscsis_delete_client_info(vscsi, false);
}
- pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
return rc;
}
@@ -428,8 +428,8 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->flags |= DISCONNECT_SCHEDULED;
vscsi->flags &= ~SCHEDULE_DISCONNECT;
- pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/*
* check which state we are in and see if we
@@ -540,13 +540,14 @@ static void ibmvscsis_disconnect(struct work_struct *work)
}
if (wait_idle) {
- pr_debug("disconnect start wait, active %d, sched %d\n",
- (int)list_empty(&vscsi->active_q),
- (int)list_empty(&vscsi->schedule_q));
+ dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
+ (int)list_empty(&vscsi->active_q),
+ (int)list_empty(&vscsi->schedule_q));
if (!list_empty(&vscsi->active_q) ||
!list_empty(&vscsi->schedule_q)) {
vscsi->flags |= WAIT_FOR_IDLE;
- pr_debug("disconnect flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
+ vscsi->flags);
/*
* This routine cannot be called with the interrupt
* lock held.
@@ -555,7 +556,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
wait_for_completion(&vscsi->wait_idle);
spin_lock_bh(&vscsi->intr_lock);
}
- pr_debug("disconnect stop wait\n");
+ dev_dbg(&vscsi->dev, "disconnect stop wait\n");
ibmvscsis_adapter_idle(vscsi);
}
@@ -597,8 +598,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
vscsi->flags |= flag_bits;
- pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
- new_state, flag_bits, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
+ new_state, flag_bits, vscsi->flags, vscsi->state);
if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
vscsi->flags |= SCHEDULE_DISCONNECT;
@@ -648,8 +649,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
}
- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
- vscsi->flags, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+ vscsi->flags, vscsi->new_state);
}
/**
@@ -724,7 +725,8 @@ static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
break;
case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
rc = 0;
break;
}
@@ -768,7 +770,7 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
long rc = ADAPT_SUCCESS;
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+ dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
@@ -776,10 +778,10 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (rc == H_SUCCESS) {
vscsi->client_data.partition_number =
be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
} else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
rc = ADAPT_SUCCESS;
}
@@ -813,7 +815,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
if (rc == H_SUCCESS)
vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
else if (rc != H_NOT_FOUND)
- pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
+ dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
+ rc);
vscsi->flags &= PRESERVE_FLAG_FIELDS;
vscsi->rsp_q_timer.timer_pops = 0;
@@ -822,8 +825,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
rc = vio_enable_interrupts(vscsi->dma_dev);
if (rc) {
- pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
return rc;
}
@@ -883,7 +886,7 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
int bytes;
long rc = ADAPT_SUCCESS;
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
/* don't reset, the client did it for us */
if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
@@ -906,7 +909,8 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
}
if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
+ rc);
vscsi->state = ERR_DISCONNECTED;
vscsi->flags |= RESPONSE_Q_DOWN;
@@ -985,14 +989,15 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
/* See if there is a Resume event in the queue */
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
- pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
- vscsi->flags, vscsi->state, (int)crq->valid);
+ dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
+ vscsi->flags, vscsi->state, (int)crq->valid);
if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
0, 0);
if (rc) {
- pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
+ dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
+ rc);
rc = 0;
}
} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
@@ -1012,7 +1017,7 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
(crq->format != RESUME_FROM_SUSP)))
- pr_err("Invalid element in CRQ after Prepare for Suspend");
+ dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
}
vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
@@ -1036,8 +1041,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
{
long rc = ADAPT_SUCCESS;
- pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
- (int)crq->format, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
+ (int)crq->format, vscsi->flags, vscsi->state);
switch (crq->format) {
case MIGRATED:
@@ -1073,14 +1078,14 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
!list_empty(&vscsi->schedule_q) ||
!list_empty(&vscsi->waiting_rsp) ||
!list_empty(&vscsi->active_q)) {
- pr_debug("debit %d, sched %d, wait %d, active %d\n",
- vscsi->debit,
- (int)list_empty(&vscsi->schedule_q),
- (int)list_empty(&vscsi->waiting_rsp),
- (int)list_empty(&vscsi->active_q));
- pr_warn("connection lost with outstanding work\n");
+ dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
+ vscsi->debit,
+ (int)list_empty(&vscsi->schedule_q),
+ (int)list_empty(&vscsi->waiting_rsp),
+ (int)list_empty(&vscsi->active_q));
+ dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
} else {
- pr_debug("trans_event: SRP Processing, but no outstanding work\n");
+ dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
}
ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
@@ -1097,8 +1102,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
break;
case PREPARE_FOR_SUSPEND:
- pr_debug("Prep for Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
+ (int)crq->status);
switch (vscsi->state) {
case ERR_DISCONNECTED:
case WAIT_CONNECTION:
@@ -1119,15 +1124,15 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
- vscsi->state);
+ dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
+ vscsi->state);
break;
}
break;
case RESUME_FROM_SUSP:
- pr_debug("Resume from Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
+ (int)crq->status);
if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
} else {
@@ -1152,8 +1157,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
rc = vscsi->flags & SCHEDULE_DISCONNECT;
- pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
- vscsi->flags, vscsi->state, rc);
+ dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
return rc;
}
@@ -1175,8 +1180,8 @@ static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
bool ack = true;
volatile u8 valid;
- pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
rc = vscsi->flags & SCHEDULE_DISCONNECT;
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
@@ -1204,7 +1209,7 @@ poll_work:
* if a transport event has occurred leave
* everything but transport events on the queue
*/
- pr_debug("poll_cmd_q, ignoring\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
/*
* need to decrement the queue index so we can
@@ -1233,7 +1238,7 @@ poll_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("poll_cmd_q, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
@@ -1241,7 +1246,7 @@ poll_work:
goto poll_work;
}
- pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
}
/**
@@ -1258,9 +1263,9 @@ static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
struct ibmvscsis_cmd *cmd, *nxt;
- pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
- (int)list_empty(&vscsi->waiting_rsp),
- vscsi->rsp_q_timer.started);
+ dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
+ (int)list_empty(&vscsi->waiting_rsp),
+ vscsi->rsp_q_timer.started);
list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
list_del(&cmd->list);
@@ -1317,8 +1322,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
int free_qs = false;
long rc = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/* Only need to free qs if we're disconnecting from client */
if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
@@ -1336,7 +1341,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
break;
case ERR_DISCONNECT_RECONNECT:
ibmvscsis_reset_queue(vscsi);
- pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
+ vscsi->flags);
break;
case ERR_DISCONNECT:
@@ -1347,8 +1353,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->state = ERR_DISCONNECTED;
else
vscsi->state = WAIT_ENABLED;
- pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
case WAIT_IDLE:
@@ -1370,15 +1376,15 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->flags &= ~DISCONNECT_SCHEDULED;
}
- pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
ibmvscsis_poll_cmd_q(vscsi);
break;
case ERR_DISCONNECTED:
vscsi->flags &= ~DISCONNECT_SCHEDULED;
- pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
default:
@@ -1419,13 +1425,13 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->phyp_acr_state = 0;
vscsi->phyp_acr_flags = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
- pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->new_state);
}
/**
@@ -1464,8 +1470,8 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
cmd->init_time = mftb();
iue->remote_token = crq->IU_data_ptr;
iue->iu_len = len;
- pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
- be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
+ dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
+ be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
break;
case H_PERMISSION:
if (connection_broken(vscsi))
@@ -1536,10 +1542,10 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
}
- pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
- rc);
- pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
- be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
+ dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
+ rc);
+ dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
+ be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
flag_bits);
goto free_dma;
@@ -1595,7 +1601,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
free_dma:
dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
- pr_debug("Leaving adapter_info, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
return rc;
}
@@ -1629,7 +1635,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
*/
min_len = offsetof(struct capabilities, migration);
if ((olen < min_len) || (olen > PAGE_SIZE)) {
- pr_warn("cap_mad: invalid len %d\n", olen);
+ dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
return 0;
}
@@ -1654,9 +1660,9 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
common = (struct mad_capability_common *)&cap->migration;
while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
- pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
- len, be32_to_cpu(common->cap_type),
- be16_to_cpu(common->length));
+ dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
+ len, be32_to_cpu(common->cap_type),
+ be16_to_cpu(common->length));
cap_len = be16_to_cpu(common->length);
if (cap_len > len) {
@@ -1673,7 +1679,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
switch (common->cap_type) {
default:
- pr_debug("cap_mad: unsupported capability\n");
+ dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
common->server_support = 0;
flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
cap->flags &= ~flag;
@@ -1693,8 +1699,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
be64_to_cpu(mad->buffer));
if (rc != H_SUCCESS) {
- pr_debug("cap_mad: failed to copy to client, rc %ld\n",
- rc);
+ dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
@@ -1702,8 +1708,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
CLIENT_FAILED);
}
- pr_warn("cap_mad: error copying data to client, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
+ rc);
ibmvscsis_post_disconnect(vscsi,
ERR_DISCONNECT_RECONNECT,
flag_bits);
@@ -1712,8 +1718,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
- pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
- rc, vscsi->client_cap);
+ dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
+ rc, vscsi->client_cap);
return rc;
}
@@ -1749,7 +1755,7 @@ static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
vscsi->fast_fail = true;
mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
} else {
- pr_warn("fast fail mad sent after login\n");
+ dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
}
break;
@@ -1809,9 +1815,9 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
*/
if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
(vscsi->state == SRP_PROCESSING)) {
- pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
- vscsi->flags, (int)vscsi->rsp_q_timer.started,
- vscsi->rsp_q_timer.timer_pops);
+ dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
+ vscsi->flags, (int)vscsi->rsp_q_timer.started,
+ vscsi->rsp_q_timer.timer_pops);
/*
* Check if the timer is running; if it
@@ -1947,8 +1953,9 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
- cmd, be64_to_cpu(cmd->rsp.tag), rc);
+ dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag),
+ rc);
/* if all ok free up the command
* element resources
@@ -2003,7 +2010,8 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
list_add_tail(&cmd->list, &vscsi->waiting_rsp);
ibmvscsis_send_messages(vscsi);
} else {
- pr_debug("Error sending mad response, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
@@ -2039,8 +2047,8 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
* expecting a response.
*/
case WAIT_CONNECTION:
- pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
- vscsi->flags);
+ dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
+ vscsi->flags);
return ADAPT_SUCCESS;
case SRP_PROCESSING:
@@ -2075,12 +2083,12 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (!rc) {
mad = (struct mad_common *)&vio_iu(iue)->mad;
- pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+ dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
rc = ibmvscsis_process_mad(vscsi, iue);
- pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
- rc);
+ dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
+ be16_to_cpu(mad->status), rc);
if (!rc)
ibmvscsis_send_mad_resp(vscsi, cmd, crq);
@@ -2088,7 +2096,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving mad, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
return rc;
}
@@ -2211,16 +2219,17 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
char *name = tport->tport_name;
struct ibmvscsis_nexus *nexus;
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
int rc;
if (tport->ibmv_nexus) {
- pr_debug("tport->ibmv_nexus already exists\n");
+ dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
return 0;
}
nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
if (!nexus) {
- pr_err("Unable to allocate struct ibmvscsis_nexus\n");
+ dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
return -ENOMEM;
}
@@ -2316,7 +2325,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
cmd->rsp.format = VIOSRP_SRP_FORMAT;
cmd->rsp.tag = req->tag;
- pr_debug("srp_login: reason 0x%x\n", reason);
+ dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
if (reason)
rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
@@ -2333,7 +2342,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving srp_login, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
return rc;
}
@@ -2415,8 +2424,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
case SRP_TSK_MGMT:
tsk = &vio_iu(iue)->srp.tsk_mgmt;
- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
- tsk->tag);
+ dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
+ tsk->tag, tsk->tag);
cmd->rsp.tag = tsk->tag;
vscsi->debit += 1;
cmd->type = TASK_MANAGEMENT;
@@ -2425,8 +2434,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
break;
case SRP_CMD:
- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
- srp->tag);
+ dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
+ srp->tag, srp->tag);
cmd->rsp.tag = srp->tag;
vscsi->debit += 1;
cmd->type = SCSI_CDB;
@@ -2603,7 +2612,7 @@ static int read_dma_window(struct scsi_info *vscsi)
"ibm,my-dma-window",
NULL);
if (!dma_window) {
- pr_err("Couldn't find ibm,my-dma-window property\n");
+ dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
return -1;
}
@@ -2613,7 +2622,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-address-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2622,7 +2631,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-size-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2808,8 +2817,8 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
srp_tsk->lun.scsi_lun[0] &= 0x3f;
- pr_debug("calling submit_tmr, func %d\n",
- srp_tsk->tsk_mgmt_func);
+ dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
+ srp_tsk->tsk_mgmt_func);
rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
scsilun_to_int(&srp_tsk->lun), srp_tsk,
tcm_type, GFP_KERNEL, tag_to_abort, 0);
@@ -3113,8 +3122,8 @@ static long srp_build_response(struct scsi_info *vscsi,
if (cmd->type == SCSI_CDB) {
rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
if (rsp->status) {
- pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
- (int)rsp->status);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
+ cmd, (int)rsp->status);
ibmvscsis_determine_resid(se_cmd, rsp);
if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
rsp->sense_data_len =
@@ -3127,7 +3136,8 @@ static long srp_build_response(struct scsi_info *vscsi,
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else if (cmd->flags & CMD_FAST_FAIL) {
- pr_debug("build_resp: cmd %p, fast fail\n", cmd);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
+ cmd);
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else {
@@ -3340,7 +3350,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
spin_lock_bh(&vscsi->intr_lock);
- pr_debug("got interrupt\n");
+ dev_dbg(&vscsi->dev, "got interrupt\n");
/*
* if we are in a path where we are waiting for all pending commands
@@ -3350,8 +3360,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
- pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
return;
}
@@ -3414,20 +3424,20 @@ cmd_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("handle_crq, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
if (valid)
goto cmd_work;
} else {
- pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
}
- pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
- (int)list_empty(&vscsi->schedule_q), vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+ (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+ vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
}
@@ -3443,7 +3453,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) {
rc = -ENOMEM;
- pr_err("probe: allocation of adapter failed\n");
+ dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
return rc;
}
@@ -3456,14 +3466,14 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
dev_name(&vdev->dev));
- pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+ dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
rc = read_dma_window(vscsi);
if (rc)
goto free_adapter;
- pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
- vscsi->dds.window[LOCAL].liobn,
- vscsi->dds.window[REMOTE].liobn);
+ dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
+ vscsi->dds.window[LOCAL].liobn,
+ vscsi->dds.window[REMOTE].liobn);
strcpy(vscsi->eye, "VSCSI ");
strncat(vscsi->eye, vdev->name, MAX_EYE);
@@ -3541,8 +3551,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
* client can connect" and the client isn't activated yet.
* We'll make the call again when the client sends an init msg.
*/
- pr_debug("probe hrc %ld, client partition num %d\n",
- hrc, vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
+ hrc, vscsi->client_data.partition_number);
tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
(unsigned long)vscsi);
@@ -3602,7 +3612,7 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
{
struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
- pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+ dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
spin_lock_bh(&vscsi->intr_lock);
ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
@@ -3766,14 +3776,16 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
* attempt an srp_transfer_data.
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
- pr_err("write_pending failed since: %d\n", vscsi->flags);
+ dev_err(&vscsi->dev, "write_pending failed since: %d\n",
+ vscsi->flags);
return -EIO;
+
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
1, 1);
if (rc) {
- pr_err("srp_transfer_data() failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
return -EIO;
}
/*
@@ -3811,7 +3823,7 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
1);
if (rc) {
- pr_err("srp_transfer_data failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
sd = se_cmd->sense_buffer;
se_cmd->scsi_sense_length = 18;
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
@@ -3834,7 +3846,7 @@ static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
struct scsi_info *vscsi = cmd->adapter;
uint len;
- pr_debug("queue_status %p\n", se_cmd);
+ dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
srp_build_response(vscsi, cmd, &len);
cmd->rsp.format = SRP_FORMAT;
@@ -3854,8 +3866,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
uint len;
- pr_debug("queue_tm_rsp %p, status %d\n",
- se_cmd, (int)se_cmd->se_tmr_req->response);
+ dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
+ se_cmd, (int)se_cmd->se_tmr_req->response);
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
@@ -3877,8 +3889,12 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
- pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
- se_cmd, se_cmd->tag);
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
+ se_cmd, se_cmd->tag);
}
static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
@@ -3886,12 +3902,14 @@ static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
const char *name)
{
struct ibmvscsis_tport *tport;
+ struct scsi_info *vscsi;
tport = ibmvscsis_lookup_port(name);
if (tport) {
+ vscsi = container_of(tport, struct scsi_info, tport);
tport->tport_proto_id = SCSI_PROTOCOL_SRP;
- pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
- name, tport, tport->tport_proto_id);
+ dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
+ name, tport, tport->tport_proto_id);
return &tport->tport_wwn;
}
@@ -3903,9 +3921,10 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
struct ibmvscsis_tport *tport = container_of(wwn,
struct ibmvscsis_tport,
tport_wwn);
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
- pr_debug("drop_tport(%s)\n",
- config_item_name(&tport->tport_wwn.wwn_group.cg_item));
+ dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
+ config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
@@ -3990,12 +4009,12 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
rc = kstrtoul(page, 0, &tmp);
if (rc < 0) {
- pr_err("Unable to extract srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- pr_err("Illegal value for srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
return -EINVAL;
}
@@ -4004,8 +4023,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
- pr_err("enable_change_state failed, rc %ld state %d\n",
- lrc, vscsi->state);
+ dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
spin_lock_bh(&vscsi->intr_lock);
@@ -4015,7 +4034,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
+ dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
+ vscsi->state);
return count;
}
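The ibmvscsi_tgt changes above are one sustained conversion from pr_debug()/pr_warn()/pr_err() to their dev_* counterparts, so every message carries the driver and device name and debug output can be enabled per device via dynamic debug; where no struct device was in scope (ibmvscsis_aborted_task(), ibmvscsis_make_tport()), the hunks first recover the adapter with container_of(). A kernel-context sketch of the difference, with hypothetical names (this compiles against kernel headers, not as a standalone program):

#include <linux/device.h>
#include <linux/printk.h>

/* Sketch only: dev_dbg() prefixes "<driver> <device>: " and is
 * controllable per callsite/device with CONFIG_DYNAMIC_DEBUG;
 * pr_debug() emits a bare, global message. example() and the sample
 * device name below are hypothetical.
 */
static void example(struct device *dev, unsigned int flags)
{
	pr_debug("flags 0x%x\n", flags);	/* "flags 0x1" */
	dev_dbg(dev, "flags 0x%x\n", flags);	/* "ibmvscsis 30000085: flags 0x1" */
}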
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cc0187965eee..e07dd990e585 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9653,8 +9653,8 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (i == 0) {
entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
ioa_cfg->hrrq[i].min_cmd_id = 0;
- ioa_cfg->hrrq[i].max_cmd_id =
- (entries_each_hrrq - 1);
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
} else {
entries_each_hrrq =
IPR_NUM_BASE_CMD_BLKS/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4d934d6c3e13..6198559abbd8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -307,6 +307,7 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
/**
* iscsi_sw_tcp_xmit - TCP transmit
+ * @conn: iscsi connection
**/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
@@ -357,6 +358,7 @@ error:
/**
* iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9c50d2d9f27c..15a2fef51e38 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * cmds should fail fast during shutdown if the session
+ * state is bad, allowing completion to happen
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1979,6 +1988,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, which is also bad here.
+ * Instead, handle the cmd, allow completion to happen and let
+ * the upper layer deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2082,7 +2104,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
@@ -2722,8 +2744,10 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
* @iscsit: iscsi transport template
* @shost: scsi host
* @cmds_max: max cmds the session can queue
+ * @dd_size: private driver data size, added to session allocation size
* @cmd_task_size: LLD task private data size
* @initial_cmdsn: initial CmdSN
+ * @id: target ID to add to this session
*
* This can be used by software iscsi_transports that allocate
* a session per scsi host.
@@ -2951,7 +2975,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_setup);
/**
* iscsi_conn_teardown - teardown iscsi connection
- * cls_conn: iscsi class connection
+ * @cls_conn: iscsi class connection
*
* TODO: we may need to make this into a two step process
* like scsi-mls remove + put host
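Both libiscsi shutdown paths above complete commands with sc->result = DID_NO_CONNECT << 16. The SCSI result word packs driver, host, message, and status bytes into a single 32-bit value, and host-byte codes such as DID_NO_CONNECT occupy bits 16-23, hence the shift. A small userspace check of that packing (constants reproduced from include/scsi/scsi.h purely for illustration):

#include <assert.h>
#include <stdio.h>

#define DID_NO_CONNECT	0x05			/* host byte: couldn't connect */
#define host_byte(result) (((result) >> 16) & 0xff)

int main(void)
{
	unsigned int result = DID_NO_CONNECT << 16;

	assert(host_byte(result) == DID_NO_CONNECT);
	printf("result=0x%08x host_byte=0x%02x\n", result, host_byte(result));
	return 0;
}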
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 63a1d69ff515..369ef8f23b24 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -798,6 +798,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/**
* iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iSCSI TCP connection
+ * @segment: the buffer segment being processed
*
* This is the callback invoked when the PDU header has
* been received. If the header is followed by additional
@@ -876,9 +878,10 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
* @conn: iscsi connection
* @skb: network buffer with header and/or data segment
* @offset: offset in skb
- * @offload: bool indicating if transfer was offloaded
+ * @offloaded: bool indicating if transfer was offloaded
+ * @status: iscsi TCP status result
*
- * Will return status of transfer in status. And will return
+ * Will return status of transfer in @status. And will return
* number of bytes copied.
*/
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
@@ -955,9 +958,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
/**
* iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
* @task: scsi command task
- * @sc: scsi command
*/
int iscsi_tcp_task_init(struct iscsi_task *task)
{
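The iscsi_tcp/libiscsi/libiscsi_tcp hunks above are kernel-doc repairs: every parameter in the signature needs a matching @name: line (note the @offload -> @offloaded spelling fix), and tags for parameters that no longer exist are dropped. A sketch of the expected shape, for a hypothetical function:

/**
 * example_xmit - transmit queued PDUs for one connection
 * @conn: iscsi connection
 * @budget: maximum number of bytes to send
 *
 * One @tag per parameter, spelled exactly as in the signature;
 * scripts/kernel-doc warns about missing, extra, or misspelled tags.
 *
 * Return: number of bytes transmitted, or a negative errno.
 */
static int example_xmit(struct iscsi_conn *conn, int budget);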
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 70be4425ae0b..2b3637b40dde 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -730,7 +730,6 @@ int sas_discover_sata(struct domain_device *dev)
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de66252fa2..e4fd078e4175 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -212,13 +212,9 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
}
}
-static void sas_probe_devices(struct work_struct *work)
+static void sas_probe_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_PROBE, &port->disc.pending);
/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
@@ -294,7 +290,6 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
@@ -353,13 +348,9 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
sas_put_device(dev);
}
-static void sas_destruct_devices(struct work_struct *work)
+void sas_destruct_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_DESTRUCT, &port->disc.pending);
list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
@@ -370,6 +361,16 @@ static void sas_destruct_devices(struct work_struct *work)
}
}
+static void sas_destruct_ports(struct asd_sas_port *port)
+{
+ struct sas_port *sas_port, *p;
+
+ list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
+ list_del_init(&sas_port->del_list);
+ sas_port_delete(sas_port);
+ }
+}
+
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -384,7 +385,6 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
- sas_discover_event(dev->port, DISCE_DESTRUCT);
}
}
@@ -490,6 +490,8 @@ static void sas_discover_domain(struct work_struct *work)
port->port_dev = NULL;
}
+ sas_probe_devices(port);
+
SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
task_pid_nr(current), error);
}
@@ -523,6 +525,10 @@ static void sas_revalidate_domain(struct work_struct *work)
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
+
+ sas_destruct_devices(port);
+ sas_destruct_ports(port);
+ sas_probe_devices(port);
}
/* ---------- Events ---------- */
@@ -534,7 +540,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
* workqueue, or known to be submitted from a context that is
* not racing against draining
*/
- scsi_queue_work(ha->core.shost, &sw->work);
+ queue_work(ha->disco_q, &sw->work);
}
static void sas_chain_event(int event, unsigned long *pending,
@@ -578,10 +584,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
- [DISCE_PROBE] = sas_probe_devices,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
- [DISCE_DESTRUCT] = sas_destruct_devices,
};
disc->pending = 0;
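With DISCE_PROBE and DISCE_DESTRUCT dropped from the event table, sas_probe_devices() and sas_destruct_devices() become plain functions called synchronously from discovery and revalidation. Both walk their lists with list_for_each_entry_safe() because entries are unlinked or freed mid-walk; a bare userspace sketch of why the next pointer must be captured before the current node goes away:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *nxt;
	int i;

	for (i = 0; i < 3; i++) {	/* build a small list */
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}

	for (n = head; n; n = nxt) {	/* "safe" teardown walk */
		nxt = n->next;		/* grab next before freeing n */
		printf("destruct node %d\n", n->id);
		free(n);
	}
	return 0;
}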
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 0bb9eefc08c8..ae923eb6de95 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -29,7 +29,8 @@
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- int rc = 0;
+ /* it's added to the defer_q when draining, so return success */
+ int rc = 1;
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return 0;
@@ -39,24 +40,20 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
} else
- rc = scsi_queue_work(ha->core.shost, &sw->work);
+ rc = queue_work(ha->event_q, &sw->work);
return rc;
}
-static int sas_queue_event(int event, unsigned long *pending,
- struct sas_work *work,
+static int sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
- int rc = 0;
+ unsigned long flags;
+ int rc;
- if (!test_and_set_bit(event, pending)) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->lock, flags);
- rc = sas_queue_work(ha, work);
- spin_unlock_irqrestore(&ha->lock, flags);
- }
+ spin_lock_irqsave(&ha->lock, flags);
+ rc = sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->lock, flags);
return rc;
}
@@ -64,21 +61,25 @@ static int sas_queue_event(int event, unsigned long *pending,
void __sas_drain_work(struct sas_ha_struct *ha)
{
- struct workqueue_struct *wq = ha->core.shost->work_q;
struct sas_work *sw, *_sw;
+ int ret;
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
spin_unlock_irq(&ha->lock);
- drain_workqueue(wq);
+ drain_workqueue(ha->event_q);
+ drain_workqueue(ha->disco_q);
spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
- sas_queue_work(ha, sw);
+ ret = sas_queue_work(ha, sw);
+ if (ret != 1)
+ sas_free_event(to_asd_sas_event(&sw->work));
+
}
spin_unlock_irq(&ha->lock);
}
@@ -115,33 +116,78 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
+ struct asd_sas_phy *sas_phy;
if (!test_and_clear_bit(ev, &d->pending))
continue;
- sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+ if (list_empty(&port->phy_list))
+ continue;
+
+ sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+ port_phy_el);
+ ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
}
mutex_unlock(&ha->disco_mutex);
}
+
+static void sas_port_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_port_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
+static void sas_phy_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_phy_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
- return sas_queue_event(event, &phy->port_events_pending,
- &phy->port_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
- return sas_queue_event(event, &phy->phy_events_pending,
- &phy->phy_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_init_events(struct sas_ha_struct *sas_ha)
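The new sas_port_event_worker()/sas_phy_event_worker() wrappers give each dynamically allocated event one life cycle: look up the handler by ev->event in a function table, run it, then free the event, so every notification owns exactly one allocation. A compact userspace sketch of that dispatch-then-free shape (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

enum port_event { PORTE_BYTES_DMAED, PORTE_BROADCAST_RCVD, PORT_NUM_EVENTS };

struct event {
	enum port_event event;
	/* ... payload ... */
};

static void bytes_dmaed(struct event *ev)
{
	printf("bytes dmaed (event %d)\n", ev->event);
}

static void broadcast_rcvd(struct event *ev)
{
	printf("broadcast rcvd (event %d)\n", ev->event);
}

static void (* const port_event_fns[PORT_NUM_EVENTS])(struct event *) = {
	[PORTE_BYTES_DMAED]    = bytes_dmaed,
	[PORTE_BROADCAST_RCVD] = broadcast_rcvd,
};

/* Worker wrapper: dispatch by event id, then free, mirroring
 * sas_port_event_worker() followed by sas_free_event().
 */
static void event_worker(struct event *ev)
{
	port_event_fns[ev->event](ev);
	free(ev);
}

int main(void)
{
	struct event *ev = malloc(sizeof(*ev));

	if (!ev)
		return 1;
	ev->event = PORTE_BROADCAST_RCVD;
	event_worker(ev);	/* handler runs, event freed exactly once */
	return 0;
}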
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3183d63de4da..6a4f8198b78e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
@@ -1914,7 +1916,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
+ list_add_tail(&phy->port->del_list,
+ &parent->port->sas_port_del_list);
phy->port = NULL;
}
}
@@ -2122,7 +2125,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
- while (res == 0 && dev) {
+ if (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
@@ -2134,9 +2137,6 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
-
- dev = NULL;
- res = sas_find_bcast_dev(port_dev, &dev);
}
return res;
}
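The sas_smp_get_phy_events() hunk above carries two independent fixes: the error test was inverted (if (!res) jumped to the exit on success, so the counters were never read), and the request buffer leaked because only resp was freed. A userspace sketch of the corrected control flow, with fake_execute() standing in for smp_execute_task():

#include <stdlib.h>
#include <string.h>

static int fake_execute(void *req, void *resp)
{
	(void)req;
	(void)resp;
	return 0;			/* 0 on success, like smp_execute_task() */
}

static int get_counters(unsigned int *out)
{
	void *req = calloc(1, 16);
	void *resp = calloc(1, 32);
	int res = -1;

	if (!req || !resp)
		goto out;

	res = fake_execute(req, resp);
	if (res)			/* was inverted: "if (!res)" skipped the reads */
		goto out;

	memcpy(out, (char *)resp + 12, sizeof(*out));
out:
	free(req);			/* the kernel fix adds the missing kfree(req) */
	free(resp);
	return res;
}

int main(void)
{
	unsigned int v = 0;

	return get_counters(&v) ? 1 : 0;
}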
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 64fa6f53cb8b..c81a63b5dc71 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -39,6 +39,7 @@
#include "../scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
+static struct kmem_cache *sas_event_cache;
struct sas_task *sas_alloc_task(gfp_t flags)
{
@@ -109,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
+ char name[64];
int error = 0;
mutex_init(&sas_ha->disco_mutex);
@@ -122,6 +124,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->defer_q);
INIT_LIST_HEAD(&sas_ha->eh_dev_q);
+ sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;
+
error = sas_register_phys(sas_ha);
if (error) {
printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -140,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_ports;
}
+ error = -ENOMEM;
+ snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+ sas_ha->event_q = create_singlethread_workqueue(name);
+ if (!sas_ha->event_q)
+ goto Undo_ports;
+
+ snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
+ sas_ha->disco_q = create_singlethread_workqueue(name);
+ if (!sas_ha->disco_q)
+ goto Undo_event_q;
+
INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
+
+Undo_event_q:
+ destroy_workqueue(sas_ha->event_q);
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
@@ -174,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
+ destroy_workqueue(sas_ha->disco_q);
+ destroy_workqueue(sas_ha->event_q);
+
return 0;
}
@@ -364,8 +385,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
struct asd_sas_phy *phy = ha->sas_phy[i];
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->port_events_pending = 0;
- phy->phy_events_pending = 0;
phy->frame_rcvd_size = 0;
}
}
@@ -537,6 +556,37 @@ static struct sas_function_template sft = {
.smp_handler = sas_smp_handler,
};
+static inline ssize_t phy_event_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
+}
+
+static inline ssize_t phy_event_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ sha->event_thres = simple_strtol(buf, NULL, 10);
+
+ /* threshold cannot be set too small */
+ if (sha->event_thres < 32)
+ sha->event_thres = 32;
+
+ return count;
+}
+
+DEVICE_ATTR(phy_event_threshold,
+ S_IRUGO|S_IWUSR,
+ phy_event_threshold_show,
+ phy_event_threshold_store);
+EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
+
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
@@ -555,20 +605,71 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+{
+ struct asd_sas_event *event;
+ gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ event = kmem_cache_zalloc(sas_event_cache, flags);
+ if (!event)
+ return NULL;
+
+ atomic_inc(&phy->event_nr);
+
+ if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
+ if (i->dft->lldd_control_phy) {
+ if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+ sas_printk("The phy%02d bursting events, shut it down.\n",
+ phy->id);
+ sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+ }
+ } else {
+ /* Do not support PHY control, stop allocating events */
+ WARN_ONCE(1, "PHY control not supported.\n");
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+ event = NULL;
+ }
+ }
+
+ return event;
+}
+
+void sas_free_event(struct asd_sas_event *event)
+{
+ struct asd_sas_phy *phy = event->phy;
+
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+}
+
/* ---------- SAS Class register/unregister ---------- */
static int __init sas_class_init(void)
{
sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
- return -ENOMEM;
+ goto out;
+
+ sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
+ if (!sas_event_cache)
+ goto free_task_kmem;
return 0;
+free_task_kmem:
+ kmem_cache_destroy(sas_task_cache);
+out:
+ return -ENOMEM;
}
static void __exit sas_class_exit(void)
{
kmem_cache_destroy(sas_task_cache);
+ kmem_cache_destroy(sas_event_cache);
}
MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
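sas_alloc_event() above also rate-limits a misbehaving phy: every allocation bumps an atomic per-phy counter, and once it crosses ha->event_thres, cmpxchg(&phy->in_shutdown, 0, 1) ensures exactly one caller queues PHYE_SHUTDOWN. A hedged userspace sketch of that one-shot trigger with C11 atomics (the kernel reads the counter separately with atomic_read() rather than using a fetch-add result; the effect is the same):

#include <stdatomic.h>
#include <stdio.h>

#define EVENT_THRES 32

static atomic_int event_nr;
static atomic_int in_shutdown;

static void notify_event(void)
{
	int expected = 0;

	/* Many callers may see the counter above the threshold, but the
	 * compare-exchange lets only the first flip 0 -> 1 and fire.
	 */
	if (atomic_fetch_add(&event_nr, 1) + 1 > EVENT_THRES &&
	    atomic_compare_exchange_strong(&in_shutdown, &expected, 1))
		puts("phy bursting events, shutting it down (fires once)");
}

int main(void)
{
	int i;

	for (i = 0; i < 64; i++)
		notify_event();
	return 0;
}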
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index c07e08136491..50e12d662ffe 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+void sas_free_event(struct asd_sas_event *event);
+
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
@@ -98,6 +101,10 @@ int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
+void sas_destruct_devices(struct asd_sas_port *port);
+
+extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
+extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index cdee446c29e1..bf3e1b979ca6 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -45,7 +45,7 @@ static void sas_phye_oob_done(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
}
@@ -58,8 +58,6 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
-
sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
@@ -88,8 +86,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
-
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
@@ -99,8 +95,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
-
/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
@@ -113,45 +107,41 @@ static void sas_phye_resume_timeout(struct work_struct *work)
}
+static void sas_phye_shutdown(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (phy->enabled) {
+ int ret;
+
+ phy->error = 0;
+ phy->enabled = 0;
+ ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
+ if (ret)
+ sas_printk("lldd disable phy%02d returned %d\n",
+ phy->id, ret);
+ } else
+ sas_printk("phy%02d is not enabled, cannot shutdown\n",
+ phy->id);
+}
+
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
- static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
- [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
- [PHYE_OOB_DONE] = sas_phye_oob_done,
- [PHYE_OOB_ERROR] = sas_phye_oob_error,
- [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
- [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
-
- };
-
- static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
- [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
- [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
- [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
- [PORTE_TIMER_EVENT] = sas_porte_timer_event,
- [PORTE_HARD_RESET] = sas_porte_hard_reset,
- };
-
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
- int k;
struct asd_sas_phy *phy = sas_ha->sas_phy[i];
phy->error = 0;
+ atomic_set(&phy->event_nr, 0);
INIT_LIST_HEAD(&phy->port_phy_el);
- for (k = 0; k < PORT_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
- phy->port_events[k].phy = phy;
- }
-
- for (k = 0; k < PHY_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
- phy->phy_events[k].phy = phy;
- }
phy->port = NULL;
phy->ha = sas_ha;
@@ -179,3 +169,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
return 0;
}
+
+const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+ [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+ [PHYE_OOB_DONE] = sas_phye_oob_done,
+ [PHYE_OOB_ERROR] = sas_phye_oob_error,
+ [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+ [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+ [PHYE_SHUTDOWN] = sas_phye_shutdown,
+};
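With the handler tables now exported from sas_phy.c and sas_port.c, an
event can bind its handler at queue time instead of relying on the
per-phy static slots that the removed INIT_SAS_WORK loops used to
populate. A minimal sketch of a notifier built on the exported table,
assuming hypothetical sas_alloc_event()/sas_queue_event() helpers:

	static int example_notify_phy_event(struct asd_sas_phy *phy,
					    enum phy_event event)
	{
		struct asd_sas_event *ev;

		if (WARN_ON(event >= PHY_NUM_EVENTS))
			return -EINVAL;

		ev = sas_alloc_event(phy);	/* assumed allocation helper */
		if (!ev)
			return -ENOMEM;

		/* Bind the handler from the exported table at queue time. */
		INIT_SAS_WORK(&ev->work, sas_phy_event_fns[event]);
		ev->phy = phy;
		atomic_inc(&phy->event_nr);	/* pairs with the new counter */

		return sas_queue_event(event, &ev->work, phy->ha); /* assumed */
	}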
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index d3c5297c6c89..f07e55d3aa73 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -66,6 +66,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
+ sas_destruct_devices(port);
continue;
}
@@ -192,6 +193,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+ flush_workqueue(sas_ha->disco_q);
}
/**
@@ -219,6 +221,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
+ sas_destruct_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else {
@@ -261,8 +264,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
-
sas_form_port(phy);
}
@@ -273,14 +274,15 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;
- clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
-
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
SAS_DPRINTK("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
+
+ if (phy->port)
+ flush_workqueue(phy->port->ha->disco_q);
}
void sas_porte_link_reset_err(struct work_struct *work)
@@ -288,8 +290,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -298,8 +298,6 @@ void sas_porte_timer_event(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -308,8 +306,6 @@ void sas_porte_hard_reset(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -323,6 +319,7 @@ static void sas_init_port(struct asd_sas_port *port,
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
+ INIT_LIST_HEAD(&port->sas_port_del_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
@@ -353,3 +350,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
sas_deform_port(sas_ha->sas_phy[i], 0);
}
+
+const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+ [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+ [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+ [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+ [PORTE_TIMER_EVENT] = sas_porte_timer_event,
+ [PORTE_HARD_RESET] = sas_porte_hard_reset,
+};
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index c9406852c3e9..6de9681ace82 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include "sas_internal.h"
@@ -959,21 +960,6 @@ void sas_target_destroy(struct scsi_target *starget)
sas_put_device(found_dev);
}
-static void sas_parse_addr(u8 *sas_addr, const char *p)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++) {
- u8 h, l;
- if (!*p)
- break;
- h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- sas_addr[i] = (h<<4) | l;
- }
-}
-
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
@@ -990,7 +976,9 @@ int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
goto out;
}
- sas_parse_addr(addr, fw->data);
+ res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
+ if (res)
+ goto out;
out:
release_firmware(fw);
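hex2bin() from lib/hexdump.c performs the same nibble conversion the
removed sas_parse_addr() hand-rolled, but returns nonzero on a non-hex
character, so the caller can now propagate a real error instead of
silently accepting garbage. Illustrative usage (the address string here
is made up for the example):

	u8 addr[SAS_ADDR_SIZE];
	const char buf[] = "5000C50012AB34CD";	/* 16 hex chars = 8 bytes */

	if (hex2bin(addr, buf, SAS_ADDR_SIZE))
		pr_err("invalid SAS address string\n");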
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 231302273257..61fb46da05d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -55,9 +55,10 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
+#define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -705,7 +706,6 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
-#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -945,6 +945,8 @@ struct lpfc_hba {
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
+ uint32_t get_nvme_bufs;
+ uint32_t put_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82f6e219ee34..d188fb565a32 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,6 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot;
@@ -198,10 +199,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,6 +242,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -271,6 +283,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
+ lport = (struct lpfc_nvme_lport *)localport->private;
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
@@ -347,9 +360,16 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %016x Cmpl %016x\n",
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests),
- atomic_read(&phba->fc4NvmeLsCmpls));
+ atomic_read(&phba->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
@@ -360,8 +380,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data1, data2, data3);
len += snprintf(buf+len, PAGE_SIZE-len,
- " Cmpl %016llx Outstanding %016llx\n",
- tot, (data1 + data2 + data3) - tot);
+ " noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " Cmpl %016llx Outstanding %016llx Abort %08x\n",
+ tot, ((data1 + data2 + data3) - tot),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
return len;
}
@@ -3366,12 +3400,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
- 1, 1, 16,
+ LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
"Specify number of RQ pairs for processing NVMET cmds");
/*
@@ -5139,7 +5174,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
/*
@@ -6362,6 +6397,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}
+ if (!phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6407,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}
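Taken together, the AUTO default above and the clamps in
lpfc_nvme_mod_param_dep() reduce to a small sizing rule. A condensed
sketch (LPFC_NVMET_MRQ_AUTO is evidently 0, given the
!phba->cfg_nvmet_mrq test; the exact value of LPFC_NVMET_MRQ_MAX is an
assumption here, presumably 16 to match the old upper bound):

	static int example_resolve_nvmet_mrq(int cfg_mrq, int nvme_io_channel)
	{
		if (cfg_mrq == LPFC_NVMET_MRQ_AUTO)	/* 0: derive from IO channels */
			cfg_mrq = nvme_io_channel;
		if (cfg_mrq > nvme_io_channel)		/* never more MRQs than channels */
			cfg_mrq = nvme_io_channel;
		if (cfg_mrq > LPFC_NVMET_MRQ_MAX)	/* hard cap */
			cfg_mrq = LPFC_NVMET_MRQ_MAX;
		return cfg_mrq;
	}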
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4e858b38529a..559f9aa0ed08 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,8 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
struct lpfc_nvmet_ctxbuf *ctxp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
+void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
+void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f77673ab4a84..9d20d2c208c7 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -685,6 +686,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_els_flush_rscn(vport);
goto out;
}
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_RSCN_DEFERRED) {
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * Skip processing the NS response
+ * Re-issue the NS cmd
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0151 Process Deferred RSCN Data: x%x x%x\n",
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ lpfc_els_handle_rscn(vport);
+
+ goto out;
+ }
+ spin_unlock_irq(shost->host_lock);
+
if (irsp->ulpStatus) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2bf5ad3b1512..17ea3bb04266 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -750,6 +750,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -775,10 +777,15 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}
len += snprintf(buf + len, size - len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, size - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
@@ -812,6 +819,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf + len, size - len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, size - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -885,8 +898,38 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
data1, data2, data3);
len += snprintf(buf + len, size - len,
- " Cmpl %016llx Outstanding %016llx\n",
+ " Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
+
+ localport = vport->localport;
+ if (!localport)
+ return len;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ if (!lport)
+ return len;
+
+ len += snprintf(buf + len, size - len,
+ "LS Xmt Err: Abrt %08x Err %08x "
+ "Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_abort),
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Xmt Err: noxri %06x nondlp %06x "
+ "qdepth %06x wqerr %06x Abrt %06x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+
}
return len;
@@ -3213,7 +3256,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
}
- if (eqidx < phba->cfg_nvmet_mrq) {
+ if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
@@ -3246,7 +3289,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx eqd %d]\n",
+ "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
@@ -3366,6 +3409,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
+ qp = phba->sli4_hba.hdr_rq;
+ len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
+ "ELS RQpair", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
+
/* Slow-path NVME LS response CQ */
qp = phba->sli4_hba.nvmels_cq;
len = __lpfc_idiag_print_cq(qp, "NVME LS",
@@ -3383,12 +3432,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
- qp = phba->sli4_hba.hdr_rq;
- len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
- "RQpair", pbuffer, len);
- if (len >= max_cnt)
- goto too_big;
-
goto out;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f9a566eaef04..5a7547f9d8d8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -134,6 +134,8 @@ struct lpfc_nodelist {
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
uint32_t upcall_flags;
+#define NLP_WAIT_FOR_UNREG 0x1
+
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 39d5b146202e..234c7c015982 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -858,6 +858,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
@@ -916,28 +919,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ } else {
/* This side will wait for the PLOGI, decrement ndlp reference
* count indicating that ndlp can be released when other
* references to it are done.
*/
lpfc_nlp_put(ndlp);
- /* If we are pt2pt with another NPort, force NPIV off! */
- phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
+ /* Start discovery - this should just do CLEAR_LA */
+ lpfc_disc_start(vport);
}
return 0;
@@ -1030,30 +1034,31 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
- "Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE)))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2858 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x Data x%x x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, phba->hba_flag,
+ phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- /* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
-
-
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1670,6 +1675,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
+ lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -2088,6 +2094,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
+
+ /* Driver supports multiple FC4 types. Counters matter. */
+ vport->fc_prli_sent--;
+ ndlp->fc4_prli_sent--;
spin_unlock_irq(shost->host_lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2095,9 +2105,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
- /* Ddriver supports multiple FC4 types. Counters matter. */
- vport->fc_prli_sent--;
-
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
@@ -2111,7 +2118,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/* Check for retry */
- ndlp->fc4_prli_sent--;
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
goto out;
@@ -2190,6 +2196,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
+ /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
+ * fields here before any of them can complete.
+ */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ ndlp->nvme_fb_size = 0;
+
send_next_prli:
if (local_nlp_type & NLP_FC4_FCP) {
/* Payload is 4 + 16 = 20 x14 bytes. */
@@ -2298,6 +2313,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
+
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
+ vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
@@ -2308,12 +2330,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
- /* The vport counters are used for lpfc_scan_finished, but
- * the ndlp is used to track outstanding PRLIs for different
- * FC4 types.
- */
- vport->fc_prli_sent++;
- ndlp->fc4_prli_sent++;
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2951,8 +2967,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
-
- lpfc_nlp_put(ndlp);
+ if (!(vport->fc_flag & FC_PT2PT))
+ lpfc_nlp_put(ndlp);
return 0;
}
@@ -6172,9 +6188,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6849,7 +6862,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
/* use the command's xri in the response */
@@ -8060,13 +8073,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
-
- /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
- if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- rjt_exp = LSEXP_REQ_UNSUPPORTED;
- break;
- }
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
@@ -8149,9 +8155,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_nlp_put(ndlp);
break;
case ELS_CMD_REC:
- /* receive this due to exchange closed */
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_exp = LSEXP_INVALID_OX_RX;
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2bafde2b7cfe..b159a5c4e388 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
- if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
- lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -4178,12 +4176,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0)
- /* Start devloss */
- lpfc_nvme_unregister_port(vport, ndlp);
- else
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
+ }
}
}
@@ -4207,11 +4207,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
- * Initiators take the NDLP ref count in
- * the register.
+ * Only NVME Target Rports are registered with
+ * the transport.
*/
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
} else {
/* Just take an NDLP ref count since the
* target does not register rports.
@@ -5838,9 +5840,12 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3185 FIND node filter %p DID "
- "Data: x%p x%x x%x\n",
+ "ndlp %p did x%x flg x%x st x%x "
+ "xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag);
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_xri, ndlp->nlp_type,
+ ndlp->nlp_rpi);
return ndlp;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2b145966c73f..73c2f6971d2b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1122,6 +1122,7 @@ struct cq_context {
#define LPFC_CQ_CNT_256 0x0
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
+#define LPFC_CQ_CNT_WORD7 0x3
uint32_t word1;
#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF
@@ -1129,7 +1130,7 @@ struct cq_context {
#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
#define lpfc_cq_eq_id_2_WORD word1
- uint32_t reserved0;
+ uint32_t lpfc_cq_context_count; /* Version 2 Only */
uint32_t reserved1;
};
@@ -1193,6 +1194,9 @@ struct lpfc_mbx_cq_create_set {
#define lpfc_mbx_cq_create_set_arm_SHIFT 31
#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
#define lpfc_mbx_cq_create_set_arm_WORD word2
+#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16
+#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF
+#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2
#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_cq_WORD word2
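The new LPFC_CQ_CNT_WORD7 code and the version-2 lpfc_cq_context_count
word suggest that entry counts beyond the three fixed encodings are
flagged in the count field and carried literally in the new word. A
hedged sketch of that encoding (cq_set_count_bits() is a placeholder
for the driver's real bf_set()-style accessor; only LPFC_CQ_CNT_* and
lpfc_cq_context_count come from the header above):

	if (cq->entry_count > 1024) {
		cq_set_count_bits(context, LPFC_CQ_CNT_WORD7);
		context->lpfc_cq_context_count = cq->entry_count;
	} else if (cq->entry_count == 1024) {
		cq_set_count_bits(context, LPFC_CQ_CNT_1024);
	}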
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b7ea7e53e12..f539c554588c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ int cnt;
lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ cnt = 0;
list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
+ cnt++;
}
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ phba->put_nvme_bufs += cnt;
list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
@@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
+ phba->put_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
uint16_t i, lxri, els_xri_cnt;
uint16_t nvme_xri_cnt, nvme_xri_max;
LIST_HEAD(nvme_sgl_list);
- int rc;
+ int rc, cnt;
phba->total_nvme_bufs = 0;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return 0;
@@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock_irq(&phba->nvme_buf_list_get_lock);
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = cnt;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize the NVME buffer list used by driver for NVME IO */
spin_lock_init(&phba->nvme_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = 0;
spin_lock_init(&phba->nvme_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
}
/* Initialize the fabric iocb list */
@@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;
phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5875,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
/*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
@@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
@@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->nvmet_support) {
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ }
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,10 +7982,10 @@ static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- int cnt;
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
}
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
- cnt = LPFC_NVME_WQSIZE;
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7987,11 +8011,18 @@ static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the CQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
/* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the WQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP WQ (%d)\n",
@@ -8173,7 +8210,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
@@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path Mailbox Command Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = qdesc;
/* Create slow-path ELS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create Mailbox Command Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path ELS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Create NVME LS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.nvmels_cq = qdesc;
/* Create NVME LS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create Receive Queue for header */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq = qdesc;
/* Create Receive Queue for data */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
- phba->cfg_nvmet_mrq);
+ if (phba->nvmet_support) {
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
- phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
- phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+ }
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
qidx, (uint32_t)rc);
return rc;
}
+ cq->chann = qidx;
if (qtype != LPFC_MBOX) {
/* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
/* no need to tear down cq - caller will do so */
return rc;
}
+ wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
@@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
+ phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
@@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
- /* Unset NVMET MRQ queue */
- if (phba->sli4_hba.nvmet_mrq_hdr) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_rq_destroy(phba,
+ if (phba->nvmet_support) {
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(
+ phba,
phba->sli4_hba.nvmet_mrq_hdr[qidx],
phba->sli4_hba.nvmet_mrq_data[qidx]);
- }
+ }
- /* Unset NVMET CQ Set complete queue */
- if (phba->sli4_hba.nvmet_cqset) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_cq_destroy(phba,
- phba->sli4_hba.nvmet_cqset[qidx]);
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(
+ phba, phba->sli4_hba.nvmet_cqset[qidx]);
+ }
}
/* Unset FCP response complete queue */
@@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- /* Pending NVME XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- &cqelist);
- }
/* Pending async events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
@@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
lpfc_sli4_bar0_register_memmap(phba, if_type);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
- /*
- * Map SLI4 if type 0 HBA Control Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->sli4_hba.ctrl_regs_memmap_p =
- ioremap(phba->pci_bar1_map, bar1map_len);
- if (!phba->sli4_hba.ctrl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA control registers.\n");
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+ /*
+ * Map SLI4 if type 0 HBA Control Register base to a
+ * kernel virtual address and setup the registers.
+ */
+ phba->pci_bar1_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map,
+ bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA "
+ "control registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_conf;
+ }
+ phba->pci_bar2_memmap_p =
+ phba->sli4_hba.ctrl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba);
+ } else {
+ error = -ENOMEM;
goto out_iounmap_conf;
}
- phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
- lpfc_sli4_bar1_register_memmap(phba);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
- /*
- * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->sli4_hba.drbl_regs_memmap_p =
- ioremap(phba->pci_bar2_map, bar2map_len);
- if (!phba->sli4_hba.drbl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA doorbell registers.\n");
- goto out_iounmap_ctrl;
- }
- phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
- error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
- if (error)
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+ /*
+ * Map SLI4 if type 0 HBA Doorbell Register base to
+ * a kernel virtual address and setup the registers.
+ */
+ phba->pci_bar2_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map,
+ bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA"
+ " doorbell registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_ctrl;
+ }
+ phba->pci_bar4_memmap_p =
+ phba->sli4_hba.drbl_regs_memmap_p;
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+ } else {
+ error = -ENOMEM;
goto out_iounmap_all;
+ }
}
return 0;
@@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ /* Driver just aborted IOs during the hba_unset process. Pause
+ * here to give the HBA time to complete the IO and get entries
+ * into the abts lists.
+ */
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+ /* Wait for NVME pending IO to flush back to transport. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_nvme_wait_for_io_drain(phba);
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
@@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
+
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
}
/**
@@ -12138,10 +12227,10 @@ int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc)
goto out_error;
@@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
if (phba->cfg_fof) {
/* Create OAS CQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc)
goto out_error;
@@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
-
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc)
goto out_error;
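Every allocation site in this file now passes an explicit page size, so
expanded queues (128-byte WQEs on larger pages) and default queues
share one allocator. The recurring call pattern, condensed from the
hunks above:

	struct lpfc_queue *qdesc;

	if (phba->fcp_embed_io)		/* WQEs carry an embedded CDB */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      LPFC_WQE128_SIZE,
					      LPFC_WQE_EXP_COUNT);
	else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
	if (!qdesc)
		goto out_error;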
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b6957d944b9a..d841aa42f607 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
break;
}
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -727,6 +732,41 @@ out:
return 0;
}
+static uint32_t
+lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct ls_rjt stat;
+ uint32_t *payload;
+ uint32_t cmd;
+
+ payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ cmd = *payload;
+ if (vport->phba->nvmet_support) {
+ /* Must be a NVME PRLI */
+ if (cmd == ELS_CMD_PRLI)
+ goto out;
+ } else {
+ /* Initiator mode. */
+ if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
+ goto out;
+ }
+ return 1;
+out:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+ "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
+ "state x%x flags x%x\n",
+ cmd, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
+ return 0;
+}
+
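The new check is then wired into each PRLI receive path in the state
machine (see the hunks below), always with the same guard shape so a
rejected PRLI leaves the node state untouched:

	if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
		return ndlp->nlp_state;	/* rejected; state unchanged */
	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);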
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -742,9 +782,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lp = (uint32_t *) pcmd->virt;
npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
if (npr->initiatorFunc) {
@@ -769,8 +806,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* type. Target mode does not issue gft_id so doesn't get
* the fc4 type set until now.
*/
- if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+ if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+ if (npr->prliType == PRLI_FCP_TYPE)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
}
if (rport) {
/* We need to update the rport role values */
@@ -1373,7 +1414,8 @@ lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1544,6 +1586,9 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
+ return ndlp->nlp_state;
+ }
if (vport->phba->nvmet_support) {
/* NVME Target mode. Handle and respond to the PRLI and
* transition to UNMAPPED provided the RPI has completed
@@ -1552,28 +1597,22 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else {
/* RPI registration has not completed. Reject the PRLI
* to prevent an illegal state transition when the
* rpi registration does complete.
*/
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
- "6115 NVMET ndlp rpi %d state "
- "unknown, state x%x flags x%08x\n",
- ndlp->nlp_rpi, ndlp->nlp_state,
- ndlp->nlp_flag);
memset(&stat, 0, sizeof(struct ls_rjt));
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, NULL);
+ return ndlp->nlp_state;
}
} else {
/* Initiator mode. */
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
}
-
return ndlp->nlp_state;
}
@@ -1819,6 +1858,8 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1922,13 +1963,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
}
- /* Check out PRLI rsp */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-
- /* NVME or FCP first burst must be negotiated for each PRLI. */
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
- ndlp->nvme_fb_size = 0;
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -1945,8 +1979,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
@@ -1991,8 +2023,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
be32_to_cpu(nvpr->word5),
ndlp->nlp_flag, ndlp->nlp_fcp_info,
ndlp->nlp_type);
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
@@ -2016,7 +2046,8 @@ out_err:
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
- else
+ else if (ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else
lpfc_printf_vlog(vport,
@@ -2241,6 +2272,9 @@ lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
+
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
@@ -2310,6 +2344,8 @@ lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 517ae570e507..81e3a4f10c3c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -57,11 +57,13 @@
/* NVME initiator-based functions */
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite);
static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+static struct nvme_fc_port_template lpfc_nvme_template;
/**
* lpfc_nvme_create_queue -
@@ -88,6 +90,9 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *qhandle;
char *str;
+ if (!pnvme_lport->private)
+ return -ENOMEM;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
@@ -140,6 +145,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
+ if (!pnvme_lport->private)
+ return;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
@@ -154,6 +162,10 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
struct lpfc_nvme_lport *lport = localport->private;
+ lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
+ "6173 localport %p delete complete\n",
+ lport);
+
/* release any threads waiting for the unreg to complete */
complete(&lport->lport_unreg_done);
}
@@ -189,16 +201,19 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete complete %p\n",
+ "6146 remoteport delete of remoteport %p\n",
remoteport);
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
+
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
lpfc_nlp_put(ndlp);
rport_err:
- /* This call has to execute as long as the rport is valid.
- * Release any threads waiting for the unreg to complete.
- */
- complete(&rport->rport_unreg_done);
+ return;
}
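
With rport_unreg_done gone, remoteport teardown now rides on node reference counting: the reference taken when the remoteport was registered is dropped here in the transport's delete callback, and the node is freed only when its last holder lets go. A minimal user-space sketch of that ownership pattern; the atomic counter and all names below merely stand in for lpfc_nlp_get()/lpfc_nlp_put() and are illustrative, not the driver's code.

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

/* Hypothetical node with an atomic refcount standing in for the
 * driver's lpfc_nlp_get()/lpfc_nlp_put() pair.
 */
struct node {
	atomic_int refs;
};

static struct node *node_get(struct node *n)
{
	atomic_fetch_add(&n->refs, 1);
	return n;
}

static void node_put(struct node *n)
{
	/* Free on the 1 -> 0 transition. */
	if (atomic_fetch_sub(&n->refs, 1) == 1) {
		printf("last reference dropped, freeing node\n");
		free(n);
	}
}

/* Transport delete callback: drop the reference taken at register
 * time, so the node outlives every transport access to the rport.
 */
static void remoteport_delete_cb(struct node *n)
{
	node_put(n);
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	atomic_init(&n->refs, 1);	/* discovery reference */
	node_get(n);			/* register-time reference */
	remoteport_delete_cb(n);	/* transport is done with it */
	node_put(n);			/* discovery teardown frees it */
	return 0;
}
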
static void
@@ -206,6 +221,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
+ struct lpfc_nvme_lport *lport;
uint32_t status;
struct nvmefc_ls_req *pnvme_lsreq;
struct lpfc_dmabuf *buf_ptr;
@@ -215,6 +231,13 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
@@ -419,6 +442,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (vport->load_flag & FC_UNLOADING)
return -ENODEV;
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -490,6 +516,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
ndlp, 2, 30, 0);
if (ret != WQE_SUCCESS) {
+ atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6052 EXIT. issue ls wqe failed lport %p, "
"rport %p lsreq%p Status %x DID %x\n",
@@ -534,6 +561,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -571,6 +601,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+ atomic_inc(&lport->xmt_ls_abort);
spin_lock_irq(&phba->hbalock);
list_del_init(&wqe->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, wqe);
@@ -774,8 +805,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct lpfc_nvme_lport *lport;
unsigned long flags;
- uint32_t code;
+ uint32_t code, status;
uint16_t cid, sqhd, data;
uint32_t *ptr;
@@ -790,10 +822,17 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_fcp_xb);
+ atomic_inc(&lport->cmpl_fcp_err);
+ }
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
- bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+ status, wcqe->parameter);
/*
* Catch race where our node has transitioned, but the
* transport is still transitioning.
@@ -851,8 +890,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
nCmd->transferred_length = nCmd->payload_length;
} else {
- lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
- LPFC_IOCB_STATUS_MASK);
+ lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
@@ -946,10 +984,13 @@ out_err:
freqpriv->nvme_buf = NULL;
/* NVME targets need completion held off until the abort exchange
- * completes.
+ * completes unless the NVME Rport is getting unregistered.
*/
- if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
nCmd->done(nCmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
@@ -1149,7 +1190,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
+ if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
@@ -1239,6 +1280,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct nvmefc_fcp_req *pnvme_fcreq)
{
int ret = 0;
+ int expedite = 0;
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -1246,13 +1288,30 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
#endif
+	/* Validate pointers. Fault handling between the LLDD and the
+	 * transport has timing races.
+	 */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+				 "6117 Fail IO, NULL hw_queue_handle\n");
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
phba = vport->phba;
if (vport->load_flag & FC_UNLOADING) {
@@ -1260,16 +1319,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- /* Validate pointers. */
- if (!pnvme_lport || !pnvme_rport || !freqpriv) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
- "6117 No Send:IO submit ptrs NULL, lport %p, "
- "rport %p fcreq_priv %p\n",
- pnvme_lport, pnvme_rport, freqpriv);
+ if (vport->load_flag & FC_UNLOADING) {
ret = -ENODEV;
goto out_fail;
}
+ freqpriv = pnvme_fcreq->private;
+ if (unlikely(!freqpriv)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
@@ -1293,6 +1353,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6066 Missing node for DID %x\n",
pnvme_rport->port_id);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
@@ -1306,21 +1367,36 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
+	/* Currently only NVME keep-alive commands should be expedited
+	 * when the driver is running low on resources. These are only
+	 * issued on the admin queue, qidx 0.
+	 */
+ if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
+ sqe = &((struct nvme_fc_cmd_iu *)
+ pnvme_fcreq->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_keep_alive)
+ expedite = 1;
+ }
+
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
}
- lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
+ lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
if (lpfc_ncmd == NULL) {
+ atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6065 driver's buffer pool is empty, "
"IO failed\n");
@@ -1373,6 +1449,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
+ atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
@@ -1473,19 +1550,36 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nvme_rport *rport;
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
union lpfc_wqe *abts_wqe;
unsigned long flags;
int ret_val;
+	/* Validate pointers. Fault handling between the LLDD and the
+	 * transport has timing races.
+	 */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport))
+ return;
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6129 Fail Abort, HW Queue Handle NULL.\n");
+ return;
+ }
+
phba = vport->phba;
+ freqpriv = pnvme_fcreq->private;
+
+ if (unlikely(!freqpriv))
+ return;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
@@ -1552,6 +1646,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ atomic_inc(&lport->xmt_fcp_abort);
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
nvmereq_wqe->sli4_xritag,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
@@ -1931,6 +2026,8 @@ lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -2067,6 +2164,20 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
return num_posted;
}
+static inline struct lpfc_nvme_buf *
+lpfc_nvme_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ list_del_init(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
+ return lpfc_ncmd;
+ }
+ return NULL;
+}
+
/**
* lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
@@ -2079,35 +2190,27 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* Pointer to lpfc_nvme_buf - Success
**/
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite)
{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ struct lpfc_nvme_buf *lpfc_ncmd = NULL;
unsigned long iflag = 0;
- int found = 0;
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
- if (!found) {
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
+ if (!lpfc_ncmd) {
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice(&phba->lpfc_nvme_buf_list_put,
&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs += phba->put_nvme_bufs;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
- if (!found)
- return NULL;
return lpfc_ncmd;
}
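
The rewritten get path only dips below a small reserve for expedited requests: ordinary IO comes up empty once the get list holds LPFC_NVME_EXPEDITE_XRICNT buffers or fewer, while an admin-queue keep-alive may still take from whatever remains. A toy sketch of that watermark rule, assuming a bare counter in place of the driver's locked buffer lists; all names are hypothetical.

#include <stdio.h>

#define EXPEDITE_RESERVE 8	/* mirrors LPFC_NVME_EXPEDITE_XRICNT */

/* 'avail' stands in for phba->get_nvme_bufs. Ordinary requests may
 * not dig into the last EXPEDITE_RESERVE buffers; an expedited
 * request (e.g. a keep-alive) may, as long as anything is left.
 */
static int pool_take(int *avail, int expedite)
{
	if (*avail > EXPEDITE_RESERVE || (expedite && *avail > 0)) {
		(*avail)--;
		return 0;	/* got a buffer */
	}
	return -1;		/* would eat into the reserve */
}

int main(void)
{
	int avail = 9;
	int rc;

	rc = pool_take(&avail, 0);
	printf("normal:   rc=%d avail=%d\n", rc, avail);	/* rc=0  */
	rc = pool_take(&avail, 0);
	printf("normal:   rc=%d avail=%d\n", rc, avail);	/* rc=-1 */
	rc = pool_take(&avail, 1);
	printf("expedite: rc=%d avail=%d\n", rc, avail);	/* rc=0  */
	return 0;
}
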
@@ -2145,6 +2248,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs++;
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
}
}
@@ -2221,6 +2325,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
vport->nvmei_support = 1;
+ atomic_set(&lport->xmt_fcp_noxri, 0);
+ atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
+ atomic_set(&lport->xmt_fcp_qdepth, 0);
+ atomic_set(&lport->xmt_fcp_wqerr, 0);
+ atomic_set(&lport->xmt_fcp_abort, 0);
+ atomic_set(&lport->xmt_ls_abort, 0);
+ atomic_set(&lport->xmt_ls_err, 0);
+ atomic_set(&lport->cmpl_fcp_xb, 0);
+ atomic_set(&lport->cmpl_fcp_err, 0);
+ atomic_set(&lport->cmpl_ls_xb, 0);
+ atomic_set(&lport->cmpl_ls_err, 0);
+
/* Don't post more new bufs if repost already recovered
* the nvme sgls.
*/
@@ -2234,6 +2350,47 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
return ret;
}
+/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
+ *
+ * The driver has to wait for the host nvme transport to call back
+ * indicating the localport has successfully unregistered all
+ * resources. Since this is an uninterruptible wait, loop every ten
+ * seconds and print a message indicating no progress.
+ *
+ * An uninterruptible wait is used because of the risk of transport-to-
+ * driver state mismatch.
+ */
+void
+lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+ struct lpfc_nvme_lport *lport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u32 wait_tmo;
+ int ret;
+
+	/* The host transport has to clean up and confirm, which requires
+	 * an indefinite wait. If a 10 second wait expires, print a message
+	 * and renew the wait; hitting the timeout at all is unexpected.
+	 */
+ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ while (true) {
+ ret = wait_for_completion_timeout(&lport->lport_unreg_done,
+ wait_tmo);
+ if (unlikely(!ret)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6176 Lport %p Localport %p wait "
+ "timed out. Renewing.\n",
+ lport, vport->localport);
+ continue;
+ }
+ break;
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6177 Lport %p Localport %p Complete Success\n",
+ lport, vport->localport);
+#endif
+}
+
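
The loop above trades the old bounded wait (the code it replaces waited a mere 5 jiffies) for an unbounded but audible one. A user-space analogue of the same shape, assuming a polled flag in place of the kernel completion; names are illustrative.

#include <stdio.h>
#include <stdatomic.h>
#include <time.h>

/* Poll a done flag forever, but complain every 10 seconds so a hung
 * unregister is visible rather than silent.
 */
static void wait_with_renewal(atomic_int *done)
{
	const struct timespec tick = { .tv_sec = 0, .tv_nsec = 10000000 };
	unsigned long waited_ms = 0;

	while (!atomic_load(done)) {
		nanosleep(&tick, NULL);	/* 10 ms per poll */
		waited_ms += 10;
		if (waited_ms % 10000 == 0)
			fprintf(stderr, "wait timed out after %lu ms, "
				"renewing\n", waited_ms);
	}
}

int main(void)
{
	atomic_int done;

	atomic_init(&done, 1);	/* pretend the callback already fired */
	wait_with_renewal(&done);
	puts("unregister confirmed");
	return 0;
}
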
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
* @pnvme: pointer to lpfc nvme data structure.
@@ -2268,7 +2425,11 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
*/
init_completion(&lport->lport_unreg_done);
ret = nvme_fc_unregister_localport(localport);
- wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+	/* Wait for completion. This blocks indefinitely until the
+	 * transport confirms the unregister has finished.
+	 */
+ lpfc_nvme_lport_unreg_wait(vport, lport);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@@ -2365,6 +2526,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ if (!ndlp->nrport)
+ lpfc_nlp_get(ndlp);
+
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
/* If the ndlp already has an nrport, this is just
@@ -2373,23 +2537,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
rport = remote_port->private;
if (ndlp->nrport) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "rport wwpn 0x%llx, "
- "Data: x%x x%x x%x x%06x\n",
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- ndlp->nlp_type,
- ndlp->nlp_DID);
+ if (ndlp->nrport == remote_port->private) {
+ /* Same remoteport. Just reuse. */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "remoteport %p wwpn 0x%llx, "
+ "Data: x%x x%x %p x%x x%06x\n",
+ remote_port,
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
+ ndlp,
+ ndlp->nlp_type,
+ ndlp->nlp_DID);
+ return 0;
+ }
prev_ndlp = rport->ndlp;
- /* Sever the ndlp<->rport connection before dropping
- * the ndlp ref from register.
+ /* Sever the ndlp<->rport association
+ * before dropping the ndlp ref from
+ * register.
*/
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
rport->ndlp = NULL;
+ rport->remoteport = NULL;
if (prev_ndlp)
lpfc_nlp_put(ndlp);
}
@@ -2397,19 +2571,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Clean bind the rport to the ndlp. */
rport->remoteport = remote_port;
rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
+ rport->ndlp = ndlp;
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = rport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
+ "lport %p Remoteport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
+ "x%06x Role x%x, ndlp %p\n",
+ lport, remote_port,
rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ rpinfo.port_id, rpinfo.port_role,
+ ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2473,20 +2648,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
*/
- if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
- init_completion(&rport->rport_unreg_done);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
+ ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
+ lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
}
-
}
return;
@@ -2545,8 +2720,11 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
@@ -2558,3 +2736,45 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
"6312 XRI Aborted xri x%x not found\n", xri);
}
+
+/**
+ * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all wqes in the nvme rings and frees all resources
+ * in the txcmplq. It does not issue abort wqes for the IO commands in
+ * the txcmplq; they are simply returned with IOERR_SLI_DOWN. It is
+ * invoked during EEH recovery when the device's PCI slot has been
+ * permanently disabled.
+ **/
+void
+lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ u32 i, wait_cnt = 0;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return;
+
+ /* Cycle through all NVME rings and make sure all outstanding
+ * WQEs have been removed from the txcmplqs.
+ */
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+
+ /* Retrieve everything on the txcmplq */
+ while (!list_empty(&pring->txcmplq)) {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_cnt++;
+
+			/* The sleep is 10 ms. If the txcmplq is still not
+			 * empty after ten seconds, dump a message; something
+			 * is wrong.
+			 */
+ if ((wait_cnt % 1000) == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6178 NVME IO not empty, "
+ "cnt %d\n", wait_cnt);
+ }
+ }
+ }
+}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index d192bb268f99..e79f8f75758c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -22,10 +22,12 @@
********************************************************************/
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVME_WQSIZE 256
#define LPFC_NVME_ERSP_LEN 0x20
+#define LPFC_NVME_WAIT_TMO 10
+#define LPFC_NVME_EXPEDITE_XRICNT 8
+
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
@@ -36,7 +38,18 @@ struct lpfc_nvme_qhandle {
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
- /* Add sttats counters here */
+ /* Add stats counters here */
+ atomic_t xmt_fcp_noxri;
+ atomic_t xmt_fcp_bad_ndlp;
+ atomic_t xmt_fcp_qdepth;
+ atomic_t xmt_fcp_wqerr;
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_ls_abort;
+ atomic_t xmt_ls_err;
+ atomic_t cmpl_fcp_xb;
+ atomic_t cmpl_fcp_err;
+ atomic_t cmpl_ls_xb;
+ atomic_t cmpl_ls_err;
};
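
The counters above are bumped on the transmit and completion paths patched earlier in this series and are cheap to sample at any time. One plausible consumer is a debugfs-style dump that snapshots them into a text buffer; the sketch below assumes a reduced counter set, and the format string is illustrative rather than the driver's actual debugfs layout.

#include <stdio.h>
#include <stdatomic.h>

/* Reduced stand-in for the counters in struct lpfc_nvme_lport. */
struct lport_stats {
	atomic_int xmt_fcp_abort;
	atomic_int cmpl_fcp_err;
};

/* Snapshot the counters into a caller-supplied buffer, the way a
 * debugfs read handler might. Layout is illustrative only.
 */
static int lport_stats_snprintf(char *buf, size_t len,
				struct lport_stats *s)
{
	return snprintf(buf, len, "FCP: abort %d cmpl err %d\n",
			atomic_load(&s->xmt_fcp_abort),
			atomic_load(&s->cmpl_fcp_err));
}

int main(void)
{
	struct lport_stats s;
	char line[64];

	atomic_init(&s.xmt_fcp_abort, 0);
	atomic_init(&s.cmpl_fcp_err, 0);
	atomic_fetch_add(&s.xmt_fcp_abort, 1);	/* an abort was issued */
	lport_stats_snprintf(line, sizeof(line), &s);
	fputs(line, stdout);
	return 0;
}
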
struct lpfc_nvme_rport {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 84cf1b9079f7..8dbf5c9d51aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -38,6 +38,7 @@
#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
@@ -126,10 +127,17 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (status)
- atomic_inc(&tgtp->xmt_ls_rsp_error);
- else
- atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ if (tgtp) {
+ if (status) {
+ atomic_inc(&tgtp->xmt_ls_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_ls_rsp_aborted);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
+ } else {
+ atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ }
+ }
out:
rsp = &ctxp->ctx.ls_req;
@@ -218,6 +226,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -253,6 +262,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d "
+ "from %06x\n",
+ oxid, size, sid);
+ /* defer repost rcv buffer till .defer_rcv callback */
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -519,8 +539,11 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (status) {
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
rsp->transferred_length = 0;
- if (tgtp)
+ if (tgtp) {
atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
+ }
logerr = LOG_NVME_IOERR;
@@ -528,6 +551,8 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;
logerr |= LOG_NVME_ABTS;
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -635,6 +660,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return -ENODEV;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -721,6 +749,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ rc = -ENODEV;
+ goto aerr;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -823,6 +856,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -910,7 +946,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ else
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1216,6 +1256,8 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_ls_rsp, 0);
atomic_set(&tgtp->xmt_ls_drop, 0);
atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
@@ -1228,7 +1270,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_fcp_release, 0);
atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
atomic_set(&tgtp->xmt_fcp_abort, 0);
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
atomic_set(&tgtp->xmt_abort_unsol, 0);
@@ -1270,6 +1315,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
int rrq_empty = 0;
@@ -1280,6 +1326,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
+
+ if (phba->targetport) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
+ }
+
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1682,6 +1734,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1715,6 +1768,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
@@ -1726,10 +1780,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid);
/* defer reposting rcv buffer till .defer_rcv callback */
- ctxp->rqb_buffer = nvmebuf;
+ ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
+ ctxp->rqb_buffer = nvmebuf;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1992,7 +2047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 25a65b0bb7f3..5b32c9e4d4ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -25,6 +25,10 @@
#define LPFC_NVMET_RQE_DEF_COUNT 512
#define LPFC_NVMET_SUCCESS_LEN 12
+#define LPFC_NVMET_MRQ_OFF 0xffff
+#define LPFC_NVMET_MRQ_AUTO 0
+#define LPFC_NVMET_MRQ_MAX 16
+
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
@@ -43,6 +47,8 @@ struct lpfc_nvmet_tgtport {
/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_aborted;
+ atomic_t xmt_ls_rsp_xb_set;
atomic_t xmt_ls_rsp_cmpl;
/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
@@ -60,12 +66,15 @@ struct lpfc_nvmet_tgtport {
atomic_t xmt_fcp_rsp;
/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_xb_set;
atomic_t xmt_fcp_rsp_cmpl;
atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_aborted;
atomic_t xmt_fcp_rsp_drop;
/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+ atomic_t xmt_fcp_xri_abort_cqe;
atomic_t xmt_fcp_abort;
atomic_t xmt_fcp_abort_cmpl;
atomic_t xmt_abort_sol;
@@ -122,6 +131,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index aecd2399005d..5f5528a12308 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int hq_put_index;
+ int dq_put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
- temp_hrqe = hq->qe[put_index].rqe;
- temp_drqe = dq->qe[dq->host_index].rqe;
+ hq_put_index = hq->host_index;
+ dq_put_index = dq->host_index;
+ temp_hrqe = hq->qe[hq_put_index].rqe;
+ temp_drqe = dq->qe[dq_put_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
- if (put_index != dq->host_index)
+ if (hq_put_index != dq_put_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
- if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+ if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
- hq->host_index = ((put_index + 1) % hq->entry_count);
- dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+ hq->host_index = ((hq_put_index + 1) % hq->entry_count);
+ dq->host_index = ((dq_put_index + 1) % dq->entry_count);
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
}
writel(doorbell.word0, hq->db_regaddr);
}
- return put_index;
+ return hq_put_index;
}
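
The rename makes the header/data pairing explicit: an RQE posts only when both rings sit at the same slot, and the ring counts as full when advancing the put index would land on the hardware's (consumer's) index, deliberately sacrificing one slot so full and empty are distinguishable. The same full test on a generic ring, with names chosen for illustration:

#include <stdio.h>

#define RING_ENTRIES 8

/* host_index is where the driver puts; hba_index is how far the
 * hardware has consumed. Full means the next put slot would collide
 * with the consumer index, so one slot is always left unused.
 */
struct ring {
	unsigned int host_index;
	unsigned int hba_index;
};

static int ring_put(struct ring *r)
{
	if (((r->host_index + 1) % RING_ENTRIES) == r->hba_index)
		return -1;	/* full: consumer has not caught up */
	r->host_index = (r->host_index + 1) % RING_ENTRIES;
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0 };
	int posted = 0;

	while (ring_put(&r) == 0)
		posted++;
	printf("posted %d of %d slots before full\n",
	       posted, RING_ENTRIES);	/* posts RING_ENTRIES - 1 */
	return 0;
}
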
/**
@@ -12318,41 +12320,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 NVME abort XRI events.
- **/
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
-{
- struct lpfc_cq_event *cq_event;
-
- /* First, declare the fcp xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
- /* Now, handle all the fcp xri abort events */
- while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
- list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support) {
- lpfc_sli4_nvmet_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- } else {
- lpfc_sli4_nvme_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- }
- /* Free the event processed back to the free pool */
- lpfc_sli4_cq_event_release(phba, cq_event);
- }
-}
-
-/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return irspiocbq;
}
+inline struct lpfc_cq_event *
+lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to alloc CQ_EVENT entry\n");
+ return NULL;
+ }
+
+ /* Move the CQE into the event */
+ memcpy(&cq_event->cqe, entry, size);
+ return cq_event;
+}
+
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
* @phba: Pointer to HBA context object.
@@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
"word2:x%x, word3:x%x\n", mcqe->word0,
mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0394 Failed to allocate CQ_EVENT entry\n");
+ cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
+ if (!cq_event)
return false;
- }
-
- /* Move the CQE into an asynchronous event entry */
- memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
/* Set the async event flag */
@@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
struct lpfc_cq_event *cq_event;
unsigned long iflags;
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0602 Failed to allocate CQ_EVENT entry\n");
- return false;
- }
-
- /* Move the CQE into the proper xri abort event list */
- memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
switch (cq->subtype) {
case LPFC_FCP:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
workposted = true;
break;
case LPFC_NVME:
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&cq_event->list,
- &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
- /* Set the nvme xri abort event flag */
- phba->hba_flag |= NVME_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ else
+ lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+
+ workposted = false;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
"%08x %08x %08x %08x\n",
cq->subtype, wcqe->word0, wcqe->parameter,
wcqe->word2, wcqe->word3);
- lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
/**
* lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
* @phba: The HBA that this queue is being created on.
+ * @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
*
@@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* queue on the HBA.
**/
struct lpfc_queue *
-lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
- uint32_t entry_count)
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+ uint32_t entry_size, uint32_t entry_count)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
@@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = page_size;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
+
+ /* Set queue parameters now. If the system cannot provide memory
+ * resources, the free routine needs to know what was allocated.
+ */
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+ queue->page_size = hw_page_size;
+ queue->phba = phba;
+
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
@@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->qe[total_qe_count].address = dma_pointer;
}
}
- queue->entry_size = entry_size;
- queue->entry_count = entry_count;
- queue->phba = phba;
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
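
Hoisting those assignments above the page allocation loop is an error-unwinding precaution: if a page allocation fails partway through, lpfc_sli4_queue_free() runs against a queue whose size fields already describe what was actually allocated. The same init-before-allocate shape in a self-contained sketch; all names here are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct toyq {
	size_t page_size;
	size_t page_count;	/* pages successfully allocated so far */
	void **pages;
};

/* The free routine trusts the size fields even for a partially
 * constructed queue, so they must be valid before any allocation.
 */
static void toyq_free(struct toyq *q)
{
	for (size_t i = 0; q->pages && i < q->page_count; i++)
		free(q->pages[i]);
	free(q->pages);
	free(q);
}

static struct toyq *toyq_alloc(size_t pages, size_t page_size)
{
	struct toyq *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	q->page_size = page_size;	/* set parameters first */
	q->pages = calloc(pages, sizeof(*q->pages));
	if (!q->pages)
		goto fail;
	for (size_t i = 0; i < pages; i++) {
		q->pages[i] = malloc(page_size);
		if (!q->pages[i])
			goto fail;	/* page_count covers what we got */
		q->page_count++;
	}
	return q;
fail:
	toyq_free(q);
	return NULL;
}

int main(void)
{
	struct toyq *q = toyq_alloc(4, 4096);

	printf("%s\n", q ? "allocated" : "failed");
	if (q)
		toyq_free(q);
	return 0;
}
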
@@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!cq || !eq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.cqv);
if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
- /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
- bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (cq->page_size / SLI4_PAGE_SIZE));
bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
eq->queue_id);
} else {
@@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
eq->queue_id);
}
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ cq_create->u.request.context.lpfc_cq_context_count =
+ cq->entry_count;
+ bf_set(lpfc_cq_context_count,
+ &cq_create->u.request.context,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
- memset(dmabuf->virt, 0, hw_page_size);
+ memset(dmabuf->virt, 0, cq->page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
numcq = phba->cfg_nvmet_mrq;
if (!cqp || !eqp || !numcq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -ENOMEM;
goto out;
}
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = cq->page_size;
switch (idx) {
case 0:
@@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
bf_set(lpfc_mbx_cq_create_set_num_cq,
&cq_set->u.request, numcq);
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ cq->entry_count);
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
+ cq->chann = idx;
rc = 0;
list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
void __iomem *bar_memmap_p;
uint32_t db_offset;
uint16_t pci_barset;
+ uint8_t wq_create_version;
/* sanity check on queue memory */
if (!wq || !cq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = wq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
+ wq_create_version = LPFC_Q_CREATE_VERSION_1;
+ else
+ wq_create_version = LPFC_Q_CREATE_VERSION_0;
+
+ switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_0:
switch (wq->entry_size) {
default:
@@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
bf_set(lpfc_mbx_wq_create_page_size,
&wq_create->u.request_1,
- LPFC_WQ_PAGE_SIZE_4096);
+ (wq->page_size / SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
break;
default:
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 13b8f4d4da34..81fb58e59e60 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -161,7 +161,6 @@ struct lpfc_queue {
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
- uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
@@ -169,6 +168,11 @@ struct lpfc_queue {
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
uint32_t q_mode;
+ uint16_t page_count; /* Number of pages allocated for this queue */
+ uint16_t page_size; /* size of page allocated for this queue */
+#define LPFC_EXPANDED_PAGE_SIZE 16384
+#define LPFC_DEFAULT_PAGE_SIZE 4096
+ uint16_t chann; /* IO channel this queue is associated with */
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -366,9 +370,9 @@ struct lpfc_bmbx {
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 1024
+#define LPFC_CQE_EXP_COUNT 4096
#define LPFC_WQE_DEF_COUNT 256
-#define LPFC_WQE128_DEF_COUNT 128
-#define LPFC_WQE128_MAX_COUNT 256
+#define LPFC_WQE_EXP_COUNT 1024
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -668,7 +672,6 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
- struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -769,7 +772,7 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
- uint32_t);
+ uint32_t, uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -820,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e0181371af09..c232bf0e8998 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.4"
+#define LPFC_DRIVER_VERSION "11.4.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index f5a36ccb8606..ba6503f37756 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.703.05.00-rc1"
-#define MEGASAS_RELDATE "October 5, 2017"
+#define MEGASAS_VERSION "07.704.04.00-rc1"
+#define MEGASAS_RELDATE "December 7, 2017"
/*
* Device IDs
@@ -197,6 +197,7 @@ enum MFI_CMD_OP {
MFI_CMD_ABORT = 0x6,
MFI_CMD_SMP = 0x7,
MFI_CMD_STP = 0x8,
+ MFI_CMD_NVME = 0x9,
MFI_CMD_OP_COUNT,
MFI_CMD_INVALID = 0xff
};
@@ -230,7 +231,7 @@ enum MFI_CMD_OP {
/*
* Global functions
*/
-extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id);
/*
@@ -1352,7 +1353,13 @@ struct megasas_ctrl_info {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u16 reserved:8;
+ u16 reserved:2;
+ u16 support_nvme_passthru:1;
+ u16 support_pl_debug_info:1;
+ u16 support_flash_comp_info:1;
+ u16 support_host_info:1;
+ u16 support_dual_fw_update:1;
+ u16 support_ssc_rev3:1;
u16 fw_swaps_bbu_vpd_info:1;
u16 support_pd_map_target_id:1;
u16 support_ses_ctrl_in_multipathcfg:1;
@@ -1377,7 +1384,19 @@ struct megasas_ctrl_info {
* provide the data in little endian order
*/
u16 fw_swaps_bbu_vpd_info:1;
- u16 reserved:8;
+ u16 support_ssc_rev3:1;
+ /* FW supports CacheCade 3.0, only one SSCD creation allowed */
+ u16 support_dual_fw_update:1;
+ /* FW supports dual firmware update feature */
+ u16 support_host_info:1;
+ /* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */
+ u16 support_flash_comp_info:1;
+ /* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */
+ u16 support_pl_debug_info:1;
+ /* FW supports retrieval of PL debug information through apps */
+ u16 support_nvme_passthru:1;
+ /* FW supports NVMe passthru commands */
+ u16 reserved:2;
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
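
The mirrored #if blocks are needed because C leaves bitfield allocation order to the ABI: big-endian targets assign the first-declared field to the most significant bits, so the declaration order must be reversed to keep each flag at the same logical bit position in the little-endian data the firmware fills in. A condensed standalone illustration, using the compiler's predefined byte-order macros where the kernel header tests __BIG_ENDIAN_BITFIELD:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Same 16-bit layout declared for both bitfield orders, in the
 * style of adapter_operations4 (field set reduced for brevity).
 */
struct ops4 {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint16_t reserved:14;
	uint16_t support_nvme_passthru:1;
	uint16_t fw_swaps_bbu_vpd_info:1;
#else
	uint16_t fw_swaps_bbu_vpd_info:1;
	uint16_t support_nvme_passthru:1;
	uint16_t reserved:14;
#endif
};

int main(void)
{
	struct ops4 o;
	uint16_t raw;

	memset(&o, 0, sizeof(o));
	o.support_nvme_passthru = 1;
	memcpy(&raw, &o, sizeof(raw));
	printf("raw = 0x%04x\n", raw);	/* 0x0002 under either order */
	return 0;
}
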
@@ -1630,7 +1649,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:18;
+ u32 reserved:17;
+ u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
@@ -1660,7 +1680,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
u32 support_64bit_mode:1;
- u32 reserved:18;
+ u32 support_nvme_passthru:1;
+ u32 reserved:17;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2188,7 +2209,6 @@ struct megasas_instance {
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct mutex hba_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -2269,6 +2289,7 @@ struct megasas_instance {
u32 nvme_page_size;
u8 adapter_type;
bool consistent_mask_64bit;
+ bool support_nvme_passthru;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2c8cfa27909d..2791141bd035 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -181,6 +181,7 @@ static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
+static bool support_nvme_encapsulation;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
@@ -1952,7 +1953,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
}
}
- mutex_lock(&instance->hba_mutex);
+ mutex_lock(&instance->reset_mutex);
/* Send DCMD to Firmware and cache the information */
if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
megasas_get_pd_info(instance, sdev);
@@ -1966,7 +1967,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->hba_mutex);
+ mutex_unlock(&instance->reset_mutex);
/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev);
@@ -3122,6 +3123,16 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
}
+static ssize_t
+megasas_fw_cmds_outstanding_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
+}
+
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -3132,6 +3143,8 @@ static DEVICE_ATTR(page_size, S_IRUGO,
megasas_page_size_show, NULL);
static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
megasas_ldio_outstanding_show, NULL);
+static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
+ megasas_fw_cmds_outstanding_show, NULL);
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
@@ -3139,6 +3152,7 @@ struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_state,
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
+ &dev_attr_fw_cmds_outstanding,
NULL,
};
@@ -3321,6 +3335,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ case MFI_CMD_NVME:
megasas_complete_int_cmd(instance, cmd);
break;
@@ -3331,10 +3346,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
+ status = cmd->frame->hdr.cmd_status;
instance->map_update_cmd = NULL;
- if (cmd->frame->hdr.cmd_status != 0) {
- if (cmd->frame->hdr.cmd_status !=
- MFI_STAT_NOT_FOUND)
+ if (status != MFI_STAT_OK) {
+ if (status != MFI_STAT_NOT_FOUND)
				dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
@@ -3344,8 +3359,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags);
break;
}
- } else
- instance->map_id++;
+ }
+
megasas_return_cmd(instance, cmd);
/*
@@ -3353,10 +3368,14 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
* Validate Map will set proper value.
* Meanwhile all IOs will go as LD IO.
*/
- if (MR_ValidateMapInfo(instance))
+ if (status == MFI_STAT_OK &&
+ (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
+ instance->map_id++;
fusion->fast_path_io = 1;
- else
+ } else {
fusion->fast_path_io = 0;
+ }
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
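
After this reordering, map_id advances only once MR_ValidateMapInfo() has approved the freshly fetched map at (map_id + 1), so a corrupt or failed map sync can no longer become the active map. The validate-then-commit shape in a trivial double-buffer sketch; the names and the toy validity check are illustrative.

#include <stdio.h>

/* Two map generations; the active slot is map_id % 2. A new map is
 * staged into the other slot and map_id advances only when the
 * candidate validates, so readers never see an unvalidated map.
 */
static int maps[2] = { 42, 0 };
static unsigned int map_id;

static int validate(int map)
{
	return map != 0;	/* toy check: zero means corrupt */
}

static void commit_new_map(int fetched)
{
	unsigned int cand = (map_id + 1) % 2;

	maps[cand] = fetched;
	if (validate(maps[cand]))
		map_id++;	/* commit: candidate becomes active */
	/* otherwise keep serving maps[map_id % 2] unchanged */
}

int main(void)
{
	commit_new_map(0);	/* corrupt: active map stays 42 */
	printf("active map %d\n", maps[map_id % 2]);
	commit_new_map(7);	/* valid: active map becomes 7 */
	printf("active map %d\n", maps[map_id % 2]);
	return 0;
}
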
@@ -4677,10 +4696,12 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
sizeof(struct megasas_ctrl_info));
if ((instance->adapter_type != MFI_SERIES) &&
- !instance->mask_interrupts)
+ !instance->mask_interrupts) {
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
- else
+ } else {
ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
switch (ret) {
case DCMD_SUCCESS:
@@ -4702,6 +4723,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
ci->adapter_operations4.support_pd_map_target_id;
+ instance->support_nvme_passthru =
+ ci->adapter_operations4.support_nvme_passthru;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4718,6 +4741,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
instance->secure_jbod_support ? "Yes" : "No");
+ dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
+ instance->support_nvme_passthru ? "Yes" : "No");
break;
case DCMD_TIMEOUT:
@@ -5387,7 +5412,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
fusion->stream_detect_by_ld[i] =
- kmalloc(sizeof(struct LD_STREAM_DETECT),
+ kzalloc(sizeof(struct LD_STREAM_DETECT),
GFP_KERNEL);
if (!fusion->stream_detect_by_ld[i]) {
dev_err(&instance->pdev->dev,
@@ -5432,7 +5457,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
struct fusion_context *fusion = instance->ctrl_context;
- if (MR_ValidateMapInfo(instance))
+ if (MR_ValidateMapInfo(instance, instance->map_id))
fusion->fast_path_io = 1;
else
fusion->fast_path_io = 0;
@@ -5581,6 +5606,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd;
struct megasas_evt_log_info *el_info;
dma_addr_t el_info_h = 0;
+ int ret;
cmd = megasas_get_cmd(instance);
@@ -5613,26 +5639,29 @@ megasas_get_seq_num(struct megasas_instance *instance,
megasas_set_dma_settings(instance, dcmd, el_info_h,
sizeof(struct megasas_evt_log_info));
- if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
- DCMD_SUCCESS) {
- /*
- * Copy the data back into callers buffer
- */
- eli->newest_seq_num = el_info->newest_seq_num;
- eli->oldest_seq_num = el_info->oldest_seq_num;
- eli->clear_seq_num = el_info->clear_seq_num;
- eli->shutdown_seq_num = el_info->shutdown_seq_num;
- eli->boot_seq_num = el_info->boot_seq_num;
- } else
- dev_err(&instance->pdev->dev, "DCMD failed "
- "from %s\n", __func__);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+ if (ret != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ goto dcmd_failed;
+ }
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = el_info->newest_seq_num;
+ eli->oldest_seq_num = el_info->oldest_seq_num;
+ eli->clear_seq_num = el_info->clear_seq_num;
+ eli->shutdown_seq_num = el_info->shutdown_seq_num;
+ eli->boot_seq_num = el_info->boot_seq_num;
+
+dcmd_failed:
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
megasas_return_cmd(instance, cmd);
- return 0;
+ return ret;
}
/**
@@ -6346,7 +6375,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->hba_mutex);
mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -6704,6 +6732,7 @@ megasas_resume(struct pci_dev *pdev)
*/
atomic_set(&instance->fw_outstanding, 0);
+ atomic_set(&instance->ldio_outstanding, 0);
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
@@ -6822,7 +6851,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
u32 pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
- instance->unload = 1;
host = instance->host;
fusion = instance->ctrl_context;
@@ -6833,6 +6861,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+ instance->unload = 1;
if (megasas_wait_for_adapter_operational(instance))
goto skip_firing_dcmds;
@@ -7087,7 +7116,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
- if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
+ ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
+ !instance->support_nvme_passthru)) {
dev_err(&instance->pdev->dev,
"Received invalid ioctl command 0x%x\n",
ioc->frame.hdr.cmd);
@@ -7301,9 +7332,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
struct megasas_iocpacket *ioc;
struct megasas_instance *instance;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
ioc = memdup_user(user_ioc, sizeof(*ioc));
if (IS_ERR(ioc))
@@ -7315,10 +7343,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- /* Adjust ioctl wait time for VF mode */
- if (instance->requestorId)
- wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
/* Block ioctls in VF mode */
if (instance->requestorId && !allow_vf_ioctls) {
error = -ENODEV;
@@ -7341,32 +7365,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- break;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting"
- "for controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance)) {
error = -ENODEV;
goto out_up;
}
- spin_unlock_irqrestore(&instance->hba_lock, flags);
error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
@@ -7382,9 +7384,6 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
struct megasas_instance *instance;
struct megasas_aen aen;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
if (file->private_data != file) {
printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -7408,32 +7407,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
return -ENODEV;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock,
- flags);
- break;
- }
-
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting for"
- "controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance))
return -ENODEV;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
@@ -7613,6 +7588,14 @@ static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
}
static DRIVER_ATTR_RW(dbg_lvl);
+static ssize_t
+support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_nvme_encapsulation);
+}
+
+static DRIVER_ATTR_RO(support_nvme_encapsulation);
+
static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7801,6 +7784,7 @@ static int __init megasas_init(void)
support_poll_for_event = 2;
support_device_change = 1;
+ support_nvme_encapsulation = true;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -7850,8 +7834,17 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_support_device_change;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
+ if (rval)
+ goto err_dcf_support_nvme_encapsulation;
+
return rval;
+err_dcf_support_nvme_encapsulation:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
@@ -7884,6 +7877,8 @@ static void __exit megasas_exit(void)
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
pci_unregister_driver(&megasas_pci_driver);
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index bfad9bfc313f..59ecbb3b53b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
/*
* This function will Populate Driver Map using firmware raid map
*/
-void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
@@ -181,7 +181,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
struct MR_DRV_RAID_MAP_ALL *drv_map =
- fusion->ld_drv_map[(instance->map_id & 1)];
+ fusion->ld_drv_map[(map_id & 1)];
struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
void *raid_map_data = NULL;
@@ -190,7 +190,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
if (instance->max_raid_mapsize) {
- fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+ fw_map_dyn = fusion->ld_map[(map_id & 1)];
desc_table =
(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
if (desc_table != fw_map_dyn->raid_map_desc_table)
@@ -255,11 +255,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
} else if (instance->supportmax256vd) {
fw_map_ext =
- (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+ (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
- return;
+ return 1;
}
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
@@ -282,9 +282,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
} else {
fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
- fusion->ld_map[(instance->map_id & 1)];
+ fusion->ld_map[(map_id & 1)];
pFwRaidMap = &fw_map_old->raidMap;
ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+ if (ld_count > MAX_LOGICAL_DRIVES) {
+ dev_dbg(&instance->pdev->dev,
+ "LD count exposed in RAID map in not valid\n");
+ return 1;
+ }
+
pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
@@ -300,12 +306,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
sizeof(struct MR_DEV_HANDLE_INFO) *
MAX_RAIDMAP_PHYSICAL_DEVICES);
}
+
+ return 0;
}
/*
* This function will validate Map info data provided by FW
*/
-u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *drv_map;
@@ -317,11 +325,11 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
u16 ld;
u32 expected_size;
-
- MR_PopulateDrvRaidMap(instance);
+ if (MR_PopulateDrvRaidMap(instance, map_id))
+ return 0;
fusion = instance->ctrl_context;
- drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ drv_map = fusion->ld_drv_map[(map_id & 1)];
pDrvRaidMap = &drv_map->raidMap;
lbInfo = fusion->load_balance_info;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 65dc4fea6352..073ced07e662 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -983,7 +983,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
- struct timeval tv;
+ ktime_t time;
bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
@@ -1042,13 +1042,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
- do_gettimeofday(&tv);
+ time = ktime_get_real();
/* Convert to milliseconds as per FW requirement */
- IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
- (tv.tv_usec / 1000));
+ IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
init_frame = (struct megasas_init_frame *)cmd->frame;
- memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
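
The timestamp hunk retires do_gettimeofday(), whose struct timeval overflows with a 32-bit time_t in 2038; ktime_get_real() returns a 64-bit nanosecond value and ktime_to_ms() replaces the hand-rolled seconds/microseconds arithmetic. A minimal sketch of the same pattern:

	#include <linux/ktime.h>

	/* y2038-safe wall-clock timestamp in milliseconds, as above. */
	static s64 fw_timestamp_ms(void)
	{
		return ktime_to_ms(ktime_get_real());
	}
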
@@ -1080,6 +1079,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+ drv_ops->mfi_capabilities.support_nvme_passthru = 1;
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1320,7 +1320,7 @@ megasas_get_map_info(struct megasas_instance *instance)
fusion->fast_path_io = 0;
if (!megasas_get_ld_map_info(instance)) {
- if (MR_ValidateMapInfo(instance)) {
+ if (MR_ValidateMapInfo(instance, instance->map_id)) {
fusion->fast_path_io = 1;
return 0;
}
@@ -1603,7 +1603,7 @@ static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
if (!cmd) {
dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
@@ -2664,16 +2664,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
if (instance->adapter_type == VENTURA_SERIES) {
- spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
- megasas_stream_detect(instance, cmd, &io_info);
- spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
- /* In ventura if stream detected for a read and it is read ahead
- * capable make this IO as LDIO
- */
- if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
- io_info.isRead && io_info.ra_capable)
- fp_possible = false;
-
/* FP for Optimal raid level 1.
* All large RAID-1 writes (> 32 KiB, both WT and WB modes)
* are built by the driver as LD I/Os.
@@ -2699,6 +2689,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
}
}
+ if (!fp_possible ||
+ (io_info.isRead && io_info.ra_capable)) {
+ spin_lock_irqsave(&instance->stream_lock,
+ spinlock_flags);
+ megasas_stream_detect(instance, cmd, &io_info);
+ spin_unlock_irqrestore(&instance->stream_lock,
+ spinlock_flags);
+ /* In ventura if stream detected for a read and it is
+ * read ahead capable make this IO as LDIO
+ */
+ if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+ fp_possible = false;
+ }
+
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
megasas_set_raidflag_cpu_affinity(praid_context,
@@ -3953,6 +3957,8 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u16 smid;
bool refire_cmd = 0;
+ u8 result;
+ u32 opcode = 0;
fusion = instance->ctrl_context;
@@ -3963,29 +3969,53 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
cmd_fusion = fusion->cmd_list[j];
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
smid = le16_to_cpu(cmd_mfi->context.smid);
+ result = REFIRE_CMD;
if (!smid)
continue;
- /* Do not refire shutdown command */
- if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
- MR_DCMD_CTRL_SHUTDOWN) {
- cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
- megasas_complete_cmd(instance, cmd_mfi, DID_OK);
- continue;
+ req_desc = megasas_get_request_descriptor(instance, smid - 1);
+
+ switch (cmd_mfi->frame->hdr.cmd) {
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
+ /* Do not refire shutdown command */
+ if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+ result = COMPLETE_CMD;
+ break;
+ }
+
+ refire_cmd = (opcode != MR_DCMD_LD_MAP_GET_INFO) &&
+ (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+ !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+
+ if (!refire_cmd)
+ result = RETURN_CMD;
+
+ break;
+ case MFI_CMD_NVME:
+ if (!instance->support_nvme_passthru) {
+ cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+ result = COMPLETE_CMD;
+ }
+
+ break;
+ default:
+ break;
}
- req_desc = megasas_get_request_descriptor
- (instance, smid - 1);
- refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
- (cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
- && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
- if (refire_cmd)
+ switch (result) {
+ case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
- else
+ break;
+ case RETURN_CMD:
megasas_return_cmd(instance, cmd_mfi);
+ break;
+ case COMPLETE_CMD:
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ break;
+ }
}
}
@@ -4625,8 +4655,6 @@ transition_to_ready:
continue;
}
- megasas_refire_mgmt_cmd(instance);
-
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
@@ -4635,6 +4663,9 @@ transition_to_ready:
retval = FAILED;
goto out;
}
+
+ megasas_refire_mgmt_cmd(instance);
+
/* Reset load balance info */
if (fusion->load_balance_info)
memset(fusion->load_balance_info, 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 1814d79cb98d..8e5ebee6517f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1344,6 +1344,12 @@ union desc_value {
} u;
};
+enum CMD_RET_VALUES {
+ REFIRE_CMD = 1,
+ COMPLETE_CMD = 2,
+ RETURN_CMD = 3,
+};
+
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
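
The enum supports the refire rework earlier in this series: each pending command is first classified as refire, return, or complete, and the side effect is applied once at the end of the loop. A generic sketch of that classify-then-dispatch shape (names are illustrative, not the driver's):

	enum cmd_action { ACT_REFIRE, ACT_RETURN, ACT_COMPLETE };

	/* Hypothetical dispatcher mirroring megasas_refire_mgmt_cmd():
	 * classification happens first, the action exactly once. */
	static void dispatch(enum cmd_action act)
	{
		switch (act) {
		case ACT_REFIRE:
			/* re-issue the descriptor to firmware */
			break;
		case ACT_RETURN:
			/* hand the frame back to the free pool */
			break;
		case ACT_COMPLETE:
			/* complete the command to the upper layer */
			break;
		}
	}
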
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8027de465d47..13d6e4ec3022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -888,6 +888,22 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
return 1;
}
+static struct scsiio_tracker *
+_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsi_cmnd *cmd;
+
+ if (WARN_ON(!smid) ||
+ WARN_ON(smid >= ioc->hi_priority_smid))
+ return NULL;
+
+ cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (cmd)
+ return scsi_cmd_priv(cmd);
+
+ return NULL;
+}
+
/**
* _base_get_cb_idx - obtain the callback index
* @ioc: per adapter object
@@ -899,19 +915,25 @@ static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx;
+ u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ u8 cb_idx = 0xFF;
if (smid < ioc->hi_priority_smid) {
- i = smid - 1;
- cb_idx = ioc->scsi_lookup[i].cb_idx;
+ struct scsiio_tracker *st;
+
+ if (smid < ctl_smid) {
+ st = _get_st_from_smid(ioc, smid);
+ if (st)
+ cb_idx = st->cb_idx;
+ } else if (smid == ctl_smid)
+ cb_idx = ioc->ctl_cb_idx;
} else if (smid < ioc->internal_smid) {
i = smid - ioc->hi_priority_smid;
cb_idx = ioc->hpr_lookup[i].cb_idx;
} else if (smid <= ioc->hba_queue_depth) {
i = smid - ioc->internal_smid;
cb_idx = ioc->internal_lookup[i].cb_idx;
- } else
- cb_idx = 0xFF;
+ }
return cb_idx;
}
@@ -1287,14 +1309,16 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
/**
* _base_get_chain_buffer_tracker - obtain chain tracker
* @ioc: per adapter object
- * @smid: smid associated to an IO request
+ * @scmd: SCSI command of the IO request
*
* Returns chain tracker(from ioc->free_chain_list)
*/
static struct chain_tracker *
-_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
{
struct chain_tracker *chain_req;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1307,8 +1331,7 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
chain_req = list_entry(ioc->free_chain_list.next,
struct chain_tracker, tracker_list);
list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->scsi_lookup[smid - 1].chain_list);
+ list_add_tail(&chain_req->tracker_list, &st->chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return chain_req;
}
@@ -1923,7 +1946,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
/* initializing the chain flags and pointers */
chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -1963,7 +1986,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2066,7 +2089,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}
/* initializing the pointers */
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2097,7 +2120,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2742,7 +2765,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
+ return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
/**
@@ -2755,7 +2778,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
+ return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
/**
@@ -2822,26 +2845,15 @@ u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
- unsigned long flags;
- struct scsiio_tracker *request;
+ struct scsiio_tracker *request = scsi_cmd_priv(scmd);
+ unsigned int tag = scmd->request->tag;
u16 smid;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->free_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
- return 0;
- }
-
- request = list_entry(ioc->free_list.next,
- struct scsiio_tracker, tracker_list);
- request->scmd = scmd;
+ smid = tag + 1;
request->cb_idx = cb_idx;
- smid = request->smid;
request->msix_io = _base_get_msix_index(ioc);
- list_del(&request->tracker_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ request->smid = smid;
+ INIT_LIST_HEAD(&request->chain_list);
return smid;
}
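
With command allocation delegated to the block layer, scmd->request->tag is already unique per host and spans [0, can_queue), so the smid can be computed instead of pulled from a locked free list. The mapping, as a sketch:

	#include <linux/types.h>

	/* Block-layer tags are 0-based; MPI system message ids (smids)
	 * are 1-based, hence the +1/-1 translation. */
	static u16 tag_to_smid(unsigned int tag)
	{
		return (u16)(tag + 1);
	}

	static unsigned int smid_to_tag(u16 smid)
	{
		return smid - 1;
	}
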
@@ -2874,6 +2886,35 @@ mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
return smid;
}
+static void
+_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
+{
+ /*
+ * See _wait_for_commands_to_complete() call with regards to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ if (ioc->pending_io_count == 0)
+ wake_up(&ioc->reset_wq);
+ }
+}
+
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st)
+{
+ if (WARN_ON(st->smid == 0))
+ return;
+ st->cb_idx = 0xFF;
+ st->direct_io = 0;
+ if (!list_empty(&st->chain_list)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ list_splice_init(&st->chain_list, &ioc->free_chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ }
+}
+
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
@@ -2886,37 +2927,22 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
unsigned long flags;
int i;
- struct chain_tracker *chain_req, *next;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (smid < ioc->hi_priority_smid) {
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add(&chain_req->tracker_list,
- &ioc->free_chain_list);
- }
- }
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ struct scsiio_tracker *st;
- /*
- * See _wait_for_commands_to_complete() call with regards
- * to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
+ st = _get_st_from_smid(ioc, smid);
+ if (!st) {
+ _base_recovery_check(ioc);
+ return;
}
+ mpt3sas_base_clear_st(ioc, st);
+ _base_recovery_check(ioc);
return;
- } else if (smid < ioc->internal_smid) {
+ }
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->internal_smid) {
/* hi-priority */
i = smid - ioc->hi_priority_smid;
ioc->hpr_lookup[i].cb_idx = 0xFF;
@@ -3789,13 +3815,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->pcie_sgl_dma_pool) {
for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
- pci_pool_free(ioc->pcie_sgl_dma_pool,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ dma_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
if (ioc->pcie_sgl_dma_pool)
- pci_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -3806,10 +3831,6 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- if (ioc->scsi_lookup) {
- free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
- ioc->scsi_lookup = NULL;
- }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
@@ -4110,16 +4131,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
- ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->scsi_lookup_pages);
- if (!ioc->scsi_lookup) {
- pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
- ioc->name, (int)sz);
- goto out;
- }
-
dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
ioc->name, ioc->request, ioc->scsiio_depth));
@@ -4202,23 +4213,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
nvme_blocks_needed++;
+ sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
+ ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
+ if (!ioc->pcie_sg_lookup) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ goto out;
+ }
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
- pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_create failed\n",
+ "PCIe SGL pool: dma_pool_create failed\n",
ioc->name);
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
- pci_pool_alloc(ioc->pcie_sgl_dma_pool,
- GFP_KERNEL,
- &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
- if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
+ ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
+ ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_alloc failed\n",
+ "PCIe SGL pool: dma_pool_alloc failed\n",
ioc->name);
goto out;
}
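
The pci_pool_* names converted above were compatibility wrappers on their way out of the tree; dma_pool_* is the same API keyed on a struct device instead of a struct pci_dev. The usual lifecycle, sketched with hypothetical names:

	#include <linux/dmapool.h>
	#include <linux/errno.h>

	/* Hypothetical fixed-size DMA buffer pool for one device. */
	static int example_pool_cycle(struct device *dev, size_t sz)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *buf;

		pool = dma_pool_create("example pool", dev, sz, 16, 0);
		if (!pool)
			return -ENOMEM;

		buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
		if (!buf) {
			dma_pool_destroy(pool);
			return -ENOMEM;
		}

		/* ... hand (buf, dma) to the hardware ... */

		dma_pool_free(pool, buf, dma);
		dma_pool_destroy(pool);
		return 0;
	}
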
@@ -5766,19 +5783,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
kfree(delayed_event_ack);
}
- /* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- INIT_LIST_HEAD(&ioc->free_list);
- smid = 1;
- for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
- INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].smid = smid;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
- }
/* hi-priority queue */
INIT_LIST_HEAD(&ioc->hpr_free_list);
@@ -6292,15 +6297,13 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* _wait_for_commands_to_complete - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
*
- * This function waiting(3s) for all pending commands to complete
+ * This function waits up to 10s for all pending commands to complete
* prior to putting controller in reset.
*/
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
- unsigned long flags;
- u16 i;
ioc->pending_io_count = 0;
@@ -6309,11 +6312,7 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = 0; i < ioc->scsiio_depth; i++)
- if (ioc->scsi_lookup[i].cb_idx != 0xFF)
- ioc->pending_io_count++;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
if (!ioc->pending_io_count)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 60f42ca3954f..789bc421424b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -772,20 +772,17 @@ struct chain_tracker {
/**
* struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
- * @scmd: scsi request pointer
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
- * @tracker_list: list of free request (ioc->free_list)
+ * @chain_list: list of associated firmware chain trackers
* @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
- struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
- struct list_head tracker_list;
u16 msix_io;
};
@@ -1248,10 +1245,8 @@ struct MPT3SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct scsiio_tracker *scsi_lookup;
- ulong scsi_lookup_pages;
+ struct pcie_sg_list *pcie_sg_lookup;
spinlock_t scsi_lookup_lock;
- struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -1270,6 +1265,7 @@ struct MPT3SAS_ADAPTER {
u16 chains_needed_per_io;
u32 chain_depth;
u16 chain_segment_sz;
+ u16 chains_per_prp_buffer;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -1401,7 +1397,9 @@ void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
- struct scsi_cmnd *scmd);
+ struct scsi_cmnd *scmd);
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st);
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1437,16 +1435,16 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
+struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
@@ -1613,14 +1611,9 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
struct _raid_device *raid_device);
-u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid);
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 537de1b7e8e5..9cddc3074cd1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -567,11 +567,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
- u16 i;
+ u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
struct MPT3SAS_DEVICE *priv_data;
- unsigned long flags;
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
@@ -587,11 +586,11 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
handle = le16_to_cpu(tm_request->DevHandle);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = ioc->scsiio_depth; i && !found; i--) {
- scmd = ioc->scsi_lookup[i - 1].scmd;
- if (scmd == NULL || scmd->device == NULL ||
- scmd->device->hostdata == NULL)
+ for (smid = ioc->scsiio_depth; smid && !found; smid--) {
+ struct scsiio_tracker *st;
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
continue;
if (lun != scmd->device->lun)
continue;
@@ -600,10 +599,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
continue;
if (priv_data->sas_target->handle != handle)
continue;
- tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ st = scsi_cmd_priv(scmd);
+ tm_request->TaskMID = cpu_to_le16(st->smid);
found = 1;
}
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
dctlprintk(ioc, pr_info(MPT3SAS_FMT
@@ -724,14 +723,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
goto out;
}
} else {
-
- smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
- if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- ret = -EAGAIN;
- goto out;
- }
+ /* Use first reserved smid for passthrough ioctls */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
}
ret = 0;
@@ -1081,8 +1074,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
mpt3sas_scsih_issue_locked_tm(ioc,
- le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b258f210120a..74fca184dba9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1445,156 +1445,31 @@ _scsih_is_nvme_device(u32 device_info)
}
/**
- * _scsih_scsi_lookup_get - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-static struct scsi_cmnd *
-_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].scmd;
-}
-
-/**
- * __scsih_scsi_lookup_get_clear - returns scmd entry without
- * holding any lock.
+ * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the smid stored scmd pointer.
* Then will dereference the stored scmd pointer.
*/
-static inline struct scsi_cmnd *
-__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
- u16 smid)
+struct scsi_cmnd *
+mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
+ struct scsiio_tracker *st;
- swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
-
- return scmd;
-}
-
-/**
- * _scsih_scsi_lookup_get_clear - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- * Then will derefrence the stored scmd pointer.
- */
-static inline struct scsi_cmnd *
-_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- unsigned long flags;
- struct scsi_cmnd *scmd;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- return scmd;
-}
+ if (smid > 0 &&
+ smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
+ u32 unique_tag = smid - 1;
-/**
- * _scsih_scsi_lookup_find_by_scmd - scmd lookup
- * @ioc: per adapter object
- * @smid: system request message index
- * @scmd: pointer to scsi command object
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a scmd pointer in the scsi_lookup array,
- * returning the revelent smid. A returned value of zero means invalid.
- */
-static u16
-_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
- *scmd)
-{
- u16 smid;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- smid = 0;
- for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd == scmd) {
- smid = ioc->scsi_lookup[i].smid;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_target - search for matching channel:id
- * @ioc: per adapter object
- * @id: target id
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
- int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel)) {
- found = 1;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
- * @ioc: per adapter object
- * @id: target id
- * @lun: lun number
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id:lun in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
- unsigned int lun, int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel &&
- ioc->scsi_lookup[i].scmd->device->lun == lun)) {
- found = 1;
- goto out;
+ scmd = scsi_host_find_tag(ioc->shost, unique_tag);
+ if (scmd) {
+ st = scsi_cmd_priv(scmd);
+ if (st->cb_idx == 0xFF)
+ scmd = NULL;
}
}
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
+ return scmd;
}
/**
@@ -2727,32 +2602,30 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
- * @device_handle: device handle
- * @channel: the channel assigned by the OS
- * @id: the id assigned by the OS
+ * @handle: device handle
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
+ * @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
* Context: user
*
* A generic API for sending task management requests to firmware.
*
* The callback index is set inside `ioc->tm_cb_idx`.
+ * The caller is responsible for checking for outstanding commands.
*
* Return SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
- uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
- struct scsiio_tracker *scsi_lookup = NULL;
int rc;
- u16 msix_task = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2791,9 +2664,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
return FAILED;
}
- if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
-
dtmprintk(ioc, pr_info(MPT3SAS_FMT
"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
ioc->name, handle, type, smid_task));
@@ -2809,11 +2679,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
- (scsi_lookup->msix_io < ioc->reply_queue_count))
- msix_task = scsi_lookup->msix_io;
- else
- msix_task = 0;
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2847,35 +2712,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
-
- switch (type) {
- case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- rc = SUCCESS;
- if (scsi_lookup->scmd == NULL)
- break;
- rc = FAILED;
- break;
-
- case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
- rc = SUCCESS;
- break;
- default:
- rc = FAILED;
- break;
- }
+ rc = SUCCESS;
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2884,13 +2721,13 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
- smid_task, timeout);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
+ msix_task, timeout);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@@ -2989,7 +2826,7 @@ scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- u16 smid;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
u16 handle;
int r;
@@ -3007,9 +2844,8 @@ scsih_abort(struct scsi_cmnd *scmd)
goto out;
}
- /* search for the command */
- smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
- if (!smid) {
+ /* check for completed command */
+ if (st == NULL || st->cb_idx == 0xFF) {
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -3027,10 +2863,12 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30);
+ /* Command must be cleared after abort */
+ if (r == SUCCESS && st->cb_idx != 0xFF)
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3086,10 +2924,11 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3148,10 +2987,11 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&starget->target_busy))
+ r = FAILED;
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -4600,16 +4440,18 @@ static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 smid;
- u16 count = 0;
+ int count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
count++;
_scsih_set_satl_pending(scmd, false);
- mpt3sas_base_free_smid(ioc, smid);
+ st = scsi_cmd_priv(scmd);
+ mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
scmd->result = DID_NO_CONNECT << 16;
@@ -4758,19 +4600,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4796,6 +4625,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -4823,6 +4665,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4854,6 +4697,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
pcie_device = sas_target_priv_data->pcie_dev;
if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
@@ -4862,7 +4706,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
mpt3sas_setup_direct_io(ioc, scmd,
- raid_device, mpi_request, smid);
+ raid_device, mpi_request);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -5330,6 +5174,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 ioc_status;
u32 xfer_cnt;
u8 scsi_state;
@@ -5337,16 +5182,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
- unsigned long flags;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
- ioc->got_task_abort_from_ioctl)
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
- else
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
-
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL)
return 1;
@@ -5371,13 +5210,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (mpt3sas_scsi_direct_io_get(ioc, smid) &&
+ st = scsi_cmd_priv(scmd);
+ if (st->direct_io &&
((ioc_status & MPI2_IOCSTATUS_MASK)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->scsi_lookup[smid - 1].scmd = scmd;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- mpt3sas_scsi_direct_io_set(ioc, smid, 0);
+ st->direct_io = 0;
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5555,9 +5392,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
out:
scsi_dma_unmap(scmd);
-
+ mpt3sas_base_free_smid(ioc, smid);
scmd->scsi_done(scmd);
- return 1;
+ return 0;
}
/**
@@ -7211,7 +7048,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* Context: user.
*
*/
-static int
+static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
@@ -7221,7 +7058,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u8 link_rate, prev_link_rate;
unsigned long flags;
int rc;
- int requeue_event;
Mpi26EventDataPCIeTopologyChangeList_t *event_data =
(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
struct _pcie_device *pcie_device;
@@ -7231,12 +7067,12 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery)
- return 0;
+ return;
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
ioc->name));
- return 0;
+ return;
}
/* handle siblings events */
@@ -7244,10 +7080,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"ignoring switch event\n", ioc->name));
- return 0;
+ return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
- return 0;
+ return;
reason_code = event_data->PortEntry[i].PortStatus;
handle =
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
@@ -7316,7 +7152,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
}
}
- return requeue_event;
}
/**
@@ -7502,6 +7337,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
{
struct scsi_cmnd *scmd;
struct scsi_device *sdev;
+ struct scsiio_tracker *st;
u16 smid, handle;
u32 lun;
struct MPT3SAS_DEVICE *sas_device_priv_data;
@@ -7543,9 +7379,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
if (ioc->shost_recovery)
goto out;
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
+ st = scsi_cmd_priv(scmd);
sdev = scmd->device;
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
@@ -7567,8 +7404,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
+ st->msix_io, 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7607,10 +7445,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
- sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
- 30);
- if (r == FAILED) {
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
+ st->msix_io, 30);
+ if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
@@ -10416,6 +10254,7 @@ static struct scsi_host_template mpt2sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 2.0 HBA devices */
@@ -10454,6 +10293,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 3.0 HBA devices */
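
The .cmd_size addition is what carries the whole scsiio_tracker conversion: the midlayer allocates that many bytes behind every scsi_cmnd and scsi_cmd_priv() returns them, which is why the driver could drop its scsi_lookup array, free list, and most of the lock traffic. The pattern reduced to essentials (type and helper names are illustrative):

	#include <scsi/scsi_cmnd.h>

	struct my_cmd_priv {		/* per-command driver data */
		u16 smid;
		u8 cb_idx;
	};

	/* In the scsi_host_template: .cmd_size = sizeof(struct my_cmd_priv), */

	static void my_cmd_prep(struct scsi_cmnd *scmd)
	{
		struct my_cmd_priv *priv = scsi_cmd_priv(scmd);

		priv->smid = scmd->request->tag + 1;	/* 1-based MPI smid */
		priv->cb_idx = 0xFF;			/* sentinel: not in flight */
	}
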
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index ced7d9f6274c..6bfcee4757e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -261,33 +261,6 @@ out_error:
}
/**
- * mpt3sas_scsi_direct_io_get - returns direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-inline u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].direct_io;
-}
-
-/**
- * mpt3sas_scsi_direct_io_set - sets direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- * @direct_io: Zero or non-zero value to set in the direct_io flag
- *
- * Returns Nothing.
- */
-inline void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
-{
- ioc->scsi_lookup[smid - 1].direct_io = direct_io;
-}
-
-/**
* mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
* @ioc: per adapter object
* @scmd: pointer to scsi command object
@@ -299,12 +272,12 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid)
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
u32 stripe_sz, stripe_exp;
u8 num_pds, cmd = scmd->cmnd[0];
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
if (cmd != READ_10 && cmd != WRITE_10 &&
cmd != READ_16 && cmd != WRITE_16)
@@ -340,5 +313,5 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
else
put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
- mpt3sas_scsi_direct_io_set(ioc, smid, 1);
+ st->direct_io = 1;
}
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e58be98430b0..201c8de1853d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5216,7 +5216,7 @@ static unsigned short pmcraid_get_minor(void)
{
int minor;
- minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
+ minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
__set_bit(minor, pmcraid_minor);
return minor;
}
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 7be5823ab036..ee86a0c62dbf 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -724,6 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
cmd->SCp.phase++;
+ /* fall through */
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -733,6 +734,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
cmd->SCp.phase++;
+ /* fall through */
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
@@ -746,6 +748,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
+ /* fall through */
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
@@ -758,6 +761,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
cmd->SCp.phase++;
+ /* fall through */
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
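The added /* fall through */ comments in ppa_engine() mark each deliberate drop from one phase into the next, which GCC's -Wimplicit-fallthrough recognizes and stops warning about. A standalone illustration of the idiom:

#include <stdio.h>

static void run_from(int phase)
{
	switch (phase) {
	case 1:
		puts("phase 1: connect");
		/* fall through */
	case 2:
		puts("phase 2: send command");
		/* fall through */
	case 3:
		puts("phase 3: transfer data");
		break;
	default:
		puts("unknown phase");
	}
}

int main(void)
{
	run_from(2);	/* prints phases 2 and 3 */
	return 0;
}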
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7c0064500cc5..4809debc6110 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3126,6 +3126,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
if (!qedf->cmd_mgr) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
+ rc = -ENOMEM;
goto err5;
}
@@ -3149,6 +3150,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
create_workqueue(host_buf);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
@@ -3192,6 +3194,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
+ rc = -ENOMEM;
goto err7;
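All three __qedf_probe() hunks fix the same bug: on these allocation failures the code jumped to the error label with rc still holding the previous (success) value, so probe could fail yet report 0. The corrected shape, as a small standalone sketch:

#include <errno.h>
#include <stdlib.h>

static int probe_example(void **out_a, void **out_b)
{
	int rc = 0;			/* stays 0 only on the success path */
	void *a, *b;

	a = malloc(64);
	if (!a) {
		rc = -ENOMEM;		/* set the code before jumping */
		goto err;
	}
	b = malloc(64);
	if (!b) {
		rc = -ENOMEM;		/* the previously forgotten assignment */
		goto err_free_a;
	}
	*out_a = a;
	*out_b = b;
	return 0;

err_free_a:
	free(a);
err:
	return rc;
}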
}
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index bd302d3cb9af..20a9259304f2 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -198,7 +198,7 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
qedi_cmd = task->dd_data;
- qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
if (!qedi_cmd->tmf_resp_buf) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to allocate resp buf, cid=0x%x\n",
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cccc34adc0e0..58596d17f7d9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -998,7 +998,9 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
if (ret)
- continue;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Dropping CQE 0x%x for cid=0x%x.\n",
+ que->cq_cons_idx, cqe->cqe_common.conn_id);
que->cq_cons_idx++;
if (que->cq_cons_idx == QEDI_CQ_SIZE)
@@ -1268,16 +1270,14 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
- PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
return -ENOMEM;
}
- memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
/*
* Now populate PBL list with pages that contain pointers to the
@@ -1367,11 +1367,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1379,14 +1378,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq, 0,
- qedi->global_queues[i]->cq_mem_size);
-
- qedi->global_queues[i]->cq_pbl =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1394,8 +1389,6 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq_pbl, 0,
- qedi->global_queues[i]->cq_pbl_size);
/* Create PBL */
num_pages = qedi->global_queues[i]->cq_mem_size /
@@ -1456,25 +1449,22 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- memset(ep->sq, 0, ep->sq_mem_size);
-
- ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
rval = -ENOMEM;
goto out_free_sq;
}
- memset(ep->sq_pbl, 0, ep->sq_pbl_size);
/* Create PBL */
num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
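Each of the qedi hunks above folds a dma_alloc_coherent() followed by memset() into a single dma_zalloc_coherent() call, which hands back DMA-coherent memory already zeroed; the same cleanup is applied to qla2xxx below. A sketch, assuming a valid struct device *dev:

#include <linux/dma-mapping.h>

static void *get_zeroed_dma(struct device *dev, dma_addr_t *dma)
{
	/*
	 * Equivalent to dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL)
	 * followed by memset(buf, 0, PAGE_SIZE); returns NULL on failure.
	 */
	return dma_zalloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
}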
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9ce28c4f9812..89a4999fa631 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1843,14 +1843,13 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_reset_active(vha))
goto done;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
goto done;
}
- memset(stats, 0, sizeof(*stats));
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
@@ -2170,6 +2169,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
+ vfree(vha->scan.l);
+
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index e3ac7078d2aa..e2d5d3ca0f57 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1435,7 +1435,7 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
ha->optrom_state = QLA_SREADING;
}
- ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ ha->optrom_buffer = vzalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
@@ -1445,7 +1445,6 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
return -ENOMEM;
}
- memset(ha->optrom_buffer, 0, ha->optrom_region_size);
return 0;
}
@@ -2314,16 +2313,14 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
return -ENOMEM;
}
- memset(stats, 0, sizeof(*stats));
-
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
if (rval == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 01a9b8971e88..be7d6824581a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -246,8 +246,8 @@
* There is no correspondence between an N-PORT id and an AL_PA. Therefore the
* valid range of an N-PORT id is 0 through 0x7ef.
*/
-#define NPH_LAST_HANDLE 0x7ef
-#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
+#define NPH_LAST_HANDLE 0x7ee
+#define NPH_MGMT_SERVER 0x7ef /* FFFFEF */
#define NPH_SNS 0x7fc /* FFFFFC */
#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -288,6 +288,8 @@ struct name_list_extended {
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
#define FW_DEF_EXCHANGES_CNT 2048
+#define FW_MAX_EXCHANGES_CNT (32 * 1024)
+#define REDUCE_EXCHANGES_CNT (8 * 1024)
struct req_que;
struct qla_tgt_sess;
@@ -315,6 +317,29 @@ struct srb_cmd {
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
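port_id_t, moved earlier in the header so that struct ct_arg can embed it, overlays a 24-bit integer view of an FC N_Port ID with its domain/area/al_pa bytes; the #ifdef keeps b24 and the byte view consistent on both endiannesses. A quick standalone check of the little-endian layout (the kernel header keeps both branches; this sketch hard-codes the little-endian one):

#include <stdint.h>
#include <stdio.h>

typedef union {
	uint32_t b24 : 24;
	struct {
		uint8_t al_pa;	/* little-endian order, as in the hunk */
		uint8_t area;
		uint8_t domain;
		uint8_t rsvd_1;
	} b;
} port_id_t;

int main(void)
{
	port_id_t id = { .b24 = 0x010203 };

	/* On a little-endian host: domain=01 area=02 al_pa=03 */
	printf("domain=%02x area=%02x al_pa=%02x\n",
	       id.b.domain, id.b.area, id.b.al_pa);
	return 0;
}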
@@ -338,6 +363,7 @@ struct ct_arg {
u32 rsp_size;
void *req;
void *rsp;
+ port_id_t id;
};
/*
@@ -416,6 +442,7 @@ struct srb_iocb {
struct {
uint32_t cmd_hndl;
__le16 comp_status;
+ __le16 req_que_no;
struct completion comp;
} abt;
struct ct_arg ctarg;
@@ -448,6 +475,10 @@ struct srb_iocb {
uint32_t timeout_sec;
struct list_head entry;
} nvme;
+ struct {
+ u16 cmd;
+ u16 vp_index;
+ } ctrlvp;
} u;
struct timer_list timer;
@@ -476,6 +507,8 @@ struct srb_iocb {
#define SRB_NVME_CMD 19
#define SRB_NVME_LS 20
#define SRB_PRLI_CMD 21
+#define SRB_CTRL_VP 22
+#define SRB_PRLO_CMD 23
enum {
TYPE_SRB,
@@ -499,8 +532,12 @@ typedef struct srb {
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
+ int rc;
+ int retry_count;
+ struct completion comp;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2164,28 +2201,6 @@ struct imm_ntfy_from_isp {
#define REQUEST_ENTRY_SIZE (sizeof(request_t))
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
/*
* Switch info gathering structure.
@@ -2257,14 +2272,17 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
+ DSC_GNN_ID,
DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
DSC_GPDB,
+ DSC_GFPN_ID,
DSC_GPSC,
DSC_UPD_FCPORT,
DSC_LOGIN_COMPLETE,
+ DSC_ADISC,
DSC_DELETE_PEND,
};
@@ -2290,7 +2308,9 @@ enum fcport_mgt_event {
FCME_GPDB_DONE,
FCME_GPNID_DONE,
FCME_GFFID_DONE,
- FCME_DELETE_DONE,
+ FCME_ADISC_DONE,
+ FCME_GNNID_DONE,
+ FCME_GFPNID_DONE,
};
enum rscn_addr_format {
@@ -2315,6 +2335,7 @@ typedef struct fc_port {
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
+ unsigned int free_pending:1;
unsigned int local:1;
unsigned int logout_on_delete:1;
unsigned int logo_ack_needed:1;
@@ -2323,6 +2344,7 @@ typedef struct fc_port {
unsigned int login_pause:1;
unsigned int login_succ:1;
unsigned int query:1;
+ unsigned int id_changed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2434,6 +2456,7 @@ static const char * const port_state_str[] = {
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
+#define FCF_ASYNC_ACTIVE BIT_5
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2470,6 +2493,11 @@ static const char * const port_state_str[] = {
#define GA_NXT_REQ_SIZE (16 + 4)
#define GA_NXT_RSP_SIZE (16 + 620)
+#define GPN_FT_CMD 0x172
+#define GPN_FT_REQ_SIZE (16 + 4)
+#define GNN_FT_CMD 0x173
+#define GNN_FT_REQ_SIZE (16 + 4)
+
#define GID_PT_CMD 0x1A1
#define GID_PT_REQ_SIZE (16 + 4)
@@ -2725,6 +2753,13 @@ struct ct_sns_req {
} port_id;
struct {
+ uint8_t reserved;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t port_type;
+ } gpn_ft;
+
+ struct {
uint8_t port_type;
uint8_t domain;
uint8_t area;
@@ -2837,6 +2872,27 @@ struct ct_sns_gid_pt_data {
uint8_t port_id[3];
};
+/* It's the same for both GPN_FT and GNN_FT */
+struct ct_sns_gpnft_rsp {
+ struct {
+ struct ct_cmd_hdr header;
+ uint16_t response;
+ uint16_t residual;
+ uint8_t fragment_id;
+ uint8_t reason_code;
+ uint8_t explanation_code;
+ uint8_t vendor_unique;
+ };
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gpn_ft_data {
+ u8 control_byte;
+ u8 port_id[3];
+ u32 reserved;
+ u8 port_name[8];
+ } entries[1];
+};
+
+/* CT command response */
struct ct_sns_rsp {
struct ct_rsp_hdr header;
@@ -2912,6 +2968,33 @@ struct ct_sns_pkt {
} p;
};
+struct ct_sns_gpnft_pkt {
+ union {
+ struct ct_sns_req req;
+ struct ct_sns_gpnft_rsp rsp;
+ } p;
+};
+
+enum scan_flags_t {
+ SF_SCANNING = BIT_0,
+ SF_QUEUED = BIT_1,
+};
+
+struct fab_scan_rp {
+ port_id_t id;
+ u8 port_name[8];
+ u8 node_name[8];
+};
+
+struct fab_scan {
+ struct fab_scan_rp *l;
+ u32 size;
+ u16 scan_retry;
+#define MAX_SCAN_RETRIES 5
+ enum scan_flags_t scan_flags;
+ struct delayed_work scan_work;
+};
+
/*
* SNS command structures -- for 2200 compatibility.
*/
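struct ct_sns_gpnft_rsp describes the accelerated-scan payload: a fixed CT header followed by a run of 16-byte records, so entries[1] is really a variable-length tail sized by the DMA allocation, and struct fab_scan keeps one fab_scan_rp per discovered port. A sketch of walking such a record array; treating bit 7 of control_byte as the last-entry flag is an FC-GS convention assumed here, not spelled out in this hunk:

#include <stdint.h>
#include <stddef.h>

struct gpnft_entry {			/* mirrors ct_sns_gpn_ft_data */
	uint8_t control_byte;		/* bit 7 set on the last record */
	uint8_t port_id[3];
	uint32_t reserved;
	uint8_t port_name[8];
};

/* Visit every record in a response buffer; returns how many were present. */
static size_t walk_entries(const struct gpnft_entry *e, size_t max)
{
	size_t n;

	for (n = 0; n < max; n++) {
		/* copy e[n].port_id / e[n].port_name into the scan list here */
		if (e[n].control_byte & 0x80)
			return n + 1;	/* last record reached */
	}
	return n;			/* buffer exhausted */
}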
@@ -3117,7 +3200,7 @@ enum qla_work_type {
QLA_EVT_AENFX,
QLA_EVT_GIDPN,
QLA_EVT_GPNID,
- QLA_EVT_GPNID_DONE,
+ QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
@@ -3125,6 +3208,15 @@ enum qla_work_type {
QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
+ QLA_EVT_RELOGIN,
+ QLA_EVT_ASYNC_PRLO,
+ QLA_EVT_ASYNC_PRLO_DONE,
+ QLA_EVT_GPNFT,
+ QLA_EVT_GPNFT_DONE,
+ QLA_EVT_GNNFT_DONE,
+ QLA_EVT_GNNID,
+ QLA_EVT_GFPNID,
+ QLA_EVT_SP_RETRY,
};
@@ -3166,7 +3258,9 @@ struct qla_work_evt {
struct {
port_id_t id;
u8 port_name[8];
+ u8 node_name[8];
void *pla;
+ u8 fc4_type;
} new_sess;
struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
fc_port_t *fcport;
@@ -3177,6 +3271,9 @@ struct qla_work_evt {
u8 iocb[IOCB_SIZE];
int type;
} nack;
+ struct {
+ u8 fc4_type;
+ } gpnft;
} u;
};
@@ -3433,10 +3530,6 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
-#define QLA_EARLY_LINKUP(_ha) \
- ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
- _ha->flags.fw_started && !_ha->flags.fw_init_done)
-
/*
* Qlogic host adapter specific data structure.
*/
@@ -3494,8 +3587,10 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
+ uint32_t rida_fmt2:1;
} flags;
+ uint16_t max_exchg;
uint16_t long_range_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3713,6 +3808,8 @@ struct qla_hw_data {
(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3795,7 +3892,7 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
- void *swl;
+ void *swl;
/* These are used by mailbox operations. */
uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -4107,6 +4204,7 @@ typedef struct scsi_qla_host {
#define LOOP_READY 5
#define LOOP_DEAD 6
+ unsigned long relogin_jif;
unsigned long dpc_flags;
#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
#define RESET_ACTIVE 1
@@ -4139,6 +4237,7 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
+#define IOCB_WORK_ACTIVE 31
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4252,6 +4351,8 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ struct list_head gpnid_list;
+ struct fab_scan scan;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4511,6 +4612,16 @@ struct sff_8247_a0 {
#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+#define SAVE_TOPO(_ha) { \
+ if (_ha->current_topology) \
+ _ha->prev_topology = _ha->current_topology; \
+}
+
+#define N2N_TOPO(ha) \
+ ((ha->prev_topology == ISP_CFG_N && !ha->current_topology) || \
+ ha->current_topology == ISP_CFG_N || \
+ !ha->current_topology)
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index d231e7156134..0b190082aa8d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -127,21 +127,32 @@ static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
- struct qla_hw_data *ha = vha->hw;
+ uint16_t mb[MAX_IOCB_MB_REG];
+ int rc;
+
+ rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
+ if (rc != QLA_SUCCESS) {
+ seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
+ } else {
+ seq_puts(s, "FW Resource count\n\n");
+ seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
+ seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "MAX VP count[%d]\n", mb[11]);
+ seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
+ seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
+ mb[20]);
+ seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
+ mb[21]);
+ seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
+ mb[22]);
+ seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
+ mb[23]);
- seq_puts(s, "FW Resource count\n\n");
- seq_printf(s, "Original TGT exchg count[%d]\n",
- ha->orig_fw_tgt_xcb_count);
- seq_printf(s, "current TGT exchg count[%d]\n",
- ha->cur_fw_tgt_xcb_count);
- seq_printf(s, "original Initiator Exchange count[%d]\n",
- ha->orig_fw_xcb_count);
- seq_printf(s, "Current Initiator Exchange count[%d]\n",
- ha->cur_fw_xcb_count);
- seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
- seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
- seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
- seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+ }
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d5cef0727e72..5d8688e5bc7c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1392,7 +1392,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t port_name[8];
uint8_t node_name[8];
- uint32_t remote_nport_id;
+ uint8_t remote_nport_id[4];
uint32_t reserved_5;
} f2;
} u;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fa115c7433e5..e9295398050c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
@@ -104,11 +105,18 @@ int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
-int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
- void *);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
+ void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -148,6 +156,7 @@ extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
+extern int qla2xuseresexchforels;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -203,6 +212,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
/*
* Global Functions in qla_mid.c source file.
@@ -494,6 +504,7 @@ int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
/*
* Global Function Prototypes in qla_isr.c source file.
@@ -639,14 +650,26 @@ extern void qla2x00_free_fcport(fc_port_t *);
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
-void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gpnft(scsi_qla_host_t *, u8);
+void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
+void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
+int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
+void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
+void qla_scan_work_fn(struct work_struct *);
+
/*
* Global Function Prototypes in qla_attr.c source file.
*/
@@ -864,8 +887,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
struct fc_port *, enum qlt_plogi_link_t);
void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
-extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
-extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *);
extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index bc3db6abc9a0..5bf9a59432f6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -14,6 +14,10 @@ static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_rft_id(scsi_qla_host_t *);
static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
+static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
+static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
+static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
+static int qla_async_rsnn_nn(scsi_qla_host_t *);
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
@@ -175,6 +179,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
+ case CS_TIMEOUT:
+ rval = QLA_FUNCTION_TIMEOUT;
+ /* fall through */
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -508,6 +515,72 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
+static void qla2x00_async_sns_sp_done(void *s, int rc)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct ct_sns_pkt *ct_sns;
+ struct qla_work_evt *e;
+
+ sp->rc = rc;
+ if (rc == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s exiting normally.\n",
+ sp->name);
+ } else if (rc == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s timeout\n", sp->name);
+ } else {
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ sp->retry_count++;
+ if (sp->retry_count > 3)
+ goto err;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s fail rc %x. Retry count %d\n",
+ sp->name, rc, sp->retry_count);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
+ if (!e)
+ goto err2;
+
+ del_timer(&sp->u.iocb_cmd.timer);
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ return;
+ }
+
+err:
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+err2:
+ if (!e) {
+ /* Ignore the resulting kernel warning; not freeing here would leak memory. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return;
+ }
+
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+}
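qla2x00_async_sns_sp_done() now backs all four converted registrations (RFT_ID, RFF_ID, RNN_ID, RSNN_NN). On error it clears the response buffer and requeues the same srb up to three times via a QLA_EVT_SP_RETRY work item, and defers the DMA unmap/free through QLA_EVT_UNMAP; only when even the work allocation fails does it free inline. The control flow, reduced to a standalone sketch with stubbed queueing hooks:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 3

struct req { int retry_count; };

/* Stubs standing in for the driver's work-queue plumbing. */
static bool queue_retry(struct req *r) { (void)r; return true; }
static bool queue_unmap(struct req *r) { (void)r; return true; }
static void free_inline(struct req *r) { (void)r; puts("freed inline"); }

static void on_done(struct req *r, int rc)
{
	if (rc == 0)
		return;					/* success, nothing to redo */

	if (++r->retry_count <= MAX_RETRIES && queue_retry(r))
		return;					/* resubmit from process context */

	if (!queue_unmap(r))				/* defer the teardown... */
		free_inline(r);				/* ...or free right here */
}

int main(void)
{
	struct req r = { 0 };

	on_done(&r, -1);
	return 0;
}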
+
/**
* qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
* @ha: HA context
@@ -517,57 +590,87 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_rft_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rft_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFT_ID_REQ_SIZE;
- arg.rsp_size = RFT_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rftid(vha, &vha->d_id);
+}
- /* Issue RFT_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rft_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
- RFT_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, FC-4 types */
ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
-
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
if (vha->flags.nvme_enabled)
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2044,
- "RFT_ID exiting normally.\n");
+ goto done_free_sp;
}
-
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x.\n",
+ sp->name, sp->handle, d_id->b24);
+ return rval;
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -579,12 +682,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
int
qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -592,47 +690,81 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFF_ID_REQ_SIZE;
- arg.rsp_size = RFF_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
+ FC4_TYPE_FCP_SCSI);
+}
- /* Issue RFF_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 fc4feature, u8 fc4type)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
- RFF_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
- /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
- ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
- ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
- ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rff_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- qlt_rff_id(vha, ct_req);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
- ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
+ ct_req->req.rff_id.port_id[0] = d_id->b.domain;
+ ct_req->req.rff_id.port_id[1] = d_id->b.area;
+ ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
+ ct_req->req.rff_id.fc4_feature = fc4feature;
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2047,
"RFF_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2048,
- "RFF_ID exiting normally.\n");
+ goto done_free_sp;
}
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
+ sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -644,54 +776,85 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
int
qla2x00_rnn_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rnn_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RNN_ID_REQ_SIZE;
- arg.rsp_size = RNN_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
+}
- /* Issue RNN_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 *node_name)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rnid";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, node_name */
ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
-
memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x204d,
"RNN_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204e,
- "RNN_ID exiting normally.\n");
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x\n",
+ sp->name, sp->handle, d_id->b24);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
void
@@ -718,12 +881,7 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
int
qla2x00_rsnn_nn(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -731,22 +889,49 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = 0;
- arg.rsp_size = RSNN_NN_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rsnn_nn(vha);
+}
- /* Issue RSNN_NN */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rsnn_nn";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
- RSNN_NN_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
/* Prepare CT arguments -- node_name, symbolic node_name, size */
memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
@@ -754,32 +939,33 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
/* Prepare the Symbolic Node Name */
qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
sizeof(ct_req->req.rsnn_nn.sym_node_name));
-
- /* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
(uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
- /* Update MS IOCB request */
- ms_pkt->req_bytecount =
- cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2051,
- "RSNN_NN issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2052,
- "RSNN_NN exiting normally.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x2051,
+ "RSNN_NN issue IOCB failed (%d).\n", rval);
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x.\n",
+ sp->name, sp->handle);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -2790,15 +2976,20 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC login state %d\n",
- __func__, fcport->port_name, fcport->fw_login_state);
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->sp->gen2 != fcport->login_gen) {
/* PLOGI/PRLI/LOGO came in while cmd was out.*/
ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ "%s %8phC generation changed rscn %d|%d n",
__func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+ fcport->rscn_gen);
return;
}
@@ -2811,7 +3002,21 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable plugged into the same place */
switch (vha->host->active_mode) {
case MODE_TARGET:
- /* NOOP. let the other guy login to us.*/
+ if (fcport->fw_login_state ==
+ DSC_LS_PRLI_COMP) {
+ u16 data[2];
+ /*
+ * Late RSCN was delivered.
+ * Remote port already login'ed.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "%s %d %8phC post adisc\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ }
break;
case MODE_INITIATOR:
case MODE_DUAL:
@@ -2820,24 +3025,29 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post %s\n", __func__,
__LINE__, fcport->port_name,
(atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "gpdb" : "gnl");
+ FCS_ONLINE) ? "adisc" : "gnl");
if (atomic_read(&fcport->state) ==
- FCS_ONLINE)
- qla24xx_post_gpdb_work(vha,
- fcport, PDO_FORCE_ADISC);
- else
+ FCS_ONLINE) {
+ u16 data[2];
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ } else {
qla24xx_post_gnl_work(vha,
fcport);
+ }
break;
}
} else { /* fcport->d_id.b24 != ea->id.b24 */
fcport->d_id.b24 = ea->id.b24;
- if (fcport->deleted == QLA_SESS_DELETED) {
+ fcport->id_changed = 1;
+ if (fcport->deleted != QLA_SESS_DELETED) {
ql_dbg(ql_dbg_disc, vha, 0x2021,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
}
} else { /* ea->sp->gen1 != fcport->rscn_gen */
@@ -2854,7 +3064,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2042,
"%s %d %8phC post del sess\n", __func__,
__LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2045,
"%s %d %8phC login\n", __func__, __LINE__,
@@ -2878,7 +3088,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
struct event_arg ea;
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
@@ -2889,9 +3099,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GIDPN_DONE;
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s res %x, WWPN %8phC ID %3phC \n",
- sp->name, res, fcport->port_name, id);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s WWPN %8phC timed out.\n",
+ sp->name, fcport->port_name);
+ qla24xx_post_gidpn_work(sp->vha, fcport);
+ sp->free(sp);
+ return;
+ } else if (res) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s fail res %x, WWPN %8phC\n",
+ sp->name, res, fcport->port_name);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s good WWPN %8phC ID %3phC\n",
+ sp->name, fcport->port_name, id);
+ }
qla2x00_fcport_event_handler(vha, &ea);
@@ -2904,16 +3127,16 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GID_PN;
fcport->scan_state = QLA_FCPORT_SCAN;
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gidpn";
sp->gen1 = fcport->rscn_gen;
@@ -2954,8 +3177,8 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -2974,6 +3197,7 @@ int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -2986,9 +3210,39 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
+}
+
static void qla24xx_async_gpsc_sp_done(void *s, int res)
{
struct srb *sp = s;
@@ -3004,7 +3258,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3055,6 +3309,7 @@ done:
ea.event = FCME_GPSC_DONE;
ea.rc = res;
ea.fcport = fcport;
+ ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
sp->free(sp);
@@ -3066,14 +3321,14 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
@@ -3113,8 +3368,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -3133,7 +3388,7 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
return qla2x00_post_work(vha, e);
}
-void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
@@ -3155,43 +3410,137 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport;
- unsigned long flags;
+ fc_port_t *fcport, *conflict, *t;
+ u16 data[2];
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d port_id: %06x\n",
+ __func__, __LINE__, ea->id.b24);
- if (fcport) {
- /* cable moved. just plugged in */
- fcport->rscn_gen++;
- fcport->d_id = ea->id;
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0x2064,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- break;
+ if (ea->rc) {
+ /* cable is disconnected */
+ list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
+ if (fcport->d_id.b24 == ea->id.b24) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->disc_state);
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ }
}
} else {
- /* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0x2065,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
+ /* cable is connected */
+ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+ if (fcport) {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if ((conflict->d_id.b24 == ea->id.b24) &&
+ (fcport != conflict)) {
+ /* Two fcports share a conflicting N_Port ID,
+ * or an existing fcport's N_Port ID conflicts
+ * with the new fcport's.
+ */
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
+
+ fcport->rscn_gen++;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ switch (fcport->disc_state) {
+ case DSC_LOGIN_COMPLETE:
+ /* recheck session is still intact. */
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC revalidate session with ADISC\n",
+ __func__, __LINE__, fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ fcport->d_id = ea->id;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ fcport->d_id = ea->id;
+ break;
+ default:
+ fcport->d_id = ea->id;
+ break;
+ }
+ } else {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if (conflict->d_id.b24 == ea->id.b24) {
+ /* Two fcports share a conflicting N_Port ID,
+ * or an existing fcport's N_Port ID conflicts
+ * with the new fcport's.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
- qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+ /* create new fcport */
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
+ qla24xx_post_newsess_work(vha, &ea->id,
+ ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3205,11 +3554,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
struct event_arg ea;
struct qla_work_evt *e;
+ unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s res %x ID %3phC. %8phC\n",
- sp->name, res, ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
+ if (res)
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
+ sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
+ sp->name, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3220,9 +3576,26 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GPNID_DONE;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_del(&sp->elem);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (res) {
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+ } else if (sp->gen1) {
+ /* There was another RSCN for this Nport ID */
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+
qla2x00_fcport_event_handler(vha, &ea);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
/* Ignore the resulting kernel warning; not freeing here would leak memory. */
if (sp->u.iocb_cmd.u.ctarg.req) {
@@ -3253,8 +3626,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
- srb_t *sp;
+ srb_t *sp, *tsp;
struct ct_sns_pkt *ct_sns;
+ unsigned long flags;
if (!vha->flags.online)
goto done;
@@ -3265,8 +3639,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpnid";
+ sp->u.iocb_cmd.u.ctarg.id = *id;
+ sp->gen1 = 0;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(tsp, &vha->gpnid_list, elem) {
+ if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
+ tsp->gen1++;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ sp->free(sp);
+ goto done;
+ }
+ }
+ list_add_tail(&sp->elem, &vha->gpnid_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
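qla24xx_async_gpnid() now keeps every in-flight GPN_ID srb on vha->gpnid_list: a new query for a port ID that is already being looked up just bumps the pending command's gen1 counter and frees the duplicate, and the completion handler (above) reissues the query once when gen1 is non-zero, since another RSCN arrived meanwhile. The coalescing idea, in miniature:

#include <stdint.h>
#include <stddef.h>

struct query {
	struct query *next;
	uint32_t id;		/* 24-bit N_Port ID */
	int gen;		/* bumped for each coalesced duplicate */
};

/* Return 1 if an in-flight query for @id absorbed the new request. */
static int coalesce(struct query *head, uint32_t id)
{
	struct query *q;

	for (q = head; q; q = q->next) {
		if (q->id == id) {
			q->gen++;	/* completion re-runs the lookup once */
			return 1;
		}
	}
	return 0;			/* caller issues and links a new query */
}

int main(void)
{
	struct query a = { NULL, 0x010203, 0 };

	return coalesce(&a, 0x010203) ? 0 : 1;	/* coalesces, exits 0 */
}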
@@ -3393,7 +3781,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -3441,3 +3829,720 @@ done_free_sp:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
+
+/* GPN_FT + GNN_FT*/
+static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ u64 twwn;
+ int rc = 0;
+
+ if (!ha->num_vhosts)
+ return 0;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ twwn = wwn_to_u64(vp->port_name);
+ if (wwn == twwn) {
+ rc = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return rc;
+}
+
+void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ fc_port_t *fcport;
+ u32 i, rc;
+ bool found;
+ u8 fc4type = sp->gen2;
+ struct fab_scan_rp *rp;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+
+ if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s scan stop due to chip reset %x/%x\n",
+ sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
+ goto out;
+ }
+
+ rc = sp->rc;
+ if (rc) {
+ vha->scan.scan_retry++;
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Fabric scan failed on all retries.\n");
+ }
+ goto out;
+ }
+ vha->scan.scan_retry = 0;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ u64 wwn;
+
+ rp = &vha->scan.l[i];
+ found = false;
+
+ wwn = wwn_to_u64(rp->port_name);
+ if (wwn == 0)
+ continue;
+
+ if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
+ continue;
+
+ /* Bypass reserved domain fields. */
+ if ((rp->id.b.domain & 0xf0) == 0xf0)
+ continue;
+
+ /* Bypass virtual ports of the same host. */
+ if (qla2x00_is_a_vp(vha, wwn))
+ continue;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
+ continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->d_id.b24 = rp->id.b24;
+ found = true;
+ /*
+ * If the device was not a fabric device before,
+ * clear its stale loop ID and mark it as fabric.
+ */
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ qla2x00_clear_loop_id(fcport);
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ }
+ break;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, rp->port_name);
+ qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
+ rp->node_name, NULL, fc4type);
+ }
+ }
+
+ /*
+ * Log out all fabric devices previously marked lost, except FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ continue;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ } else
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
+out:
+ qla24xx_sp_unmap(vha, sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
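The loop above is a mark-and-sweep reconciliation of the port database against the fabric's answer: every known fcport is first marked QLA_FCPORT_SCAN, each entry returned by the name server re-marks its port QLA_FCPORT_FOUND, and whatever is still in the SCAN state afterwards is marked lost and scheduled for session deletion. Below is a minimal standalone sketch of that pattern only; the struct port type and the PORT_SCAN/PORT_FOUND states are hypothetical stand-ins, not driver types.

#include <stdio.h>
#include <string.h>

enum scan_state { PORT_SCAN, PORT_FOUND };	/* hypothetical states */

struct port {
	unsigned char wwpn[8];
	enum scan_state state;
};

static void reconcile(struct port *known, int nknown,
		      const unsigned char fabric[][8], int nfabric)
{
	int i, j;

	/* Mark phase: assume every known port has disappeared. */
	for (i = 0; i < nknown; i++)
		known[i].state = PORT_SCAN;

	/* Match phase: ports the name server reported are still present. */
	for (j = 0; j < nfabric; j++)
		for (i = 0; i < nknown; i++)
			if (!memcmp(known[i].wwpn, fabric[j], 8))
				known[i].state = PORT_FOUND;

	/* Sweep phase: whatever stayed in PORT_SCAN was not reported
	 * and would be scheduled for session deletion. */
	for (i = 0; i < nknown; i++)
		if (known[i].state == PORT_SCAN)
			printf("port %d: schedule for deletion\n", i);
}

int main(void)
{
	struct port known[2] = {
		{ { 0x50, 0, 0, 0, 0, 0, 0, 1 }, PORT_SCAN },
		{ { 0x50, 0, 0, 0, 0, 0, 0, 2 }, PORT_SCAN },
	};
	const unsigned char fabric[1][8] = {
		{ 0x50, 0, 0, 0, 0, 0, 0, 1 },
	};

	reconcile(known, 2, fabric, 1);	/* port 1 gets swept */
	return 0;
}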
+
+static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_work_evt *e;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_gpnft_rsp *ct_rsp =
+ (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct ct_sns_gpn_ft_data *d;
+ struct fab_scan_rp *rp;
+ int i, j, k;
+ u16 cmd = be16_to_cpu(ct_req->command);
+
+ /* the gen2 field holds the FC4 type */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x FC4Type %x\n",
+ sp->name, res, sp->gen2);
+
+ if (res) {
+ unsigned long flags;
+
+ sp->free(sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s rescan failed on all retries\n",
+ sp->name);
+ }
+ return;
+ }
+
+ if (!res) {
+ port_id_t id;
+ u64 wwn;
+
+ j = 0;
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ d = &ct_rsp->entries[i];
+
+ id.b.rsvd_1 = 0;
+ id.b.domain = d->port_id[0];
+ id.b.area = d->port_id[1];
+ id.b.al_pa = d->port_id[2];
+ wwn = wwn_to_u64(d->port_name);
+
+ if (id.b24 == 0 || wwn == 0)
+ continue;
+
+ if (cmd == GPN_FT_CMD) {
+ rp = &vha->scan.l[j];
+ rp->id = id;
+ memcpy(rp->port_name, d->port_name, 8);
+ j++;
+ } else { /* GNN_FT_CMD */
+ for (k = 0; k < vha->hw->max_fibre_devices;
+ k++) {
+ rp = &vha->scan.l[k];
+ if (id.b24 == rp->id.b24) {
+ memcpy(rp->node_name,
+ d->port_name, 8);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (cmd == GPN_FT_CMD)
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
+ else
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
+ if (!e) {
+ /* Please ignore the kernel warning; freeing here avoids a memory leak. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s unable to alloc work element\n",
+ sp->name);
+ sp->free(sp);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp->rc = res;
+ e->u.iosb.sp = sp;
+
+ qla2x00_post_work(vha, e);
+}
+
+/*
+ * Get WWNN list for fc4_type
+ *
+ * The same SRB is re-used from the GPN_FT command to avoid
+ * freeing and re-allocating memory.
+ */
+static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
+ u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: req %p rsp %p are not setup\n",
+ __func__, sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.rsp);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ WARN_ON(1);
+ goto done_free_sp;
+ }
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
+ sp->u.iocb_cmd.u.ctarg.rsp_size);
+
+ /* GNN_FT req (reuses the GPN_FT request layout) */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+} /* GNNFT */
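The division of labor between the two passes: GPN_FT fills vha->scan.l with (port ID, WWPN) pairs, and this GNN_FT follow-up annotates the same list with node names, joined on the 24-bit port ID in the completion handler above. A hedged sketch of that join; scan_entry and apply_gnnft_entry are hypothetical names loosely modeled on struct fab_scan_rp.

#include <stdint.h>
#include <string.h>

struct scan_entry {			/* hypothetical, cf. fab_scan_rp */
	uint32_t port_id;
	uint8_t  port_name[8];		/* filled by the GPN_FT pass */
	uint8_t  node_name[8];		/* filled by the GNN_FT pass */
};

/* Apply one GNN_FT response entry to the list the earlier GPN_FT
 * pass populated, matching on the 24-bit port ID. */
static void apply_gnnft_entry(struct scan_entry *l, int n,
			      uint32_t id, const uint8_t node_name[8])
{
	int k;

	for (k = 0; k < n; k++) {
		if (l[k].port_id == id) {
			memcpy(l[k].node_name, node_name, 8);
			break;
		}
	}
}

int main(void)
{
	struct scan_entry l[1] = {
		{ 0x010203, { 0x50, 0, 0, 0, 0, 0, 0, 1 }, { 0 } },
	};
	const uint8_t nn[8] = { 0x20, 0, 0, 0, 0, 0, 0, 1 };

	apply_gnnft_entry(l, 1, 0x010203, nn);
	return 0;
}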
+
+void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+ del_timer(&sp->u.iocb_cmd.timer);
+ qla24xx_async_gnnft(vha, sp, sp->gen2);
+}
+
+/* Get WWPN list for certain fc4_type */
+int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+ u32 rspsz;
+ unsigned long flags;
+
+ if (!vha->flags.online)
+ return rval;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags & SF_SCANNING) {
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ return rval;
+ }
+ vha->scan.scan_flags |= SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ return rval;
+ }
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ rspsz = sizeof(struct ct_sns_gpnft_rsp) +
+ ((vha->hw->max_fibre_devices - 1) *
+ sizeof(struct ct_sns_gpn_ft_data));
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ memset(vha->scan.l, 0, vha->scan.size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
+
+ /* GPN_FT req */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+}
+
+void qla_scan_work_fn(struct work_struct *work)
+{
+ struct fab_scan *s = container_of(to_delayed_work(work),
+ struct fab_scan, scan_work);
+ struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
+ scan);
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule loop resync\n", __func__);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_QUEUED;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
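This work function pairs with the RSCN_PORT_ADDR case in qla2x00_fcport_event_handler below: the first RSCN sets SF_QUEUED and schedules the scan 5 ticks out, and any RSCN arriving while the flag is set is absorbed, so a burst of RSCNs costs a single fabric rescan. A minimal debounce sketch of that idea, with hypothetical names and the driver's work_lock serialization omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

static bool scan_queued;

/* Stand-in for schedule_delayed_work(&vha->scan.scan_work, 5). */
static void schedule_scan(void)
{
	printf("scan scheduled\n");
}

/* Called once per RSCN; only the first event in a burst schedules. */
static void on_rscn(void)
{
	if (!scan_queued) {
		scan_queued = true;
		schedule_scan();
	}
}

/* Runs when the delayed scan fires; clears the flag so the next
 * RSCN can schedule another rescan. */
static void scan_work(void)
{
	scan_queued = false;
	printf("rescanning fabric\n");
}

int main(void)
{
	on_rscn();
	on_rscn();	/* absorbed: a scan is already queued */
	scan_work();
	on_rscn();	/* schedules again after the scan ran */
	return 0;
}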
+
+/* GNN_ID */
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ qla24xx_post_gnl_work(vha, ea->fcport);
+}
+
+static void qla2x00_async_gnnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
+ struct event_arg ea;
+ u64 wwnn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwnn = wwn_to_u64(node_name);
+ if (wwnn)
+ memcpy(fcport->node_name, node_name, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GNNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->node_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GNN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
+ GNN_ID_RSP_SIZE);
+
+ /* GNN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gnnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+/* GFPN_ID */
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_gpsc_work(vha, fcport);
+}
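Like the ADISC and PLOGI-done handlers in qla_init.c further down, this handler validates the completion against two generation counters: sp->gen2 snapshots login_gen and sp->gen1 snapshots rscn_gen at submission time, and a mismatch at completion means the port state moved underneath the command, so the stale result is discarded or re-queried. A minimal sketch of that staleness check, with hypothetical type names.

#include <stdio.h>

/* Snapshot of the port's generation counters, taken when an async
 * command is submitted (sp->gen1 / sp->gen2 in the driver). */
struct snapshot {
	unsigned rscn_gen;
	unsigned login_gen;
};

struct port_state {
	unsigned rscn_gen;	/* bumped on every RSCN for this port */
	unsigned login_gen;	/* bumped on every login state change */
};

/* Returns 1 if the completion is still trustworthy. */
static int completion_still_valid(const struct snapshot *snap,
				  const struct port_state *now)
{
	if (snap->login_gen != now->login_gen)
		return 0;	/* login state changed: discard result */
	if (snap->rscn_gen != now->rscn_gen)
		return 0;	/* RSCN arrived: re-query the fabric */
	return 1;
}

int main(void)
{
	struct snapshot snap = { 1, 3 };
	struct port_state now = { 2, 3 };	/* RSCN arrived meanwhile */

	return completion_still_valid(&snap, &now) ? 0 : 1;
}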
+
+static void qla2x00_async_gfpnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
+ struct event_arg ea;
+ u64 wwn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwn = wwn_to_u64(fpn);
+ if (wwn)
+ memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GFPNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->fabric_port_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GFPN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gfpnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
+ GFPN_ID_RSP_SIZE);
+
+ /* GFPN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gfpnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
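All the qla24xx_post_*_work helpers in this file share one shape: refuse when the loop is down or the driver is unloading, allocate a typed work event, stash the fcport in it, and hand it to the DPC work queue. A hedged standalone sketch of that producer side; the event and queue types here are hypothetical.

#include <stdlib.h>

/* Hypothetical work event, loosely modeled on struct qla_work_evt. */
struct work_evt {
	int type;
	void *arg;
};

enum { EVT_GFPNID = 1 };

/* Stand-in for qla2x00_post_work(): a real implementation would
 * enqueue the event for the DPC thread; here we just consume it. */
static int post_work(struct work_evt *e)
{
	free(e);
	return 0;
}

/* Producer side, mirroring the qla24xx_post_*_work shape: bail out
 * early when the loop is down or the driver is unloading, allocate
 * a typed event, stash the port, and post it. */
static int post_gfpnid(int loop_ready, int unloading, void *fcport)
{
	struct work_evt *e;

	if (!loop_ready || unloading)
		return 0;

	e = malloc(sizeof(*e));
	if (!e)
		return -1;	/* QLA_FUNCTION_FAILED in the driver */

	e->type = EVT_GFPNID;
	e->arg = fcport;
	return post_work(e);
}

int main(void)
{
	return post_gfpnid(1, 0, NULL);
}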
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1bafa043f9f1..aececf664654 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -41,6 +41,7 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
struct event_arg *);
+static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -58,7 +59,8 @@ qla2x00_sp_timeout(struct timer_list *t)
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- sp->free(sp);
+ if (sp->type != SRB_ELS_DCMD)
+ sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
@@ -102,14 +104,21 @@ qla2x00_async_iocb_timeout(void *data)
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ if (fcport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ } else {
+ pr_info("Async-%s timeout - hdl=%x.\n",
+ sp->name, sp->handle);
+ }
switch (sp->type) {
case SRB_LOGIN_CMD:
+ if (!fcport)
+ break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
@@ -123,6 +132,8 @@ qla2x00_async_iocb_timeout(void *data)
qla24xx_handle_plogi_done_event(fcport->vha, &ea);
break;
case SRB_LOGOUT_CMD:
+ if (!fcport)
+ break;
qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
break;
case SRB_CT_PTHRU_CMD:
@@ -130,6 +141,7 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PLOGI:
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
+ case SRB_CTRL_VP:
sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
}
@@ -146,7 +158,8 @@ qla2x00_async_login_sp_done(void *ptr, int res)
ql_dbg(ql_dbg_disc, vha, 0x20dd,
"%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
memset(&ea, 0, sizeof(ea));
ea.event = FCME_PLOGI_DONE;
@@ -173,11 +186,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (!vha->flags.online)
goto done;
- if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
- (fcport->fw_login_state == DSC_LS_PRLI_PEND))
- goto done;
-
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -185,8 +193,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->flags |= FCF_ASYNC_SENT;
fcport->logout_completed = 0;
+ fcport->disc_state = DSC_LOGIN_PEND;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
@@ -201,7 +212,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -216,8 +226,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -227,7 +237,7 @@ qla2x00_async_logout_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
@@ -239,9 +249,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -272,16 +284,126 @@ done:
return rval;
}
+void
+qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ /* Don't re-login in target mode */
+ if (!fcport->tgt_session)
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qlt_logo_completion_handler(fcport, data[0]);
+}
+
+static void
+qla2x00_async_prlo_sp_done(void *s, int res)
+{
+ srb_t *sp = (srb_t *)s;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->vha;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp);
+}
+
+int
+qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_PRLO_CMD;
+ sp->name = "prlo";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_prlo_sp_done;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
+static
+void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (ea->data[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "%s %8phC: adisc fail: post delete\n",
+ __func__, ea->fcport->port_name);
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ return;
+ }
+
+ if (ea->fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != ea->fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, ea->fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ return;
+ }
+
+ __qla24xx_handle_gpdb_event(vha, ea);
+}
+
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s res %x %8phC\n",
+ sp->name, res, sp->fcport->port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_ADISC_DONE;
+ ea.rc = res;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.fcport = sp->fcport;
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
sp->free(sp);
}
@@ -313,15 +435,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
return rval;
done_free_sp:
sp->free(sp);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
return rval;
}
@@ -333,9 +455,19 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
u16 i, n, found = 0, loop_id;
port_id_t id;
u64 wwn;
- u8 opt = 0, current_login_state;
+ u16 data[2];
+ u8 current_login_state;
fcport = ea->fcport;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc,
+ fcport->login_gen, fcport->last_login_gen,
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
@@ -356,9 +488,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
- "%s %8phC login gen changed login %d|%d\n",
- __func__, fcport->port_name,
- fcport->last_login_gen, fcport->login_gen);
+ "%s %8phC login gen changed\n",
+ __func__, fcport->port_name);
return;
}
@@ -400,7 +531,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e3,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport, 1);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -430,8 +561,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e4,
"%s %d %8phC post gpdb\n",
__func__, __LINE__, fcport->port_name);
- opt = PDO_FORCE_ADISC;
- qla24xx_post_gpdb_work(vha, fcport, opt);
+
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
case DSC_LS_PORT_UNAVAIL:
default:
@@ -449,36 +586,29 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (!found) {
/* fw has no record of this port */
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- } else {
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
-
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
- qlt_schedule_sess_for_deletion
- (conflict_fcport, 1);
- }
-
- if (fcport->loop_id == loop_id) {
- /* FW already picked this loop id for another fcport */
- qla2x00_find_new_loop_id(vha, fcport);
- }
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
}
+
+ /* FW already picked this loop id for another fcport */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -496,6 +626,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
struct get_name_list_extended *e;
u64 wwn;
struct list_head h;
+ bool found = false;
ql_dbg(ql_dbg_disc, vha, 0x20e7,
"Async done-%s res %x mb[1]=%x mb[2]=%x \n",
@@ -539,12 +670,44 @@ qla24xx_async_gnl_sp_done(void *s, int res)
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
+ /* create new fcport if fw has knowledge of new sessions */
+ for (i = 0; i < n; i++) {
+ port_id_t id;
+ u64 wwnn;
+
+ e = &vha->gnl.l[i];
+ wwn = wwn_to_u64(e->port_name);
+
+ found = false;
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!memcmp((u8 *)&wwn, fcport->port_name,
+ WWN_SIZE)) {
+ found = true;
+ break;
+ }
+ }
+
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
+
+ if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC %06x post new sess\n",
+ __func__, __LINE__, (u8 *)&wwn, id.b24);
+ wwnn = wwn_to_u64(e->node_name);
+ qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
+ (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ }
+ }
+
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -558,14 +721,13 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
unsigned long flags;
u16 *mb;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
@@ -573,8 +735,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
if (vha->gnl.sent) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
+ return QLA_SUCCESS;
}
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -582,6 +743,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -616,8 +779,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -630,6 +793,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -639,31 +803,18 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
- int rval = QLA_SUCCESS;
struct event_arg ea;
ql_dbg(ql_dbg_disc, vha, 0x20db,
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- if (res) {
- rval = res;
- goto gpd_error_out;
- }
-
- pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
-
- rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-gpd_error_out:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
- ea.rc = rval;
ea.fcport = fcport;
ea.sp = sp;
@@ -754,7 +905,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -783,6 +933,7 @@ int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
e->u.fcport.fcport = fcport;
e->u.fcport.opt = opt;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -796,16 +947,16 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GPDB;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
@@ -851,47 +1002,17 @@ done_free_sp:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
qla24xx_post_gpdb_work(vha, fcport, opt);
return rval;
}
static
-void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- int rval = ea->rc;
- fc_port_t *fcport = ea->fcport;
unsigned long flags;
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
- fcport->disc_state, fcport->fw_login_state, rval);
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* target side must have changed it. */
- ql_dbg(ql_dbg_disc, vha, 0x20d3,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen,
- fcport->login_gen);
- return;
- } else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- return;
- }
-
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- return;
- }
-
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
ea->fcport->deleted = 0;
@@ -905,47 +1026,157 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
!vha->hw->flags.gpsc_supported) {
ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
+ __func__, __LINE__, ea->fcport->port_name,
vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20d7,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
- vha->fcport_count);
-
- qla24xx_post_gpsc_work(vha, fcport);
+ if (ea->fcport->id_changed) {
+ ea->fcport->id_changed = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gfpnid fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gfpnid_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, ea->fcport);
+ }
}
+ } else if (ea->fcport->login_succ) {
+ /*
+ * We have an existing session. A late RSCN delivery
+ * must have triggered the session to be re-validate.
+ * Session is still valid.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "%s %d %8phC session revalidate success\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+ struct port_database_24xx *pd;
+ struct srb *sp = ea->sp;
+
+ pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, ea->rc);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ switch (pd->current_login_state) {
+ case PDS_PRLI_COMPLETE:
+ __qla24xx_parse_gpdb(vha, fcport, pd);
+ break;
+ case PDS_PLOGI_PENDING:
+ case PDS_PLOGI_COMPLETE:
+ case PDS_PRLI_PENDING:
+ case PDS_PRLI2_PENDING:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
+ __func__, __LINE__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ case PDS_LOGO_PENDING:
+ case PDS_PORT_UNAVAILABLE:
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ __qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
-int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
- if (fcport->login_retry == 0)
- return 0;
+ u8 login = 0;
+ int rc;
- if (fcport->scan_state != QLA_FCPORT_FOUND)
- return 0;
+ if (qla_tgt_mode_enabled(vha))
+ return;
+
+ if (qla_dual_mode_enabled(vha)) {
+ if (N2N_TOPO(vha->hw)) {
+ u64 mywwn, wwn;
+
+ mywwn = wwn_to_u64(vha->port_name);
+ wwn = wwn_to_u64(fcport->port_name);
+ if (mywwn > wwn)
+ login = 1;
+ else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ && time_after_eq(jiffies,
+ fcport->plogi_nack_done_deadline))
+ login = 1;
+ } else {
+ login = 1;
+ }
+ } else {
+ /* initiator mode */
+ login = 1;
+ }
+
+ if (login) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ rc = qla2x00_find_new_loop_id(vha, fcport);
+ if (rc) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess - out of loopid\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->scan_state = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ }
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+}
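In an N2N (point-to-point) topology both ends see exactly one remote port, so the code needs a deterministic rule for which side sends PLOGI: the port with the numerically larger WWPN initiates, and the other side waits (or proceeds once the PLOGI-complete grace deadline expires). A tiny sketch of the tie-break; wwpn_to_u64 here is a hypothetical big-endian fold approximating the kernel's wwn_to_u64().

#include <stdint.h>

/* Big-endian fold of an 8-byte WWPN, approximating wwn_to_u64(). */
static uint64_t wwpn_to_u64(const uint8_t wwpn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwpn[i];
	return v;
}

/* Returns 1 if the local port should send PLOGI in N2N topology. */
static int local_initiates_login(const uint8_t my_wwpn[8],
				 const uint8_t peer_wwpn[8])
{
	return wwpn_to_u64(my_wwpn) > wwpn_to_u64(peer_wwpn);
}

int main(void)
{
	const uint8_t mine[8] = { 0x21, 0, 0, 0x24, 0xff, 0, 0, 2 };
	const uint8_t peer[8] = { 0x21, 0, 0, 0x24, 0xff, 0, 0, 1 };

	return local_initiates_login(mine, peer) ? 0 : 1;
}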
+
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ u16 data[2];
+ u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
- fcport->loop_id);
+ fcport->login_gen, fcport->login_retry,
+ fcport->loop_id, fcport->scan_state);
- fcport->login_retry--;
+ if (fcport->login_retry == 0)
+ return 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ return 0;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
+ }
}
/* for pure Target Mode. Login will not be initiated */
@@ -957,19 +1188,23 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
+ fcport->login_retry--;
+
switch (fcport->disc_state) {
case DSC_DELETED:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ wwn = wwn_to_u64(fcport->node_name);
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
ql_dbg(ql_dbg_disc, vha, 0x20bd,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_post_gnl_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20bf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla_chk_n2n_b4_login(vha, fcport);
}
break;
@@ -981,40 +1216,26 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
}
- if (fcport->flags & FCF_FCP2_DEVICE) {
- u8 opt = PDO_FORCE_ADISC;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c9,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
-
- fcport->disc_state = DSC_GPDB;
- qla24xx_post_gpdb_work(vha, fcport, opt);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
- }
-
+ qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gidpn_work(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ qla_chk_n2n_b4_login(vha, fcport);
+ else
+ qla24xx_post_gidpn_work(vha, fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post gpdb\n",
+ "%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
default:
@@ -1040,16 +1261,15 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
switch (fcport->disc_state) {
case DSC_DELETED:
case DSC_LOGIN_COMPLETE:
- qla24xx_post_gidpn_work(fcport->vha, fcport);
+ qla24xx_post_gpnid_work(fcport->vha, &ea->id);
break;
-
default:
break;
}
}
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
- u8 *port_name, void *pla)
+ u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
struct qla_work_evt *e;
e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
@@ -1058,47 +1278,20 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
e->u.new_sess.id = *id;
e->u.new_sess.pla = pla;
+ e->u.new_sess.fc4_type = fc4_type;
memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+ if (node_name)
+ memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
return qla2x00_post_work(vha, e);
}
static
-int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
- struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- if (test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- switch (vha->host->active_mode) {
- case MODE_INITIATOR:
- case MODE_DUAL:
- if (fcport->scan_state == QLA_FCPORT_FOUND)
- qla24xx_fcport_handle_login(vha, fcport);
- break;
-
- case MODE_TARGET:
- default:
- /* no-op */
- break;
- }
-
- return 0;
-}
-
-static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
- if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->login_retry++;
- return;
- }
-
ql_dbg(ql_dbg_disc, vha, 0x2102,
"%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
__func__, fcport->port_name, fcport->disc_state,
@@ -1113,8 +1306,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
return;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
+ }
}
if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1132,7 +1327,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gidpn(vha, fcport);
+ qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1141,16 +1336,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport, *f, *tf;
+ fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- int rc;
+ unsigned long flags;
switch (ea->event) {
- case FCME_RELOGIN:
case FCME_RSCN:
case FCME_GIDPN_DONE:
case FCME_GPSC_DONE:
case FCME_GPNID_DONE:
+ case FCME_GNNID_DONE:
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
return;
@@ -1171,20 +1366,15 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (!fcport) {
- /* cable moved */
- rc = qla24xx_post_gpnid_work(vha, &ea->id);
- if (rc) {
- ql_log(ql_log_warn, vha, 0xd044,
- "RSCN GPNID work failed %02x%02x%02x\n",
- ea->id.b.domain, ea->id.b.area,
- ea->id.b.al_pa);
- }
- } else {
- ea->fcport = fcport;
- qla24xx_handle_rscn_event(fcport, ea);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
}
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1227,7 +1417,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
qla24xx_handle_gnl_done_event(vha, ea);
break;
case FCME_GPSC_DONE:
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ qla24xx_handle_gpsc_event(vha, ea);
break;
case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
qla24xx_handle_plogi_done_event(vha, ea);
@@ -1244,8 +1434,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFFID_DONE:
qla24xx_handle_gffid_event(vha, ea);
break;
- case FCME_DELETE_DONE:
- qla24xx_handle_delete_done_event(vha, ea);
+ case FCME_ADISC_DONE:
+ qla24xx_handle_adisc_event(vha, ea);
+ break;
+ case FCME_GNNID_DONE:
+ qla24xx_handle_gnnid_event(vha, ea);
+ break;
+ case FCME_GFPNID_DONE:
+ qla24xx_handle_gfpnid_event(vha, ea);
break;
default:
BUG_ON(1);
@@ -1327,6 +1523,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
done_free_sp:
sp->free(sp);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -1368,6 +1565,13 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
sp->name = "abort";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+
+ if (vha->flags.qpairs_available && cmd_sp->qpair)
+ abt_iocb->u.abt.req_que_no =
+ cpu_to_le16(cmd_sp->qpair->req->id);
+ else
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+
sp->done = qla24xx_abort_sp_done;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1402,6 +1606,9 @@ qla24xx_async_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -1452,6 +1659,42 @@ static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
+ u16 lid;
+ struct fc_port *conflict_fcport;
+ unsigned long flags;
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1,
+ ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC Remote is trying to login\n",
+ __func__, __LINE__, fcport->port_name);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
@@ -1467,11 +1710,19 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_prli_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name);
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id, ea->fcport->d_id.b24);
+
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
+ ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
}
break;
@@ -1513,8 +1764,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
ea->fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
+ lid = ea->iop[1] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(ea->fcport->port_name),
+ ea->fcport->d_id, lid, &conflict_fcport);
+
+ if (conflict_fcport) {
+ /*
+ * Another fcport share the same loop_id/nport id.
+ * Conflict fcport needs to finish cleanup before this
+ * fcport can proceed to login.
+ */
+ conflict_fcport->conflict = ea->fcport;
+ ea->fcport->login_pause = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(ea->fcport);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+
+ qla2x00_clear_loop_id(ea->fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ ea->fcport->loop_id = lid;
+ ea->fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
break;
}
return;
@@ -2540,70 +2821,27 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
return rval;
}
-void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
int rval;
- uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
- eft_size, fce_size, mq_size;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
- if (ha->fw_dump) {
+ if (ha->eft) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
- "Firmware dump already allocated.\n");
+ "%s: Offload Mem is already allocated.\n",
+ __func__);
return;
}
- ha->fw_dumped = 0;
- ha->fw_dump_cap_flags = 0;
- dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
- req_q_size = rsp_q_size = 0;
-
- if (IS_QLA27XX(ha))
- goto try_fce;
-
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- fixed_size = sizeof(struct qla2100_fw_dump);
- } else if (IS_QLA23XX(ha)) {
- fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
- mem_size = (ha->fw_memory_size - 0x11000 + 1) *
- sizeof(uint16_t);
- } else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
- fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
- else if (IS_QLA81XX(ha))
- fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
- else if (IS_QLA25XX(ha))
- fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
- else
- fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
-
- mem_size = (ha->fw_memory_size - 0x100000 + 1) *
- sizeof(uint32_t);
- if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
- mq_size = sizeof(struct qla2xxx_mq_chain);
- /*
- * Allocate maximum buffer size for all queues.
- * Resizing must be done at end-of-dump processing.
- */
- mq_size += ha->max_req_queues *
- (req->length * sizeof(request_t));
- mq_size += ha->max_rsp_queues *
- (rsp->length * sizeof(response_t));
- }
- if (ha->tgt.atio_ring)
- mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ if (IS_FWI2_CAPABLE(ha)) {
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha))
goto try_eft;
-try_fce:
if (ha->fce)
dma_free_coherent(&ha->pdev->dev,
FCE_SIZE, ha->fce, ha->fce_dma);
@@ -2631,7 +2869,6 @@ try_fce:
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
- fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
@@ -2648,7 +2885,7 @@ try_eft:
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
EFT_SIZE / 1024);
- goto cont_alloc;
+ goto eft_err;
}
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
@@ -2657,17 +2894,76 @@ try_eft:
"Unable to initialize EFT (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
tc_dma);
- goto cont_alloc;
+ goto eft_err;
}
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
- eft_size = EFT_SIZE;
ha->eft_dma = tc_dma;
ha->eft = tc;
}
-cont_alloc:
+eft_err:
+ return;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+ uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+ eft_size, fce_size, mq_size;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct qla2xxx_fw_dump *fw_dump;
+
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ fixed_size = sizeof(struct qla2100_fw_dump);
+ } else if (IS_QLA23XX(ha)) {
+ fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+ mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+ sizeof(uint16_t);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
+ fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+ else if (IS_QLA25XX(ha))
+ fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+ else
+ fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+ mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+ sizeof(uint32_t);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
+ if (ha->tgt.atio_ring)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ goto try_eft;
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+try_eft:
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+ eft_size = EFT_SIZE;
+ }
+
if (IS_QLA27XX(ha)) {
if (!ha->fw_dump_template) {
ql_log(ql_log_warn, vha, 0x00ba,
@@ -2695,51 +2991,44 @@ cont_alloc:
ha->exlogin_size;
allocate:
- ha->fw_dump = vmalloc(dump_size);
- if (!ha->fw_dump) {
- ql_log(ql_log_warn, vha, 0x00c4,
- "Unable to allocate (%d KB) for firmware dump.\n",
- dump_size / 1024);
-
- if (ha->fce) {
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
- ha->fce = NULL;
- ha->fce_dma = 0;
- }
-
- if (ha->eft) {
- dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
- ha->eft_dma);
- ha->eft = NULL;
- ha->eft_dma = 0;
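+	/*
+	 * Reallocate only when the computed size differs from the
+	 * current buffer; a dump buffer of the right size from a
+	 * previous pass is reused as-is.
+	 */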
+ if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
+ fw_dump = vmalloc(dump_size);
+ if (!fw_dump) {
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+ } else {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ ha->fw_dump = fw_dump;
+
+ ha->fw_dump_len = dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+ if (IS_QLA27XX(ha))
+ return;
+
+ ha->fw_dump->signature[0] = 'Q';
+ ha->fw_dump->signature[1] = 'L';
+ ha->fw_dump->signature[2] = 'G';
+ ha->fw_dump->signature[3] = 'C';
+ ha->fw_dump->version = htonl(1);
+
+ ha->fw_dump->fixed_size = htonl(fixed_size);
+ ha->fw_dump->mem_size = htonl(mem_size);
+ ha->fw_dump->req_q_size = htonl(req_q_size);
+ ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+ ha->fw_dump->eft_size = htonl(eft_size);
+ ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+ ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+ ha->fw_dump->header_size =
+ htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
- return;
}
- ha->fw_dump_len = dump_size;
- ql_dbg(ql_dbg_init, vha, 0x00c5,
- "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
-
- if (IS_QLA27XX(ha))
- return;
-
- ha->fw_dump->signature[0] = 'Q';
- ha->fw_dump->signature[1] = 'L';
- ha->fw_dump->signature[2] = 'G';
- ha->fw_dump->signature[3] = 'C';
- ha->fw_dump->version = htonl(1);
-
- ha->fw_dump->fixed_size = htonl(fixed_size);
- ha->fw_dump->mem_size = htonl(mem_size);
- ha->fw_dump->req_q_size = htonl(req_q_size);
- ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
-
- ha->fw_dump->eft_size = htonl(eft_size);
- ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
- ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
-
- ha->fw_dump->header_size =
- htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
static int
@@ -3065,9 +3354,12 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && ql2xallocfwdump
- && !(IS_P3P_TYPE(ha)))
+ if (!fw_major_version && !(IS_P3P_TYPE(ha)))
+ qla2x00_alloc_offload_mem(vha);
+
+ if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+
} else {
goto failed;
}
@@ -3278,6 +3570,12 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_4;
else
ha->fw_options[2] &= ~BIT_4;
+
+	/* Reserve 1/2 of emergency exchanges for ELS. */
+ if (qla2xuseresexchforels)
+ ha->fw_options[2] |= BIT_8;
+ else
+ ha->fw_options[2] &= ~BIT_8;
}
ql_dbg(ql_dbg_init, vha, 0x00e8,
@@ -3671,6 +3969,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
port_id_t id;
+ unsigned long flags;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3752,7 +4051,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.area = area;
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_host_map(vha, id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4293,6 +4594,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
+ if (ha->flags.rida_fmt2) {
+ /* With Rida Format 2, the login is already triggered.
+ * We know who is on the other side of the wire.
+			 * No need to do a login to find out, nor to drop into
+ * qla2x00_configure_local_loop().
+ */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ }
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -4521,6 +4837,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
@@ -4531,22 +4851,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /*
- * Mark local devices that were present with FCF_DEVICE_LOST for now.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&fcport->state) == FCS_ONLINE &&
- fcport->port_type != FCT_BROADCAST &&
- (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
-
- ql_dbg(ql_dbg_disc, vha, 0x2096,
- "Marking port lost loop_id=0x%04x.\n",
- fcport->loop_id);
-
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
- }
- }
-
	/* Initiate N2N login. */
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
rval = qla24xx_n2n_handle_login(vha, new_fcport);
@@ -4589,6 +4893,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+ new_fcport->scan_state = QLA_FCPORT_FOUND;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
@@ -4620,13 +4925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->d_id.b24 = new_fcport->d_id.b24;
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
-
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
-
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
break;
}
@@ -4637,11 +4936,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate a new replacement fcport. */
fcport = new_fcport;
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -4662,11 +4956,38 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
- qla2x00_update_fcport(vha, fcport);
-
found_devs++;
}
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ }
+
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
cleanup_allocation:
kfree(new_fcport);
@@ -4920,9 +5241,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
/* Mark the time right before querying FW for connected ports.
* This process is long, asynchronous and by the time it's done,
@@ -4932,7 +5250,17 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* will be newer than discovery_gen. */
qlt_do_generation_tick(vha, &discovery_gen);
- rval = qla2x00_find_all_fabric_devs(vha);
+ if (USE_ASYNC_SCAN(ha)) {
+		rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
+ if (rval)
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ rval = qla2x00_find_all_fabric_devs(vha);
+ }
if (rval != QLA_SUCCESS)
break;
} while (0);
@@ -5237,9 +5565,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
"%s %d %8phC post del sess\n",
__func__, __LINE__,
fcport->port_name);
-
- qlt_schedule_sess_for_deletion_lock
- (fcport);
+ qlt_schedule_sess_for_deletion(fcport);
continue;
}
}
@@ -5974,6 +6300,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ SAVE_TOPO(ha);
+ ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -8173,9 +8501,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
int ret = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = qpair->hw;
- if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
- goto fail;
-
qpair->delete_in_progress = 1;
while (atomic_read(&qpair->ref_count))
msleep(500);
@@ -8183,6 +8508,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
goto fail;
+
ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
if (ret != QLA_SUCCESS)
goto fail;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 17d2c20f1f75..4d32426393c7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -273,6 +273,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
+ init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d810a447cb4a..1b62e943ec49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2158,7 +2158,9 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
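+		/*
+		 * With shadow registers, the firmware keeps a copy of the
+		 * request-queue out pointer in host memory; read that copy
+		 * instead of going over the PCI bus to the MMIO register.
+		 */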
+ if (qpair->use_shadow_reg)
+ cnt = *req->out_ptr;
+ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2392,26 +2394,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- /* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_dbg(ql_dbg_io, vha, 0x3070,
- "mbx abort_command failed.\n");
- } else {
- ql_dbg(ql_dbg_io, vha, 0x3071,
- "mbx abort_command success.\n");
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
complete(&lio->u.els_logo.comp);
}
@@ -2631,7 +2620,7 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
struct scsi_qla_host *vha = sp->vha;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8pC\n",
+ "%s ELS hdl=%x, portid=%06x done %8phC\n",
sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
complete(&lio->u.els_plogi.comp);
@@ -3286,7 +3275,9 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->handle =
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
@@ -3294,7 +3285,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(req->id);
+ abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
wmb();
}
@@ -3381,6 +3372,40 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
return rval;
}
+static void
+qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
+{
+ int map, pos;
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->handle = sp->handle;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
+ vce->vp_count = cpu_to_le16(1);
+
+ /*
+ * index map in firmware starts with 1; decrement index
+ * this is ok as we never use index 0
+ */
+ map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
+ pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
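+	/* e.g. vp_index 1 -> map 0, bit 0; vp_index 9 -> map 1, bit 0 */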
+ vce->vp_idx_map[map] |= 1 << pos;
+}
+
+static void
+qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags =
+ cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
+
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3459,6 +3484,12 @@ qla2x00_start_sp(srb_t *sp)
case SRB_NACK_LOGO:
qla2x00_send_notify_ack_iocb(sp, pkt);
break;
+ case SRB_CTRL_VP:
+ qla25xx_ctrlvp_iocb(sp, pkt);
+ break;
+ case SRB_PRLO_CMD:
+ qla24xx_prlo_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2fd79129bb2a..14109d86c3f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -809,6 +809,7 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ SAVE_TOPO(ha);
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -922,7 +923,6 @@ skip_rio:
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
- ha->flags.gpsc_supported = 1;
vha->flags.management_server_logged_in = 0;
break;
@@ -1009,7 +1009,7 @@ skip_rio:
if (qla_ini_mode_enabled(vha)) {
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
fcport->logout_on_delete = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -1059,8 +1059,7 @@ global_port_update:
* Mark all devices as missing so we will login again.
*/
atomic_set(&vha->loop_state, LOOP_UP);
-
- qla2x00_mark_all_devices_lost(vha, 1);
+ vha->scan.scan_retry = 0;
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1202,6 +1201,7 @@ global_port_update:
qla2xxx_wake_dpc(vha);
}
}
+ /* fall through */
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
@@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* borrowing sts_entry_24xx.comp_status.
same location as ct_entry_24xx.comp_status
*/
- res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
sp->name);
sp->done(sp, res);
@@ -1769,7 +1769,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* drop through */
+ /* fall through */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1936,6 +1936,37 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
sp->done(sp, ret);
}
+static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
+ struct vp_ctrl_entry_24xx *vce)
+{
+ const char func[] = "CTRLVP-IOCB";
+ srb_t *sp;
+ int rval = QLA_SUCCESS;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
+ if (!sp)
+ return;
+
+ if (vce->entry_status != 0) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c4,
+ "%s: Failed to complete IOCB -- error status (%x)\n",
+ sp->name, vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c5,
+ "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
+ sp->name, le16_to_cpu(vce->comp_status),
+ le16_to_cpu(vce->vp_idx_failed));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_vport, vha, 0x10c6,
+ "Done %s.\n", __func__);
+ }
+
+ sp->rc = rval;
+ sp->done(sp, rval);
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2369,7 +2400,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int res = 0;
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
- uint8_t no_logout = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2640,7 +2670,6 @@ check_scsi_status:
break;
case CS_PORT_LOGGED_OUT:
- no_logout = 1;
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
@@ -2671,11 +2700,8 @@ check_scsi_status:
port_state_str[atomic_read(&fcport->state)],
comp_status);
- if (no_logout)
- fcport->logout_on_delete = 0;
-
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -2972,9 +2998,9 @@ process_err:
(response_t *)pkt);
break;
} else {
- /* drop through */
qlt_24xx_process_atio_queue(vha, 1);
}
+ /* fall through */
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3005,6 +3031,10 @@ process_err:
qla24xx_mbx_iocb_entry(vha, rsp->req,
(struct mbx_24xx_entry *)pkt);
break;
+ case VP_CTRL_IOCB_TYPE:
+ qla_ctrlvp_completed(vha, rsp->req,
+ (struct vp_ctrl_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb717d47339f..7397aeddd96c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -17,6 +17,7 @@ static struct mb_cmd_name {
{MBC_GET_PORT_DATABASE, "GPDB"},
{MBC_GET_ID_LIST, "GIDList"},
{MBC_GET_LINK_PRIV_STATS, "Stats"},
+ {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};
static const char *mb_to_str(uint16_t cmd)
@@ -3731,6 +3732,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
unsigned long flags;
int found;
port_id_t id;
+ struct fc_port *fcport;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3753,7 +3755,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
-
+ ha->current_topology = ISP_CFG_NL;
qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
@@ -3797,6 +3799,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ ha->flags.gpsc_supported = 1;
+ ha->current_topology = ISP_CFG_F;
/* buffer to buffer credit flag */
vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3862,6 +3866,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name);
/* N2N. direct connect */
+ ha->current_topology = ISP_CFG_N;
+ ha->flags.rida_fmt2 = 1;
vha->d_id.b.domain = rptid_entry->port_id[2];
vha->d_id.b.area = rptid_entry->port_id[1];
vha->d_id.b.al_pa = rptid_entry->port_id[0];
@@ -3869,6 +3875,40 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f2.port_name, 1);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ } else {
+ id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
+ id.b.area = rptid_entry->u.f2.remote_nport_id[1];
+ id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f2.port_name,
+ rptid_entry->u.f2.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3945,83 +3985,6 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
}
/*
- * qla24xx_control_vp
- * Enable a virtual port for given host
- *
- * Input:
- * ha = adapter block pointer.
- * vhba = virtual adapter (unused)
- * index = index number for enabled VP
- *
- * Returns:
- * qla2xxx local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
-{
- int rval;
- int map, pos;
- struct vp_ctrl_entry_24xx *vce;
- dma_addr_t vce_dma;
- struct qla_hw_data *ha = vha->hw;
- int vp_index = vha->vp_idx;
- struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
- "Entered %s enabling index %d.\n", __func__, vp_index);
-
- if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
- return QLA_PARAMETER_ERROR;
-
- vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
- if (!vce) {
- ql_log(ql_log_warn, vha, 0x10c2,
- "Failed to allocate VP control IOCB.\n");
- return QLA_MEMORY_ALLOC_FAILED;
- }
-
- vce->entry_type = VP_CTRL_IOCB_TYPE;
- vce->entry_count = 1;
- vce->command = cpu_to_le16(cmd);
- vce->vp_count = cpu_to_le16(1);
-
- /* index map in firmware starts with 1; decrement index
- * this is ok as we never use index 0
- */
- map = (vp_index - 1) / 8;
- pos = (vp_index - 1) & 7;
- mutex_lock(&ha->vport_lock);
- vce->vp_idx_map[map] |= 1 << pos;
- mutex_unlock(&ha->vport_lock);
-
- rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c3,
- "Failed to issue VP control IOCB (%x).\n", rval);
- } else if (vce->entry_status != 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c4,
- "Failed to complete IOCB -- error status (%x).\n",
- vce->entry_status);
- rval = QLA_FUNCTION_FAILED;
- } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c5,
- "Failed to complete IOCB -- completion status (%x).\n",
- le16_to_cpu(vce->comp_status));
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
- "Done %s.\n", __func__);
- }
-
- dma_pool_free(ha->s_dma_pool, vce, vce_dma);
-
- return rval;
-}
-
-/*
* qla2x00_send_change_request
* Receive or disable RSCN request from fabric controller
*
@@ -6160,8 +6123,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
}
/* Check for logged in state. */
- if (current_login_state != PDS_PRLI_COMPLETE &&
- last_login_state != PDS_PRLI_COMPLETE) {
+ if (current_login_state != PDS_PRLI_COMPLETE) {
ql_dbg(ql_dbg_mbx, vha, 0x119a,
"Unable to verify login-state (%x/%x) for loop_id %x.\n",
current_login_state, last_login_state, fcport->loop_id);
@@ -6350,3 +6312,32 @@ qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
return rval;
}
+
+int qla24xx_res_count_wait(struct scsi_qla_host *vha,
+ uint16_t *out_mb, int out_mb_sz)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
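+		/* Copy back at most SIZEOF_IOCB_MB_REG bytes of mailbox data. */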
+ if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
+ memcpy(out_mb, mc.mb, out_mb_sz);
+ else
+ memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index bd9f14bf7ac2..e965b16f21e3 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -50,10 +50,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
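+	/* The VP index map is now updated under the hardware lock. */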
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_vp_map(vha, SET_VP_IDX);
-
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&ha->vport_lock);
return vp_id;
@@ -158,9 +159,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
- spin_lock_irqsave(&vha->hw->vport_slock, flags);
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
- spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
@@ -264,13 +265,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_LIP_RESET:
case MBA_POINT_TO_POINT:
case MBA_CHG_IN_CONNECTION:
- case MBA_PORT_UPDATE:
- case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ if ((mb[3] & 0xff) == vha->vp_idx) {
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p\n",
+ i, *mb, vha);
+ qla2x00_async_event(vha, rsp, mb);
+ }
+ break;
}
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -319,8 +327,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
- qla2x00_do_work(vha);
-
/* Check if Fw is ready to configure VP first */
if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -343,15 +349,19 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
"FCPort update end.\n");
}
- if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
- !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
- atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+ atomic_read(&vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, vha, 0x4018,
- "Relogin needed scheduled.\n");
- qla2x00_relogin(vha);
- ql_dbg(ql_dbg_dpc, vha, 0x4019,
- "Relogin needed end.\n");
+ if (!vha->relogin_jif ||
+ time_after_eq(jiffies, vha->relogin_jif)) {
+ vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
+ qla24xx_post_relogin_work(vha);
+ }
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -475,7 +485,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+ vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
vha->dpc_flags = 0L;
@@ -569,14 +579,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (req) {
+ if (req && vha->flags.qpairs_req_created) {
req->options |= BIT_0;
ret = qla25xx_init_req_que(vha, req);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_req_que(vha, req);
+ }
return ret;
}
@@ -584,14 +596,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (rsp) {
+ if (rsp && vha->flags.qpairs_rsp_created) {
rsp->options |= BIT_0;
ret = qla25xx_init_rsp_que(vha, rsp);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_rsp_que(vha, rsp);
+ }
return ret;
}
@@ -884,3 +898,79 @@ que_failed:
failed:
return 0;
}
+
+static void qla_ctrlvp_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ complete(&sp->comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/**
+ * qla24xx_control_vp() - Enable a virtual port for given host
+ * @cmd: command type to be sent to enable the virtual port
+ * @cmd: command type to be sent for enable virtual port
+ *
+ * Return: qla2xxx local function return status code.
+ */
+int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ int vp_index = vha->vp_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+
+ ql_dbg(ql_dbg_vport, vha, 0x10c1,
+ "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
+
+ if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+ return QLA_PARAMETER_ERROR;
+
+ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CTRL_VP;
+ sp->name = "ctrl_vp";
+ sp->done = qla_ctrlvp_sp_done;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
+ sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
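+	/*
+	 * Block until qla_ctrlvp_sp_done() fires; the IOCB completion
+	 * path (qla_ctrlvp_completed) stores its status in sp->rc.
+	 */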
+ wait_for_completion(&sp->comp);
+ rval = sp->rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ goto done_free_sp;
+ default:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+done:
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 0aa9c38bf347..525ac35a757b 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -11,8 +11,6 @@
#include "qla_def.h"
#include "qla_gbl.h"
-#include <linux/delay.h>
-
#define TIMEOUT_100_MS 100
static const uint32_t qla8044_reg_tbl[] = {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 46f2d0cf7c0d..12ee6e02d146 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -277,6 +277,12 @@ MODULE_PARM_DESC(ql2xenablemsix,
" 1 -- enable MSI-X interrupt mechanism.\n"
" 2 -- enable MSI interrupt mechanism.\n");
+int qla2xuseresexchforels;
+module_param(qla2xuseresexchforels, int, 0444);
+MODULE_PARM_DESC(qla2xuseresexchforels,
+ "Reserve 1/2 of emergency exchanges for ELS.\n"
+ " 0 (default): disabled");
+
/*
* SCSI host template entry points
*/
@@ -294,7 +300,6 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
@@ -1705,93 +1710,103 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
-void
-qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+static void
+__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int que, cnt, status;
+ int cnt, status;
unsigned long flags;
srb_t *sp;
+ scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
uint8_t trace = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
- if (!req->outstanding_cmds)
- continue;
- for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (sp) {
- req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
- if (sp->type == SRB_NVME_CMD ||
- sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- qla_nvme_abort(ha, sp);
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- } else if (GET_CMD_SP(sp) &&
- !ha->flags.eeh_busy &&
- (!test_bit(ABORT_ISP_ACTIVE,
- &vha->dpc_flags)) &&
- (sp->type == SRB_SCSI_CMD)) {
- /*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
- * accessible/responding.
- *
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
- */
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
- }
- sp->done(sp, res);
- } else {
- if (!vha->hw->tgt.tgt_ops || !tgt ||
- qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
- continue;
- }
- cmd = (struct qla_tgt_cmd *)sp;
- qlt_abort_cmd_on_host_reset(cmd->vha,
- cmd);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ req = qp->req;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ req->outstanding_cmds[cnt] = NULL;
+ if (sp->cmd_type == TYPE_SRB) {
+ if (sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_NVME_LS) {
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ } else if (GET_CMD_SP(sp) &&
+ !ha->flags.eeh_busy &&
+ (!test_bit(ABORT_ISP_ACTIVE,
+ &vha->dpc_flags)) &&
+ (sp->type == SRB_SCSI_CMD)) {
+ /*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
+ * sp->done() call and not the
+ * call in qla2xxx_eh_abort()
+ * ends the SCSI command (with
+ * result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ /*
+ * Get rid of extra reference
+ * if immediate exit from
+ * ql2xxx_eh_abort
+ */
+ if (status == FAILED &&
+ (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(
+ &sp->ref_count);
}
+ sp->done(sp, res);
+ } else {
+ if (!vha->hw->tgt.tgt_ops || !tgt ||
+ qla_ini_mode_enabled(vha)) {
+ if (!trace)
+ ql_dbg(ql_dbg_tgt_mgt,
+ vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
+ continue;
+ }
+ cmd = (struct qla_tgt_cmd *)sp;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
}
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+}
+
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+{
+ int que;
+ struct qla_hw_data *ha = vha->hw;
+
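+	/* Abort on the base queue pair first, then on each mapped qpair. */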
+ __qla2x00_abort_all_cmds(ha->base_qpair, res);
+
+ for (que = 0; que < ha->max_qpairs; que++) {
+ if (!ha->queue_pair_map[que])
+ continue;
+
+ __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
+ }
}
static int
@@ -2689,14 +2704,22 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
struct scsi_qla_host, iocb_work);
- int cnt = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int i = 20;
+ unsigned long flags;
- while (!list_empty(&vha->work_list)) {
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
+ while (!list_empty(&vha->work_list) && i > 0) {
qla2x00_do_work(vha);
- cnt++;
- if (cnt > 10)
- break;
+ i--;
}
+
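+	/* Allow the next qla2x00_post_work() to requeue this work. */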
+ spin_lock_irqsave(&vha->work_lock, flags);
+ clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
}
/*
@@ -2790,6 +2813,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->max_exchg = FW_MAX_EXCHANGES_CNT;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3011,9 +3035,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha = qla2x00_create_host(sht, ha);
if (!base_vha) {
ret = -ENOMEM;
- qla2x00_mem_free(ha);
- qla2x00_free_req_que(ha, req);
- qla2x00_free_rsp_que(ha, rsp);
goto probe_hw_failed;
}
@@ -3023,7 +3044,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+ base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3074,7 +3095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
- goto probe_init_failed;
+ goto probe_hw_failed;
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
@@ -3193,10 +3214,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
if (ha->mqenable) {
bool mq = false;
bool startit = false;
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
if (QLA_TGT_MODE_ENABLED()) {
mq = true;
@@ -3390,6 +3412,9 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
+ qla2x00_mem_free(ha);
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
qla2x00_clear_drv_active(ha);
iospace_config_failed:
@@ -3448,8 +3473,13 @@ qla2x00_shutdown(struct pci_dev *pdev)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ }
/* Turn adapter off line */
vha->flags.online = 0;
@@ -3609,6 +3639,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ vfree(base_vha->scan.l);
+
if (IS_QLAFX00(ha))
qlafx00_driver_shutdown(base_vha, 20);
@@ -3628,10 +3660,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla84xx_put_chip(base_vha);
- /* Laser should be disabled only for ISP2031 */
- if (IS_QLA2031(ha))
- qla83xx_disable_laser(base_vha);
-
/* Disable timer */
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
@@ -3692,8 +3720,16 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ if (ha->flags.fw_started) {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ ha->flags.fw_started = 0;
+ }
+ }
vha->flags.online = 0;
@@ -3833,7 +3869,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
@@ -4221,6 +4257,9 @@ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
u32 temp;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
+ if (max_cnt > vha->hw->max_exchg)
+ max_cnt = vha->hw->max_exchg;
+
if (qla_ini_mode_enabled(vha)) {
if (ql2xiniexchg > max_cnt)
ql2xiniexchg = max_cnt;
@@ -4250,8 +4289,8 @@ int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
- u16 size, max_cnt;
- u32 temp;
+ u16 size, max_cnt;
+ u32 actual_cnt, totsz;
struct qla_hw_data *ha = vha->hw;
if (!ha->flags.exchoffld_enabled)
@@ -4268,16 +4307,19 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return rval;
}
- qla2x00_number_of_exch(vha, &temp, max_cnt);
- temp *= size;
+ qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
+ ql_log(ql_log_info, vha, 0xd014,
+ "Actual exchange offload count: %d.\n", actual_cnt);
+
+ totsz = actual_cnt * size;
- if (temp != ha->exchoffld_size) {
+ if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
- ha->exchoffld_size = temp;
+ ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
- "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
- max_cnt, size, temp);
+ "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
+ max_cnt, actual_cnt, size, totsz);
ql_log(ql_log_info, vha, 0xd017,
"Exchange Buffers requested size = 0x%x\n",
@@ -4288,7 +4330,21 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
if (!ha->exchoffld_buf) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
- "Failed to allocate memory for exchoffld_buf_dma.\n");
+ "Failed to allocate memory for Exchange Offload.\n");
+
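+			/*
+			 * Shrink the exchange count so the next attempt
+			 * needs a smaller buffer; disable offload once it
+			 * is down near the firmware default.
+			 */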
+ if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
+ ha->max_exchg -= REDUCE_EXCHANGES_CNT;
+ } else if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + 512)) {
+ ha->max_exchg -= 512;
+ } else {
+ ha->flags.exchoffld_enabled = 0;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Disabling Exchange offload due to lack of memory\n");
+ }
+ ha->exchoffld_size = 0;
+
return -ENOMEM;
}
}
@@ -4514,6 +4570,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
INIT_LIST_HEAD(&vha->nvme_rport_list);
+ INIT_LIST_HEAD(&vha->gpnid_list);
+ INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -4531,6 +4589,19 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
return NULL;
}
+ /* todo: what about ext login? */
+ vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
+ vha->scan.l = vmalloc(vha->scan.size);
+ if (!vha->scan.l) {
+ ql_log(ql_log_fatal, vha, 0xd04a,
+ "Alloc failed for scan database.\n");
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
+ vha->gnl.l, vha->gnl.ldma);
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
+ INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
@@ -4566,15 +4637,18 @@ int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
unsigned long flags;
+ bool q = false;
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
+
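+	/*
+	 * Queue iocb_work at most once; qla2x00_iocb_work_fn clears
+	 * the bit after it has drained the work list.
+	 */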
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+
spin_unlock_irqrestore(&vha->work_lock, flags);
- if (QLA_EARLY_LINKUP(vha->hw))
- schedule_work(&vha->iocb_work);
- else
- qla2xxx_wake_dpc(vha);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
return QLA_SUCCESS;
}
@@ -4623,6 +4697,7 @@ int qla2x00_post_async_##name##_work( \
e->u.logio.data[0] = data[0]; \
e->u.logio.data[1] = data[1]; \
} \
+ fcport->flags |= FCF_ASYNC_ACTIVE; \
return qla2x00_post_work(vha, e); \
}
@@ -4631,6 +4706,8 @@ qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
+qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
+qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -4699,6 +4776,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
+ u64 wwn;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC enter\n",
+ __func__, __LINE__, e->u.new_sess.port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4706,6 +4788,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
if (pla) {
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ memcpy(fcport->node_name,
+ pla->iocb.u.isp24.u.plogi.node_name,
+ WWN_SIZE);
qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
/* we took an extra ref_count to prevent PLOGI ACK when
* fcport/sess has not been created.
@@ -4717,9 +4802,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (fcport) {
fcport->d_id = e->u.new_sess.id;
- fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI)
+ fcport->fc4_type = FC4_TYPE_FCP_SCSI;
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
@@ -4734,7 +4820,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- /* search again to make sure one else got ahead */
+ /* search again to make sure no one else got ahead */
tfcp = qla2x00_find_fcport_by_wwpn(vha,
e->u.new_sess.port_name, 1);
if (tfcp) {
@@ -4748,20 +4834,82 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
} else {
list_add_tail(&fcport->list, &vha->vp_fcports);
- if (pla) {
- qlt_plogi_ack_link(vha, pla, fcport,
- QLT_PLOGI_LINK_SAME_WWN);
- pla->ref_count--;
- }
+ }
+ if (pla) {
+ qlt_plogi_ack_link(vha, pla, fcport,
+ QLT_PLOGI_LINK_SAME_WWN);
+ pla->ref_count--;
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (pla)
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ fcport->id_changed = 1;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
+
+ if (pla) {
+ if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
+ u16 wd3_lo;
+
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->local = 0;
+ fcport->loop_id =
+ le16_to_cpu(
+ pla->iocb.u.isp24.nport_handle);
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ wd3_lo =
+ le16_to_cpu(
+ pla->iocb.u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ fcport->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ }
qlt_plogi_ack_unref(vha, pla);
- else
- qla24xx_async_gffid(vha, fcport);
+ } else {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ tfcp = qla2x00_find_fcport_by_nportid(vha,
+ &e->u.new_sess.id, 1);
+ if (tfcp && (tfcp != fcport)) {
+ /*
+ * We have a conflict fcport with same NportID.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC found conflict b4 add. DS %d LS %d\n",
+ __func__, tfcp->port_name, tfcp->disc_state,
+ tfcp->fw_login_state);
+
+ switch (tfcp->disc_state) {
+ case DSC_DELETED:
+ break;
+ case DSC_DELETE_PEND:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ break;
+ default:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ qlt_schedule_sess_for_deletion(tfcp);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ wwn = wwn_to_u64(fcport->node_name);
+
+ if (!wwn)
+ qla24xx_async_gnnid(vha, fcport);
+ else
+ qla24xx_async_gnl(vha, fcport);
+ }
}
if (free_fcport) {
@@ -4771,6 +4919,20 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
}
+static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ struct srb *sp = e->u.iosb.sp;
+ int rval;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "%s: %s: Re-issue IOCB failed (%d).\n",
+ __func__, sp->name, rval);
+ qla24xx_sp_unmap(vha, sp);
+ }
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -4824,8 +4986,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
- case QLA_EVT_GPNID_DONE:
- qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+ case QLA_EVT_UNMAP:
+ qla24xx_sp_unmap(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_RELOGIN:
+ qla2x00_relogin(vha);
break;
case QLA_EVT_NEW_SESS:
qla24xx_create_new_sess(vha, e);
@@ -4849,6 +5014,30 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_NACK:
qla24xx_do_nack_work(vha, e);
break;
+ case QLA_EVT_ASYNC_PRLO:
+ qla2x00_async_prlo(vha, e->u.logio.fcport);
+ break;
+ case QLA_EVT_ASYNC_PRLO_DONE:
+ qla2x00_async_prlo_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_GPNFT:
+ qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
+ break;
+ case QLA_EVT_GPNFT_DONE:
+ qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNFT_DONE:
+ qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNID:
+ qla24xx_async_gnnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GFPNID:
+ qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_SP_RETRY:
+ qla_sp_retry(vha, e);
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4858,6 +5047,20 @@ qla2x00_do_work(struct scsi_qla_host *vha)
}
}
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
+
+ if (!e) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ return qla2x00_post_work(vha, e);
+}
+
/* Relogins all the fcports of a vport
* Context: dpc thread
*/
@@ -4868,14 +5071,14 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- /*
- * If the port is not ONLINE then try to login
- * to it if we haven't run out of retries.
- */
+ /*
+ * If the port is not ONLINE then try to login
+ * to it if we haven't run out of retries.
+ */
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
- fcport->login_retry--;
- if (fcport->flags & FCF_FABRIC_DEVICE) {
+ fcport->login_retry &&
+ !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
"%s %8phC DS %d LS %d\n", __func__,
fcport->port_name, fcport->disc_state,
@@ -4884,7 +5087,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
ea.event = FCME_RELOGIN;
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
- } else {
+ } else if (vha->hw->current_topology == ISP_CFG_NL) {
+ fcport->login_retry--;
status = qla2x00_local_device_login(vha,
fcport);
if (status == QLA_SUCCESS) {
@@ -4912,6 +5116,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+
+ ql_dbg(ql_dbg_disc, vha, 0x400e,
+ "Relogin end.\n");
}
/* Schedule work on any of the dpc-workqueues */
@@ -5687,8 +5894,6 @@ qla2x00_do_dpc(void *data)
if (test_bit(UNLOADING, &base_vha->dpc_flags))
break;
- qla2x00_do_work(base_vha);
-
if (IS_P3P_TYPE(ha)) {
if (IS_QLA8044(ha)) {
if (test_and_clear_bit(ISP_UNRECOVERABLE,
@@ -5867,16 +6072,19 @@ qla2x00_do_dpc(void *data)
}
/* Retry each device up to login retry count */
- if ((test_and_clear_bit(RELOGIN_NEEDED,
- &base_vha->dpc_flags)) &&
+ if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
- "Relogin scheduled.\n");
- qla2x00_relogin(base_vha);
- ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
- "Relogin end.\n");
+ if (!base_vha->relogin_jif ||
+ time_after_eq(jiffies, base_vha->relogin_jif)) {
+ base_vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
+
+ ql_dbg(ql_dbg_disc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
+ qla24xx_post_relogin_work(base_vha);
+ }
}
loop_resync_check:
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
@@ -6135,8 +6343,17 @@ qla2x00_timer(struct timer_list *t)
}
/* Process any deferred work. */
- if (!list_empty(&vha->work_list))
- start_dpc++;
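+	/*
+	 * Queue deferred work directly on the iocb workqueue instead
+	 * of nudging the DPC thread.
+	 */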
+ if (!list_empty(&vha->work_list)) {
+ unsigned long flags;
+ bool q = false;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
+ }
/*
* FC-NVME
@@ -6580,37 +6797,16 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
-static void
-qla83xx_disable_laser(scsi_qla_host_t *vha)
-{
- uint32_t reg, data, fn;
- struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
-
- /* pci func #/port # */
- ql_dbg(ql_dbg_init, vha, 0x004b,
- "Disabling Laser for hba: %p\n", vha);
-
- fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
- (BIT_15|BIT_14|BIT_13|BIT_12));
-
- fn = (fn >> 12);
-
- if (fn & 1)
- reg = PORT_1_2031;
- else
- reg = PORT_0_2031;
-
- data = LASER_OFF_2031;
-
- qla83xx_wr_reg(vha, reg, data);
-}
-
static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
+ int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
- return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ if (USER_CTRL_IRQ(vha->hw))
+ rc = blk_mq_map_queues(&shost->tag_set);
+ else
+ rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ return rc;
}
static const struct pci_error_handlers qla2xxx_err_handler = {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b4336e0cd85f..d2db86ea06b2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2461,6 +2461,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
sec_mask = 0x1e000;
break;
}
+ /* fall through */
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 18069edd4773..fc89af8fe256 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -75,7 +75,8 @@ MODULE_PARM_DESC(ql2xuctrlirq,
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
-static int temp_sam_status = SAM_STAT_BUSY;
+static int qla_sam_status = SAM_STAT_BUSY;
+static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
/*
* From scsi/fc/fc_fcp.h
@@ -208,7 +209,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
return host;
@@ -309,17 +310,17 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
- ql_dbg(ql_dbg_async, vha, 0x502f,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
} else if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0x503a,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
&u->atio, ha_locked, 0);
} else {
- ql_dbg(ql_dbg_async, vha, 0x503d,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
@@ -450,6 +451,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
+ /* fall through */
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -606,7 +608,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
__func__, __LINE__,
sp->fcport->port_name,
vha->fcport_count);
-
+ sp->fcport->disc_state = DSC_UPD_FCPORT;
qla24xx_post_upd_fcport_work(vha, sp->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20f5,
@@ -665,7 +667,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
-
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_nack_sp_done;
rval = qla2x00_start_sp(sp);
@@ -861,7 +863,10 @@ void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
fcport->d_id = port_id;
- qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ else
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
@@ -890,6 +895,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
pla->ref_count, pla, link);
+ if (link == QLT_PLOGI_LINK_CONFLICT) {
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ pla->ref_count--;
+ return;
+ default:
+ break;
+ }
+ }
+
if (sess->plogi_link[link])
qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
@@ -954,8 +970,9 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- struct event_arg ea;
scsi_qla_host_t *base_vha;
+ struct qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
@@ -971,19 +988,35 @@ static void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ sess->send_els_logo = 0;
qlt_send_first_logo(vha, &logo);
}
- if (sess->logout_on_delete) {
+ if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
int rc;
- rc = qla2x00_post_async_logout_work(vha, sess, NULL);
- if (rc != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0xf085,
- "Schedule logo failed sess %p rc %d\n",
- sess, rc);
- else
- logout_started = true;
+			if (!own ||
+			    own->iocb.u.isp24.status_subcode == ELS_PLOGI) {
+ rc = qla2x00_post_async_logout_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ } else if (own && (own->iocb.u.isp24.status_subcode ==
+ ELS_PRLI) && ha->flags.rida_fmt2) {
+ rc = qla2x00_post_async_prlo_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule PRLO failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
}
}
@@ -1007,7 +1040,7 @@ static void qlt_free_session_done(struct work_struct *work)
}
ql_dbg(ql_dbg_disc, vha, 0xf087,
- "%s: sess %p logout completed\n",__func__, sess);
+ "%s: sess %p logout completed\n", __func__, sess);
}
if (sess->logo_ack_needed) {
@@ -1033,8 +1066,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->login_succ = 0;
}
- if (sess->chip_reset != ha->base_qpair->chip_reset)
- qla2x00_clear_loop_id(sess);
+ qla2x00_clear_loop_id(sess);
if (sess->conflict) {
sess->conflict->login_pause = 0;
@@ -1044,8 +1076,6 @@ static void qlt_free_session_done(struct work_struct *work)
}
{
- struct qlt_plogi_ack_t *own =
- sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
@@ -1076,6 +1106,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
}
}
+
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
@@ -1089,14 +1120,24 @@ static void qlt_free_session_done(struct work_struct *work)
wake_up_all(&vha->fcport_waitQ);
base_vha = pci_get_drvdata(ha->pdev);
+
+ sess->free_pending = 0;
+
if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
return;
- if (!tgt || !tgt->tgt_stop) {
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_DELETE_DONE;
- ea.fcport = sess;
- qla2x00_fcport_event_handler(vha, &ea);
+ if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+ case MODE_TARGET:
+ default:
+ /* no-op */
+ break;
+ }
}
}
@@ -1104,11 +1145,20 @@ static void qlt_free_session_done(struct work_struct *work)
void qlt_unreg_sess(struct fc_port *sess)
{
struct scsi_qla_host *vha = sess->vha;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
"%s sess %p for deletion %8phC\n",
__func__, sess, sess->port_name);
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->free_pending) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
+ sess->free_pending = 1;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
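
qlt_unreg_sess() now claims the session exactly once: free_pending is tested and set under vha->work_lock, so a second caller returns immediately instead of racing into the free path. A standalone sketch of the same one-shot guard (hypothetical types; a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct session {
	pthread_mutex_t work_lock;
	bool free_pending;
};

static void unreg_session(struct session *s)
{
	pthread_mutex_lock(&s->work_lock);
	if (s->free_pending) {			/* already on the free path */
		pthread_mutex_unlock(&s->work_lock);
		return;
	}
	s->free_pending = true;			/* claim it exactly once */
	pthread_mutex_unlock(&s->work_lock);

	printf("session scheduled for free\n");	/* happens at most once */
}

int main(void)
{
	struct session s = { PTHREAD_MUTEX_INITIALIZER, false };

	unreg_session(&s);	/* schedules */
	unreg_session(&s);	/* silently returns */
	return 0;
}
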
@@ -1175,10 +1225,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
}
/* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_schedule_sess_for_deletion(struct fc_port *sess,
- bool immediate)
+void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
+ unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
return;
@@ -1194,27 +1244,28 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
return;
}
- sess->disc_state = DSC_DELETE_PEND;
-
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
+ sess->disc_state = DSC_DELETE_PEND;
+
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- schedule_work(&sess->del_work);
-}
-
-void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
-{
- unsigned long flags;
- struct qla_hw_data *ha = sess->vha->hw;
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_schedule_sess_for_deletion(sess, 1);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ /* use cancel to push work element through before re-queue */
+ cancel_work_sync(&sess->del_work);
+ INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
+ queue_work(sess->vha->hw->wq, &sess->del_work);
}
/* ha->tgt.sess_lock supposed to be held on entry */
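
The requeue path above flushes any in-flight del_work with cancel_work_sync() before re-initializing and queueing it, so a stale instance cannot run concurrently with the fresh submission. The idiom in isolation (a sketch assembled from the same calls used in the hunk, not a verbatim excerpt):

static void resubmit_del_work(struct fc_port *sess)
{
	/* wait for any queued or running instance to finish */
	cancel_work_sync(&sess->del_work);
	/* re-arm and submit a fresh instance */
	INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
	queue_work(sess->vha->hw->wq, &sess->del_work);
}
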
@@ -1225,7 +1276,7 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
list_for_each_entry(sess, &vha->vp_fcports, list) {
if (sess->se_sess)
- qlt_schedule_sess_for_deletion(sess, 1);
+ qlt_schedule_sess_for_deletion(sess);
}
/* At this point tgt could be already dead */
@@ -1400,7 +1451,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
sess->local = 1;
- qlt_schedule_sess_for_deletion(sess, false);
+ qlt_schedule_sess_for_deletion(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -1560,8 +1611,11 @@ static void qlt_release(struct qla_tgt *tgt)
btree_destroy64(&tgt->lun_qpair_map);
- if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
- ha->tgt.tgt_ops->remove_target(vha);
+ if (vha->vp_idx)
+ if (ha->tgt.tgt_ops &&
+ ha->tgt.tgt_ops->remove_target &&
+ vha->vha_tgt.target_lport_ptr)
+ ha->tgt.tgt_ops->remove_target(vha);
vha->vha_tgt.qla_tgt = NULL;
@@ -1976,15 +2030,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
"qla_target(%d): task abort for non-existant session\n",
vha->vp_idx);
- rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
- QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
-
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- if (rc != 0) {
- qlt_24xx_send_abts_resp(ha->base_qpair, abts,
- FCP_TMF_REJECTED, false);
- }
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2174,7 +2223,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
- qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+ qlt_schedule_sess_for_deletion(mcmd->sess);
} else {
qlt_send_notify_ack(vha->hw->base_qpair,
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
@@ -3708,7 +3757,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
term = 1;
if (term)
- qlt_term_ctio_exchange(qpair, ctio, cmd, status);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3869,7 +3918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
"%s %d %8phC post del sess\n",
__func__, __LINE__, cmd->sess->port_name);
- qlt_schedule_sess_for_deletion_lock(cmd->sess);
+ qlt_schedule_sess_for_deletion(cmd->sess);
}
break;
}
@@ -4204,76 +4253,6 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
return cmd;
}
-static void qlt_create_sess_from_atio(struct work_struct *work)
-{
- struct qla_tgt_sess_op *op = container_of(work,
- struct qla_tgt_sess_op, work);
- scsi_qla_host_t *vha = op->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
- uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_del(&op->cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-
- if (op->aborted) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
- "sess_op with tag %u is aborted\n",
- op->atio.u.isp24.exchange_addr);
- goto out_term;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
- if (op->atio.u.raw.entry_count > 1) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
- goto out_term;
- }
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has an extra creation ref. */
-
- if (!sess)
- goto out_term;
- /*
- * Now obtain a pre-allocated session tag using the original op->atio
- * packet header, and dispatch into __qlt_do_work() using the existing
- * process context.
- */
- cmd = qlt_get_tag(vha, sess, &op->atio);
- if (!cmd) {
- struct qla_qpair *qpair = ha->base_qpair;
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- kfree(op);
- return;
- }
-
- /*
- * __qlt_do_work() will call qlt_put_sess() to release
- * the extra reference taken above by qlt_make_local_sess()
- */
- __qlt_do_work(cmd);
- kfree(op);
- return;
-out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
- kfree(op);
-}
-
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
@@ -4283,31 +4262,23 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct fc_port *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
+ port_id_t id;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while device %p is shutting down\n", tgt);
- return -EFAULT;
+ return -ENODEV;
}
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
- if (unlikely(!sess)) {
- struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
- GFP_ATOMIC);
- if (!op)
- return -ENOMEM;
-
- memcpy(&op->atio, atio, sizeof(*atio));
- op->vha = vha;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
+ id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
+ id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+ if (IS_SW_RESV_ADDR(id))
+ return -EBUSY;
- INIT_WORK(&op->work, qlt_create_sess_from_atio);
- queue_work(qla_tgt_wq, &op->work);
- return 0;
- }
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+ if (unlikely(!sess))
+ return -EFAULT;
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
@@ -4336,7 +4307,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return -ENOMEM;
+ return -EBUSY;
}
cmd->cmd_in_wq = 1;
@@ -4417,14 +4388,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt;
struct fc_port *sess;
u64 unpacked_lun;
int fn;
unsigned long flags;
- tgt = vha->vha_tgt.qla_tgt;
-
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4435,15 +4403,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
unpacked_lun =
scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
- if (!sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
- "qla_target(%d): task mgmt fn 0x%x for "
- "non-existant session\n", vha->vp_idx, fn);
- return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
- sizeof(struct atio_from_isp));
- }
-
- if (sess->deleted)
+ if (sess == NULL || sess->deleted)
return -EFAULT;
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4574,7 +4534,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* might have cleared it when requested this session
* deletion, so don't touch it
*/
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
} else {
/*
* Another wwn used to have our s_id/loop_id
@@ -4584,11 +4544,10 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
-
other_sess->keep_nport_handle = 1;
- *conflict_sess = other_sess;
- qlt_schedule_sess_for_deletion(other_sess,
- true);
+ if (other_sess->disc_state != DSC_DELETED)
+ *conflict_sess = other_sess;
+ qlt_schedule_sess_for_deletion(other_sess);
}
continue;
}
@@ -4602,7 +4561,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* Same loop_id but different s_id
* Ok to kill and logout */
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
}
}
@@ -4652,6 +4611,138 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
return count;
}
+static int qlt_handle_login(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct fc_port *sess = NULL, *conflict_sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id, wd3_lo;
+ int res = 0;
+ struct qlt_plogi_ack_t *pla;
+ unsigned long flags;
+
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ /* Mark all stale commands sitting in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ sess = qlt_find_sess_invalidate_other(vha, wwn,
+ port_id, loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
+ goto out;
+ }
+
+ pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+ if (!pla) {
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
+ if (conflict_sess) {
+ conflict_sess->login_gen++;
+ qlt_plogi_ack_link(vha, pla, conflict_sess,
+ QLT_PLOGI_LINK_CONFLICT);
+ }
+
+ if (!sess) {
+ pla->ref_count++;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name,
+ iocb->u.isp24.u.plogi.node_name,
+ pla, FC4_TYPE_UNKNOWN);
+ else
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name, NULL,
+ pla, FC4_TYPE_UNKNOWN);
+
+ goto out;
+ }
+
+ qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
+ sess->d_id = port_id;
+ sess->login_gen++;
+
+ if (iocb->u.isp24.status_subcode == ELS_PRLI) {
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ sess->port_type = FCT_INITIATOR;
+ else
+ sess->port_type = FCT_TARGET;
+
+ } else
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__, sess->port_name, sess->disc_state);
+
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+ break;
+
+ default:
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+	 * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->d_id.b24 == port_id.b24));
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
+
+ qlt_schedule_sess_for_deletion(sess);
+ break;
+ }
+out:
+ return res;
+}
+
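
The comment block in qlt_handle_login() explains when the firmware nport handle should survive session deletion: only a relogin that reuses both the old loop_id and the old 24-bit port id may keep it. Reduced to a standalone predicate (hypothetical struct; the driver computes this inline when setting keep_nport_handle):

#include <stdbool.h>
#include <stdio.h>

struct sess_ids {
	unsigned short loop_id;
	unsigned int d_id_b24;
};

static bool keep_nport_handle(const struct sess_ids *sess,
			      unsigned short new_loop_id,
			      unsigned int new_d_id_b24)
{
	/* keep the handle only when both identifiers are unchanged */
	return sess->loop_id == new_loop_id && sess->d_id_b24 == new_d_id_b24;
}

int main(void)
{
	struct sess_ids s = { .loop_id = 5, .d_id_b24 = 0x010203 };

	printf("%d\n", keep_nport_handle(&s, 5, 0x010203));	/* 1: keep */
	printf("%d\n", keep_nport_handle(&s, 6, 0x010203));	/* 0: release */
	return 0;
}
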
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
@@ -4666,7 +4757,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
- struct qlt_plogi_ack_t *pla;
unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4690,88 +4780,32 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
*/
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
+ res = qlt_handle_login(vha, iocb);
+ break;
- /* Mark all stale commands in qla_tgt_wq for deletion */
- abort_cmds_for_s_id(vha, &port_id);
-
- if (wwn) {
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(vha, wwn,
- port_id, loop_id, &conflict_sess);
- spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
- }
-
- if (IS_SW_RESV_ADDR(port_id)) {
- res = 1;
- break;
- }
-
- pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
- if (!pla) {
- qlt_send_term_imm_notif(vha, iocb, 1);
- break;
- }
-
- res = 0;
+ case ELS_PRLI:
+ if (N2N_TOPO(ha)) {
+ sess = qla2x00_find_fcport_by_wwpn(vha,
+ iocb->u.isp24.port_name, 1);
- if (conflict_sess) {
- conflict_sess->login_gen++;
- qlt_plogi_ack_link(vha, pla, conflict_sess,
- QLT_PLOGI_LINK_CONFLICT);
- }
+ if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
- if (!sess) {
- pla->ref_count++;
- qla24xx_post_newsess_work(vha, &port_id,
- iocb->u.isp24.port_name, pla);
- res = 0;
+ res = qlt_handle_login(vha, iocb);
break;
}
- qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
- sess->fw_login_state = DSC_LS_PLOGI_PEND;
- sess->d_id = port_id;
- sess->login_gen++;
-
- switch (sess->disc_state) {
- case DSC_DELETED:
- qlt_plogi_ack_unref(vha, pla);
- break;
-
- default:
- /*
- * Under normal circumstances we want to release nport handle
- * during LOGO process to avoid nport handle leaks inside FW.
- * The exception is when LOGO is done while another PLOGI with
- * the same nport handle is waiting as might be the case here.
- * Note: there is always a possibily of a race where session
- * deletion has already started for other reasons (e.g. ACL
- * removal) and now PLOGI arrives:
- * 1. if PLOGI arrived in FW after nport handle has been freed,
- * FW must have assigned this PLOGI a new/same handle and we
- * can proceed ACK'ing it as usual when session deletion
- * completes.
- * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
- * bit reached it, the handle has now been released. We'll
- * get an error when we ACK this PLOGI. Nothing will be sent
- * back to initiator. Initiator should eventually retry
- * PLOGI and situation will correct itself.
- */
- sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
- (sess->d_id.b24 == port_id.b24));
-
- ql_dbg(ql_dbg_disc, vha, 0x20f9,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, sess->port_name);
-
-
- qlt_schedule_sess_for_deletion_lock(sess);
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
break;
}
- break;
-
- case ELS_PRLI:
wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
if (wwn) {
@@ -4782,17 +4816,51 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (conflict_sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
- "PRLI with conflicting sess %p port %8phC\n",
- conflict_sess, conflict_sess->port_name);
- qlt_send_term_imm_notif(vha, iocb, 1);
- res = 0;
- break;
+ switch (conflict_sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+ "PRLI with conflicting sess %p port %8phC\n",
+ conflict_sess, conflict_sess->port_name);
+ conflict_sess->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
}
if (sess != NULL) {
- if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
- sess->fw_login_state != DSC_LS_PLOGI_COMP) {
+			bool delete = false;
+
+			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+ switch (sess->fw_login_state) {
+ case DSC_LS_PLOGI_PEND:
+ case DSC_LS_PLOGI_COMP:
+ case DSC_LS_PRLI_COMP:
+ break;
+ default:
+ delete = true;
+ break;
+ }
+
+ switch (sess->disc_state) {
+ case DSC_LOGIN_PEND:
+ case DSC_GPDB:
+ case DSC_GPSC:
+ case DSC_UPD_FCPORT:
+ case DSC_LOGIN_COMPLETE:
+ case DSC_ADISC:
+ delete = false;
+ break;
+ default:
+ break;
+ }
+
+ if (delete) {
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4803,6 +4871,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
+ flags);
break;
}
@@ -4826,6 +4896,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->port_type = FCT_INITIATOR;
else
sess->port_type = FCT_TARGET;
+
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
res = 1; /* send notify ack */
@@ -4863,7 +4935,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 1;
break;
}
- /* drop through */
+ /* fall through */
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4892,7 +4964,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
} else {
/* cmd did not go to upper layer. */
if (sess) {
- qlt_schedule_sess_for_deletion_lock(sess);
+ qlt_schedule_sess_for_deletion(sess);
res = 0;
}
/* else logo will be ack */
@@ -4930,6 +5002,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ ql_dbg(ql_dbg_disc, vha, 0xf026,
+ "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
+ vha->vp_idx, iocb->u.isp24.status_subcode, res);
+
return res;
}
@@ -5320,7 +5396,6 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
- uint16_t status;
unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
@@ -5328,8 +5403,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- status = temp_sam_status;
- qlt_send_busy(qpair, atio, status);
+ qlt_send_busy(qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5344,7 +5418,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
- unsigned long flags;
+ unsigned long flags = 0;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0x3064,
@@ -5368,8 +5442,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_TASK_SET_FULL);
+ qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
@@ -5388,42 +5461,37 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_task_mgmt(vha, atio);
}
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
- if (!ha_locked)
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
-
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_BUSY);
-#else
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
atio, 1, 0);
-#endif
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe059,
- "qla_target: Unable to send "
- "command to target for req, "
- "ignoring.\n");
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe05a,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status.\n", vha->vp_idx);
- if (!ha_locked)
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair,
- atio, SAM_STAT_BUSY);
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ qla_sam_status);
+ break;
}
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
}
break;
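
The rewritten error path maps the ATIO handlers' return code onto a response policy instead of overloading -ESRCH: -ENODEV drops the command, -EBADF terminates the exchange, -EBUSY reports TASK SET FULL, and anything else reports plain BUSY. A userspace sketch of that mapping (hypothetical enum; the driver sends real BUSY/TERM EXCHANGE IOCBs):

#include <stdio.h>
#include <errno.h>

enum atio_response { RSP_DROP, RSP_TERM_EXCHANGE, RSP_BUSY, RSP_TASK_SET_FULL };

static enum atio_response response_for(int rc)
{
	switch (rc) {
	case -ENODEV:		/* target going away: nothing to send */
		return RSP_DROP;
	case -EBADF:		/* bad handle: terminate the exchange */
		return RSP_TERM_EXCHANGE;
	case -EBUSY:		/* transient, target core full */
		return RSP_TASK_SET_FULL;
	default:		/* anything else: plain SAM BUSY */
		return RSP_BUSY;
	}
}

int main(void)
{
	printf("%d %d %d %d\n", response_for(-ENODEV), response_for(-EBADF),
	       response_for(-EBUSY), response_for(-ENOMEM));
	return 0;
}
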
@@ -5506,27 +5574,31 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(rsp->qpair, atio, 0);
-#else
- qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
-#endif
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe05f,
- "qla_target: Unable to send "
- "command to target, sending TERM "
- "EXCHANGE for rsp\n");
- qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe060,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status\n", vha->vp_idx);
- qlt_send_busy(rsp->qpair, atio, 0);
- }
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
+ qlt_send_term_exchange(rsp->qpair, NULL,
+ atio, 1, 0);
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ qla_sam_status);
+ break;
}
}
}
@@ -5755,7 +5827,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
unsigned long flags;
u8 newfcport = 0;
- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed",
@@ -5784,6 +5856,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
tfcp->port_type = fcport->port_type;
tfcp->supported_classes = fcport->supported_classes;
tfcp->flags |= fcport->flags;
+ tfcp->scan_state = QLA_FCPORT_FOUND;
del = fcport;
fcport = tfcp;
@@ -6445,18 +6518,21 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
qlt_add_target(ha, vha);
}
-void
-qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+u8
+qlt_rff_id(struct scsi_qla_host *vha)
{
+ u8 fc4_feature = 0;
/*
* FC-4 Feature bit 0 indicates target functionality to the name server.
*/
if (qla_tgt_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_0;
+ fc4_feature = BIT_0;
} else if (qla_ini_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ fc4_feature = BIT_1;
} else if (qla_dual_mode_enabled(vha))
- ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ fc4_feature = BIT_0 | BIT_1;
+
+ return fc4_feature;
}
/*
@@ -6546,7 +6622,9 @@ void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb;
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6554,19 +6632,28 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- icb = (struct init_cb_24xx *)ha->init_cb;
-
- if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
- struct qla_msix_entry *msix = &ha->msix_entries[2];
-
- icb->msix_atio = cpu_to_le16(msix->entry);
- ql_dbg(ql_dbg_init, vha, 0xf072,
- "Registering ICB vector 0x%x for atio que.\n",
- msix->entry);
- } else if (ql2xenablemsix == 0) {
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- ql_dbg(ql_dbg_init, vha, 0xf07f,
- "Registering INTx vector for ATIO.\n");
+ if (ha->flags.msix_enabled) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA2071(ha)) {
+ /* 4 ports Baker: Enable Interrupt Handshake */
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ } else {
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= ~BIT_26;
+ }
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
+ }
+ } else {
+ /* INTx|MSI */
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "%s: Use INTx for ATIOQ.\n", __func__);
+ }
}
}
@@ -6574,6 +6661,7 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6625,6 +6713,14 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ if (IS_QLA25XX(ha)) {
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
+ }
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6679,6 +6775,7 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6729,6 +6826,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6991,20 +7094,14 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
- unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
if (!vha->d_id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
} else if (vha->d_id.b24 != id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index aba58d3848a6..bb67b5a284a8 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -993,7 +993,7 @@ struct qla_tgt_prm {
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
- ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+ ((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
#define QLA_TGT_XMIT_DATA 1
#define QLA_TGT_XMIT_STATUS 2
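
The widened IS_SW_RESV_ADDR() test now matches the whole FFFxxx block (area 0xf0-0xff under domain 0xff) rather than only the FFFCxx range, so the well-known fabric addresses are treated as reserved too. A standalone check mirroring the macro:

#include <stdio.h>
#include <stdbool.h>

static bool is_sw_resv_addr(unsigned char domain, unsigned char area)
{
	return domain == 0xff && (area & 0xf0) == 0xf0;
}

int main(void)
{
	printf("%d\n", is_sw_resv_addr(0xff, 0xfc));	/* 1: old FFFCxx match */
	printf("%d\n", is_sw_resv_addr(0xff, 0xf0));	/* 1: now reserved too */
	printf("%d\n", is_sw_resv_addr(0xff, 0x7f));	/* 0: normal area */
	return 0;
}
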
@@ -1072,7 +1072,7 @@ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
-extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern u8 qlt_rff_id(struct scsi_qla_host *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 733e8dcccf5c..731ca0d8520a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -526,7 +526,8 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20c,
"%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
- if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ switch (ent->t268.buf_type) {
+ case T268_BUF_TYPE_EXTD_TRACE:
if (vha->hw->eft) {
if (buf) {
ent->t268.buf_size = EFT_SIZE;
@@ -538,10 +539,43 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
"%s: missing eft\n", __func__);
qla27xx_skip_entry(ent, buf);
}
- } else {
- ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ break;
+ case T268_BUF_TYPE_EXCH_BUFOFF:
+ if (vha->hw->exchoffld_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exchoffld_size;
+ ent->t268.start_addr =
+ vha->hw->exchoffld_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exchoffld_buf,
+ vha->hw->exchoffld_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing exch offld\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+ case T268_BUF_TYPE_EXTD_LOGIN:
+ if (vha->hw->exlogin_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exlogin_size;
+ ent->t268.start_addr =
+ vha->hw->exlogin_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exlogin_buf,
+ vha->hw->exlogin_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing ext login\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
qla27xx_skip_entry(ent, buf);
+ break;
}
return false;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b6ec02b96d3d..549bef9afddd 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.02-k"
+#define QLA2XXX_VERSION "10.00.00.05-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3f82ea1b72dc..aadfeaac3898 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1635,16 +1635,13 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return rc;
}
- lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
- 65536);
+ lport->lport_loopid_map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
if (!lport->lport_loopid_map) {
pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
- memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
- * 65536);
pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
return 0;
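
The tcm_qla2xxx hunk above folds vmalloc() plus memset() into vzalloc(), which returns zeroed memory by contract. A kernel-context sketch of the resulting allocation (hypothetical helper name, not a standalone program):

static struct tcm_qla2xxx_fc_loopid *alloc_loopid_map(void)
{
	/* vzalloc() == vmalloc() + zero-fill, so no follow-up memset() */
	return vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
}
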
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 5d6d158bbfd6..52b1a0bc93c9 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,15 +153,14 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
goto exit_get_sys_info_no_free;
}
- memset(sys_info, 0, sizeof(*sys_info));
/* Get flash sys info */
if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 1da04f323d38..bda2e64ee5ca 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,15 +625,14 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
goto exit_init_fw_cb_no_free;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
@@ -710,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
@@ -720,7 +719,6 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
@@ -1342,16 +1340,15 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
return status;
}
- memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
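
The qla4xxx conversions in this and the following files all follow one pattern: dma_alloc_coherent() plus a manual memset() becomes a single dma_zalloc_coherent() call, which hands back zeroed DMA-coherent memory. Side by side (kernel-context fragment, names as in the hunks above):

	/* before: allocate, then clear by hand */
	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct addr_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (init_fw_cb)
		memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));

	/* after: one call, buffer already zeroed */
	init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
					 sizeof(struct addr_ctrl_blk),
					 &init_fw_cb_dma, GFP_KERNEL);
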
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e91abb327745..968bd85610f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4050,15 +4050,14 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
return status;
}
- memset(sys_info, 0, sizeof(*sys_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2b8a8ce2a431..82e889bbe0ed 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2689,16 +2689,15 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
return -ENOMEM;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
@@ -4196,15 +4195,14 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
goto mem_alloc_error_exit;
}
- memset(ha->queues, 0, ha->queues_len);
/*
* As per RISC alignment requirements -- the bus-address must be a
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 40bc616cf8ab..90349498f686 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -12,7 +12,7 @@
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
- * encouraged once assigned by ANSI/INCITS T10
+ * encouraged once assigned by ANSI/INCITS T10).
*/
static const char *const scsi_device_types[] = {
"Direct-Access ",
@@ -39,7 +39,7 @@ static const char *const scsi_device_types[] = {
};
/**
- * scsi_device_type - Return 17 char string indicating device type.
+ * scsi_device_type - Return 17-char string indicating device type.
* @type: type number to look up
*/
const char *scsi_device_type(unsigned type)
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(scsi_device_type);
* @scsilun: struct scsi_lun to be converted.
*
* Description:
- * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
+ * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered
* integer, and return the result. The caller must check for
* truncation before using this function.
*
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(scsilun_to_int);
* back into the lun value.
*
* Notes:
- * Given an integer : 0x0b03d204, this function returns a
+ *     Given an integer: 0x0b03d204, this function returns a
* struct scsi_lun of: d2 04 0b 03 00 00 00 00
*
*/
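
The kernel-doc example above follows from the LUN wire format: each 16-bit LUN level is stored big-endian in the 8-byte scsi_lun array. A standalone sketch that reproduces the documented conversion (mirrors int_to_scsilun(); the function name here is hypothetical):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void int_to_lun_bytes(uint64_t lun, uint8_t out[8])
{
	int i;

	memset(out, 0, 8);
	for (i = 0; i < 8; i += 2) {
		out[i] = (lun >> 8) & 0xff;	/* high byte of this level */
		out[i + 1] = lun & 0xff;	/* low byte of this level */
		lun >>= 16;			/* next 16-bit level */
	}
}

int main(void)
{
	uint8_t b[8];
	int i;

	int_to_lun_bytes(0x0b03d204, b);
	for (i = 0; i < 8; i++)
		printf("%02x ", b[i]);
	printf("\n");	/* prints: d2 04 0b 03 00 00 00 00 */
	return 0;
}
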
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
/**
* scsi_build_sense_buffer - build sense data in a buffer
- * @desc: Sense format (non zero == descriptor format,
+ * @desc: Sense format (non-zero == descriptor format,
* 0 == fixed format)
* @buf: Where to build sense data
* @key: Sense key
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(scsi_build_sense_buffer);
* @info: 64-bit information value to be set
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
**/
int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
{
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(scsi_set_sense_information);
* @cd: command/data bit
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
*/
int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e4f037f0f38b..a5986dae9020 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,7 +6,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2016 Douglas Gilbert
+ * Copyright (C) 2001 - 2017 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -61,8 +61,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "1.86"
-static const char *sdebug_version_date = "20160430";
+#define SDEBUG_VERSION "0187" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20171202";
#define MY_NAME "scsi_debug"
@@ -93,6 +93,7 @@ static const char *sdebug_version_date = "20160430";
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
+#define WRITE_ERROR_ASC 0xc
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,6 +106,7 @@ static const char *sdebug_version_date = "20160430";
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
#define DEF_ATO 1
+#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
@@ -161,12 +163,14 @@ static const char *sdebug_version_date = "20160430";
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
- SDEBUG_OPT_SHORT_TRANSFER)
+ SDEBUG_OPT_SHORT_TRANSFER | \
+ SDEBUG_OPT_HOST_BUSY)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -232,7 +236,7 @@ static const char *sdebug_version_date = "20160430";
#define F_M_ACCESS 0x800 /* media access */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
-#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define SDEBUG_MAX_PARTS 4
@@ -263,12 +267,18 @@ struct sdebug_host_info {
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
+enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
+ SDEB_DEFER_WQ = 2};
+
struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
int sqa_idx; /* index of sdebug_queue array */
int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
int issuing_cpu;
+ bool init_hrt;
+ bool init_wq;
+ enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
@@ -282,6 +292,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dif:1;
unsigned int inj_dix:1;
unsigned int inj_short:1;
+ unsigned int inj_host_busy:1;
};
struct sdebug_queue {
@@ -304,8 +315,8 @@ struct opcode_info_t {
u32 flags; /* OR-ed set of SDEB_F_* */
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
const struct opcode_info_t *arrp; /* num_attached elements or NULL */
- u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
- /* ignore cdb bytes after position 15 */
+ u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
+ /* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
@@ -322,12 +333,12 @@ enum sdeb_opcode_index {
SDEB_I_READ = 9, /* 6, 10, 12, 16 */
SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
SDEB_I_START_STOP = 11,
- SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
- SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
+ SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
+ SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
SDEB_I_VERIFY = 16, /* 10 only */
- SDEB_I_VARIABLE_LEN = 17,
+ SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
@@ -340,7 +351,7 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 27, /* 10, 16 */
SDEB_I_SYNC_CACHE = 28, /* 10 only */
SDEB_I_COMP_WRITE = 29,
- SDEB_I_LAST_ELEMENT = 30, /* keep this last */
+ SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
};
@@ -372,12 +383,12 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
+ 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
SDEB_I_MAINT_OUT, 0, 0, 0,
- SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
- 0, 0, 0, 0,
+ SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
+ 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
@@ -396,6 +407,7 @@ static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -409,72 +421,81 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
-static const struct opcode_info_t msense_iarr[1] = {
+/*
+ * The following are overflow arrays for cdbs that "hit" the same index in
+ * the opcode_info_arr array. The most time-sensitive (or commonly used) cdb
+ * should be placed in opcode_info_arr[], the others should be placed here.
+ */
+static const struct opcode_info_t msense_iarr[] = {
{0, 0x1a, 0, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t mselect_iarr[1] = {
+static const struct opcode_info_t mselect_iarr[] = {
{0, 0x15, 0, F_D_OUT, NULL, NULL,
{6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t read_iarr[3] = {
- {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+static const struct opcode_info_t read_iarr[] = {
+ {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
+ {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
{6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
- {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
+ {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
+ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t write_iarr[3] = {
- {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
- {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
- 0, 0, 0, 0} },
- {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
- {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
- {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
- 0xc7, 0, 0, 0, 0} },
+static const struct opcode_info_t write_iarr[] = {
+ {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
+ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+ {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
+ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
+ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t sa_in_iarr[1] = {
+static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0, 0xc7} },
+ 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
};
-static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
- {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
- NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
+static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
+ {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
+ {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
+ 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};
-static const struct opcode_info_t maint_in_iarr[2] = {
+static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
{12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
- 0xc7, 0, 0, 0, 0} },
+ 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
{12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+		    0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
-static const struct opcode_info_t write_same_iarr[1] = {
- {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
+static const struct opcode_info_t write_same_iarr[] = {
+ {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1f, 0xc7} },
+ 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};
-static const struct opcode_info_t reserve_iarr[1] = {
- {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+static const struct opcode_info_t reserve_iarr[] = {
+ {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
{6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t release_iarr[1] = {
- {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+static const struct opcode_info_t release_iarr[] = {
+ {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
{6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
@@ -484,57 +505,67 @@ static const struct opcode_info_t release_iarr[1] = {
* REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
+ {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
{6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
{12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT LUNS */
{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
{6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
- {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
- 0} },
- {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
- {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
+/* 5 */
+ {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
+ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
+ 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
+ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
+ 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
{10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0, 0, 0} },
- {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
+ {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
{10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0, 0} },
- {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
- {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
+ {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
+ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
- {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
- {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
+ {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
+ resp_write_dt0, write_iarr, /* WRITE(16) */
+ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	     0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
{0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
- {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
- {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
- 0} },
+ {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
+ resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
+ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
+ {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
+ {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
+ resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
+ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0, 0xc7, 0, 0, 0, 0} },
+/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
- vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
- 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
- {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
+ {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
+ resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
+ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
+ 0xff, 0xff} },
+ {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
+ NULL, reserve_iarr, /* RESERVE(10) <no response function> */
{10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
- {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
+ {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
+ NULL, release_iarr, /* RELEASE(10) <no response function> */
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
@@ -546,23 +577,25 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
{6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
- {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
- NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
+ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+/* 25 */
+ {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
+ NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
- {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
- write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
- 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
- {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
+ resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
+ 0, 0, 0, 0, 0} },
+ {0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
+ {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
+ {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
- 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
+ 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
/* 30 */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
@@ -571,6 +604,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
+static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
@@ -797,6 +831,61 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
/* return -ENOTTY; // correct return but upsets fdisk */
}
+static void config_cdb_len(struct scsi_device *sdev)
+{
+ switch (sdebug_cdb_len) {
+ case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = true;
+ break;
+ case 16:
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ case 32: /* No knobs to suggest this so same as 16 for now */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ default:
+ pr_warn("unexpected cdb_len=%d, force to 10\n",
+ sdebug_cdb_len);
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ sdebug_cdb_len = 10;
+ break;
+ }
+}
+
+static void all_config_cdb_len(void)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ shost = sdbg_host->shost;
+ shost_for_each_device(sdev, shost) {
+ config_cdb_len(sdev);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
+
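
config_cdb_len() can only suggest: the use_*_for_rw/ms fields are hints that
upper-level drivers consult when building commands. A rough sketch (not
sd.c's actual code) of how such a driver typically picks a read/write opcode
from these hints:

	#include <scsi/scsi_device.h>
	#include <scsi/scsi_proto.h>

	static u8 pick_rw_opcode(const struct scsi_device *sdev, u64 lba,
				 bool write)
	{
		if (sdev->use_16_for_rw || lba > 0xffffffffULL)
			return write ? WRITE_16 : READ_16; /* 16-byte CDB */
		if (sdev->use_10_for_rw)
			return write ? WRITE_10 : READ_10; /* 10-byte CDB */
		return write ? WRITE_6 : READ_6;    /* legacy 6-byte CDB */
	}
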
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
struct sdebug_host_info *sdhp;
@@ -955,7 +1044,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static char sdebug_inq_vendor_id[9] = "Linux ";
static char sdebug_inq_product_id[17] = "scsi_debug ";
-static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
+static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -1411,6 +1500,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memcpy(&arr[8], sdebug_inq_vendor_id, 8);
memcpy(&arr[16], sdebug_inq_product_id, 16);
memcpy(&arr[32], sdebug_inq_product_rev, 4);
+ /* Use Vendor Specific area to place driver date in ASCII hex */
+ memcpy(&arr[36], sdebug_version_date, 8);
/* version descriptors (2 bytes each) follow */
put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
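For reference, the standard INQUIRY data layout these memcpy() offsets
follow (per SPC; byte 36 onward is vendor specific, which is why the
driver's date string is safe to park there):

	/*
	 * arr[8..15]   T10 vendor identification  (8 bytes, space padded)
	 * arr[16..31]  product identification     (16 bytes)
	 * arr[32..35]  product revision level     (4 bytes)
	 * arr[36..43]  vendor specific            (driver version date)
	 * arr[58..]    version descriptors        (2 bytes each)
	 */
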
@@ -1900,7 +1991,7 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
+ 0, 0, 0, 0};
unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
@@ -2077,13 +2168,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
- case 0x3: /* Format device page, direct access */
+ case 0x3: /* Format device page, direct access */
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
} else
bad_pcode = true;
- break;
+ break;
case 0x8: /* Caching page, direct access */
if (is_disk) {
len = resp_caching_pg(ap, pcontrol, target);
@@ -2099,7 +2190,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
if ((subpcode > 0x2) && (subpcode < 0xff)) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2136,7 +2227,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
break;
default:
bad_pcode = true;
@@ -2172,8 +2263,8 @@ static int resp_mode_select(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
return check_condition_result;
}
- res = fetch_to_dev_buffer(scp, arr, param_len);
- if (-1 == res)
+ res = fetch_to_dev_buffer(scp, arr, param_len);
+ if (-1 == res)
return DID_ERROR << 16;
else if (sdebug_verbose && (res < param_len))
sdev_printk(KERN_INFO, scp->device,
@@ -2239,8 +2330,8 @@ static int resp_temp_l_pg(unsigned char * arr)
0x0, 0x1, 0x3, 0x2, 0x0, 65,
};
- memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
- return sizeof(temp_l_pg);
+ memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
+ return sizeof(temp_l_pg);
}
static int resp_ie_l_pg(unsigned char * arr)
@@ -2248,18 +2339,18 @@ static int resp_ie_l_pg(unsigned char * arr)
unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
};
- memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
+ memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
arr[4] = THRESHOLD_EXCEEDED;
arr[5] = 0xff;
}
- return sizeof(ie_l_pg);
+ return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512
-static int resp_log_sense(struct scsi_cmnd * scp,
- struct sdebug_dev_info * devip)
+static int resp_log_sense(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
int ppc, sp, pcode, subpcode, alloc_len, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
@@ -2353,8 +2444,8 @@ static int check_device_access_params(struct scsi_cmnd *scp,
}
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
- bool do_write)
+static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
+ u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
@@ -2380,14 +2471,15 @@ static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep + (block * sdebug_sector_size),
- (num - rest) * sdebug_sector_size, 0, do_write);
+ (num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep, rest * sdebug_sector_size,
- (num - rest) * sdebug_sector_size, do_write);
+ sg_skip + ((num - rest) * sdebug_sector_size),
+ do_write);
}
return ret;
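The new sg_skip argument is forwarded as sg_copy_buffer()'s skip parameter,
which lets one command service several LBA ranges from a single
scatter-gather list by advancing past the bytes already consumed. An
illustrative follow-up call (block2, num1 and num2 are hypothetical names
for a second range and the first range's length):

	/* copy a second range, skipping the bytes the first range used */
	sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		       fake_storep + (block2 * sdebug_sector_size),
		       num2 * sdebug_sector_size,
		       sg_skip + num1 * sdebug_sector_size, do_write);
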
@@ -2648,7 +2740,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, false);
+ ret = do_device_access(scp, 0, lba, num, false);
read_unlock_irqrestore(&atomic_rw, iflags);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2936,7 +3028,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, true);
+ ret = do_device_access(scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2970,6 +3062,173 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
+/*
+ * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
+ * No READ GATHERED yet (requires bidi or long cdb holding gather list).
+ */
+static int resp_write_scat(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u8 *lrdp = NULL;
+ u8 *up;
+ u8 wrprotect;
+ u16 lbdof, num_lrd, k;
+ u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
+ u32 lb_size = sdebug_sector_size;
+ u32 ei_lba;
+ u64 lba;
+ unsigned long iflags;
+ int ret, res;
+ bool is_16;
+ static const u32 lrd_size = 32; /* + parameter list header size */
+
+ if (cmd[0] == VARIABLE_LENGTH_CMD) {
+ is_16 = false;
+ wrprotect = (cmd[10] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 12);
+ num_lrd = get_unaligned_be16(cmd + 16);
+ bt_len = get_unaligned_be32(cmd + 28);
+ } else { /* that leaves WRITE SCATTERED(16) */
+ is_16 = true;
+ wrprotect = (cmd[2] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 4);
+ num_lrd = get_unaligned_be16(cmd + 8);
+ bt_len = get_unaligned_be32(cmd + 10);
+ if (unlikely(have_dif_prot)) {
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
+ wrprotect) {
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
+ wrprotect == 0)
+ sdev_printk(KERN_ERR, scp->device,
+ "Unprotected WR to DIF device\n");
+ }
+ }
+ if ((num_lrd == 0) || (bt_len == 0))
+ return 0; /* T10 says these do-nothings are not errors */
+ if (lbdof == 0) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LB Data Offset field bad\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lbdof_blen = lbdof * lb_size;
+ if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LBA range descriptors don't fit\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
+ if (lrdp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
+ my_name, __func__, lbdof_blen);
+ res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
+ if (res == -1) {
+ ret = DID_ERROR << 16;
+ goto err_out;
+ }
+
+ write_lock_irqsave(&atomic_rw, iflags);
+ sg_off = lbdof_blen;
+	/* Spec says the Buffer Transfer Length field counts LBs in dout */
+ cum_lb = 0;
+ for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
+ lba = get_unaligned_be64(up + 0);
+ num = get_unaligned_be32(up + 8);
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
+ my_name, __func__, k, lba, num, sg_off);
+ if (num == 0)
+ continue;
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+ goto err_out_unlock;
+ num_by = num * lb_size;
+ ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
+
+ if ((cum_lb + num) > bt_len) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: sum of blocks > data provided\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
+ 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+
+ /* DIX + T10 DIF */
+ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
+ int prot_ret = prot_verify_write(scp, lba, num,
+ ei_lba);
+
+ if (prot_ret) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
+ prot_ret);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+
+ ret = do_device_access(scp, sg_off, lba, num, true);
+ if (unlikely(scsi_debug_lbp()))
+ map_region(lba, num);
+ if (unlikely(-1 == ret)) {
+ ret = DID_ERROR << 16;
+ goto err_out_unlock;
+ } else if (unlikely(sdebug_verbose && (ret < num_by)))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, num_by, ret);
+
+ if (unlikely(sdebug_any_injecting_opt)) {
+ struct sdebug_queued_cmd *sqcp =
+ (struct sdebug_queued_cmd *)scp->host_scribble;
+
+ if (sqcp) {
+ if (sqcp->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+ }
+ sg_off += num_by;
+ cum_lb += num;
+ }
+ ret = 0;
+err_out_unlock:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+err_out:
+ kfree(lrdp);
+ return ret;
+}
+
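
The parameter data that resp_write_scat() parses is a header block of lbdof
logical blocks followed by the scattered write data; the header carries
num_lrd 32-byte LBA range descriptors starting at offset lrd_size. Their
layout, as implied by the get_unaligned calls above (the struct name is
illustrative only, not from the driver):

	#include <linux/types.h>

	struct lba_range_desc {		/* hypothetical, for illustration */
		__be64 lba;		/* bytes 0..7 */
		__be32 num_blocks;	/* bytes 8..11 */
		__be32 expected_ilba;	/* bytes 12..15, 32-byte CDB only */
		u8     reserved[16];	/* pad to 32 bytes (lrd_size) */
	};
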
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
@@ -3177,7 +3436,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
* from data-in into arr. Safe (atomic) since write_lock held. */
fake_storep_hold = fake_storep;
fake_storep = arr;
- ret = do_device_access(scp, 0, dnum, true);
+ ret = do_device_access(scp, 0, 0, dnum, true);
fake_storep = fake_storep_hold;
if (ret == -1) {
retval = DID_ERROR << 16;
@@ -3495,6 +3754,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3603,12 +3863,12 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
if (!sdbg_host) {
pr_err("Host info NULL\n");
return NULL;
- }
+ }
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
- (devip->target == sdev->id) &&
- (devip->lun == sdev->lun))
- return devip;
+ (devip->target == sdev->id) &&
+ (devip->lun == sdev->lun))
+ return devip;
else {
if ((!devip->used) && (!open_devip))
open_devip = devip;
@@ -3660,6 +3920,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
blk_queue_max_segment_size(sdp->request_queue, -1U);
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
+ config_cdb_len(sdp);
return 0;
}
@@ -3678,13 +3939,14 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp)
+static void stop_qc_helper(struct sdebug_defer *sd_dp,
+ enum sdeb_defer_type defer_t)
{
if (!sd_dp)
return;
- if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+ if (defer_t == SDEB_DEFER_HRT)
hrtimer_cancel(&sd_dp->hrt);
- else if (sdebug_jdelay < 0)
+ else if (defer_t == SDEB_DEFER_WQ)
cancel_work_sync(&sd_dp->ew.work);
}
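Recording the deferral mechanism in the sdebug_defer instance, instead of
re-deriving it from the current delay/ndelay settings, means a command
queued under one delay regime is still cancelled correctly after the
settings change; that is also what lets the delay_store()/ndelay_store()
hunks below drop free_all_queued(). The state being tested is assumed to
look like:

	enum sdeb_defer_type {
		SDEB_DEFER_NONE = 0,	/* no response deferred */
		SDEB_DEFER_HRT,		/* deferred via hrtimer */
		SDEB_DEFER_WQ,		/* deferred via work queue */
	};
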
@@ -3694,6 +3956,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
unsigned long iflags;
int j, k, qmax, r_qmax;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3717,8 +3980,13 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
return true;
}
@@ -3733,6 +4001,7 @@ static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3751,8 +4020,13 @@ static void stop_all_queued(void)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -3848,8 +4122,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *devip;
- struct scsi_device * sdp;
- struct Scsi_Host * hp;
+ struct scsi_device *sdp;
+ struct Scsi_Host *hp;
int k = 0;
++num_bus_resets;
@@ -3863,7 +4137,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
if (sdbg_host) {
list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
+ &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
@@ -3886,15 +4160,15 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
++num_host_resets;
if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
}
- }
- spin_unlock(&sdebug_host_list_lock);
+ }
+ spin_unlock(&sdebug_host_list_lock);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -3921,7 +4195,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
- starts[0] = sdebug_sectors_per;
+ starts[0] = sdebug_sectors_per;
for (k = 1; k < sdebug_num_parts; ++k)
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
@@ -3995,6 +4269,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
+ sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4003,7 +4278,7 @@ static void setup_inject(struct sdebug_queue *sqp,
* SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
- int scsi_result, int delta_jiff)
+ int scsi_result, int delta_jiff, int ndelay)
{
unsigned long iflags;
int k, num_in_q, qdepth, inject;
@@ -4081,20 +4356,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
setup_inject(sqp, sqcp);
- if (delta_jiff > 0 || sdebug_ndelay > 0) {
+ if (sd_dp == NULL) {
+ sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+ if (sd_dp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (delta_jiff > 0 || ndelay > 0) {
ktime_t kt;
if (delta_jiff > 0) {
- struct timespec ts;
-
- jiffies_to_timespec(delta_jiff, &ts);
- kt = ktime_set(ts.tv_sec, ts.tv_nsec);
+ kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
} else
- kt = sdebug_ndelay;
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ kt = ndelay;
+ if (!sd_dp->init_hrt) {
+ sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -4104,12 +4379,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_HRT;
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (!sd_dp->init_wq) {
+ sd_dp->init_wq = true;
sqcp->sd_dp = sd_dp;
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
@@ -4117,6 +4391,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_WQ;
schedule_work(&sd_dp->ew.work);
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
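The old path converted jiffies to a timespec and then to a ktime_t;
computing nanoseconds directly avoids the intermediate struct (and the
32-bit timespec). A worked example, assuming a hypothetical HZ of 250 so
that one jiffy is NSEC_PER_SEC / HZ = 4,000,000 ns:

	kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
	/* delta_jiff == 3, HZ == 250  =>  kt == 12,000,000 ns (12 ms) */
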
@@ -4141,6 +4416,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
*/
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
+module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
@@ -4198,6 +4474,7 @@ MODULE_VERSION(SDEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
@@ -4210,7 +4487,8 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
-MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
+MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
+ SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
@@ -4360,9 +4638,6 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
@@ -4403,9 +4678,6 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
@@ -4426,15 +4698,15 @@ static ssize_t opts_show(struct device_driver *ddp, char *buf)
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int opts;
+ int opts;
char work[20];
- if (1 == sscanf(buf, "%10s", work)) {
- if (0 == strncasecmp(work,"0x", 2)) {
- if (1 == sscanf(&work[2], "%x", &opts))
+ if (sscanf(buf, "%10s", work) == 1) {
+ if (strncasecmp(work, "0x", 2) == 0) {
+ if (kstrtoint(work + 2, 16, &opts) == 0)
goto opts_done;
} else {
- if (1 == sscanf(work, "%d", &opts))
+ if (kstrtoint(work, 10, &opts) == 0)
goto opts_done;
}
}
@@ -4455,7 +4727,7 @@ static ssize_t ptype_show(struct device_driver *ddp, char *buf)
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_ptype = n;
@@ -4472,7 +4744,7 @@ static ssize_t dsense_show(struct device_driver *ddp, char *buf)
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_dsense = n;
@@ -4489,7 +4761,7 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
n = (n > 0);
@@ -4522,7 +4794,7 @@ static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_no_lun_0 = n;
@@ -4539,7 +4811,7 @@ static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_num_tgts = n;
@@ -4569,7 +4841,7 @@ static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int nth;
+ int nth;
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
sdebug_every_nth = nth;
@@ -4591,7 +4863,7 @@ static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4678,7 +4950,7 @@ static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4884,6 +5156,24 @@ static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(uuid_ctl);
+static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
+}
+static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int ret, n;
+
+ ret = kstrtoint(buf, 0, &n);
+ if (ret)
+ return ret;
+ sdebug_cdb_len = n;
+ all_config_cdb_len();
+ return count;
+}
+static DRIVER_ATTR_RW(cdb_len);
+
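
kstrtoint() with base 0 auto-detects the radix, so the sysfs file accepts
the usual spellings of kernel integer parameters; range checking is
deferred to config_cdb_len(), whose default case warns and forces
unexpected values back to 10. A small sketch of the parsing behaviour:

	int n;

	if (kstrtoint("16", 0, &n) == 0)
		pr_info("parsed %d\n", n);	/* n == 16, decimal */
	if (kstrtoint("0x20", 0, &n) == 0)
		pr_info("parsed %d\n", n);	/* n == 32, hex detected */
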
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -4923,6 +5213,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
+ &driver_attr_cdb_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5113,12 +5404,12 @@ static int __init scsi_debug_init(void)
host_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
+ for (k = 0; k < host_to_add; k++) {
+ if (sdebug_add_adapter()) {
pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
- }
- }
+ break;
+ }
+ }
if (sdebug_verbose)
pr_info("built %d host(s)\n", sdebug_add_host);
@@ -5161,53 +5452,53 @@ module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
sdbg_host = to_sdebug_host(dev);
- kfree(sdbg_host);
+ kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
int k, devs_per_host;
- int error = 0;
- struct sdebug_host_info *sdbg_host;
+ int error = 0;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
- sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
- if (NULL == sdbg_host) {
+ sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
+ if (sdbg_host == NULL) {
pr_err("out of memory at line %d\n", __LINE__);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&sdbg_host->dev_info_list);
+ INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
- for (k = 0; k < devs_per_host; k++) {
+ for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ error = -ENOMEM;
goto clean;
- }
- }
+ }
+ }
- spin_lock(&sdebug_host_list_lock);
- list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ spin_lock(&sdebug_host_list_lock);
+ list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
+ spin_unlock(&sdebug_host_list_lock);
- sdbg_host->dev.bus = &pseudo_lld_bus;
- sdbg_host->dev.parent = pseudo_primary;
- sdbg_host->dev.release = &sdebug_release_adapter;
+ sdbg_host->dev.bus = &pseudo_lld_bus;
+ sdbg_host->dev.parent = pseudo_primary;
+ sdbg_host->dev.release = &sdebug_release_adapter;
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
- error = device_register(&sdbg_host->dev);
+ error = device_register(&sdbg_host->dev);
- if (error)
+ if (error)
goto clean;
++sdebug_add_host;
- return error;
+ return error;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
@@ -5217,20 +5508,20 @@ clean:
}
kfree(sdbg_host);
- return error;
+ return error;
}
static void sdebug_remove_adapter(void)
{
- struct sdebug_host_info * sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host = NULL;
- spin_lock(&sdebug_host_list_lock);
- if (!list_empty(&sdebug_host_list)) {
- sdbg_host = list_entry(sdebug_host_list.prev,
- struct sdebug_host_info, host_list);
+ spin_lock(&sdebug_host_list_lock);
+ if (!list_empty(&sdebug_host_list)) {
+ sdbg_host = list_entry(sdebug_host_list.prev,
+ struct sdebug_host_info, host_list);
list_del(&sdbg_host->host_list);
}
- spin_unlock(&sdebug_host_list_lock);
+ spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
@@ -5281,6 +5572,12 @@ static bool fake_timeout(struct scsi_cmnd *scp)
return false;
}
+static bool fake_host_busy(struct scsi_cmnd *scp)
+{
+ return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
+ (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
+}
+
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -5323,6 +5620,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
b);
}
+ if (fake_host_busy(scp))
+ return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
goto err_out;
@@ -5420,12 +5719,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
errsts = r_pfp(scp, devip);
fini:
- return schedule_resp(scp, devip, errsts,
- ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
+ if (F_DELAY_OVERR & flags)
+ return schedule_resp(scp, devip, errsts, 0, 0);
+ else
+ return schedule_resp(scp, devip, errsts, sdebug_jdelay,
+ sdebug_ndelay);
check_cond:
- return schedule_resp(scp, devip, check_condition_result, 0);
+ return schedule_resp(scp, devip, check_condition_result, 0, 0);
err_out:
- return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
+ return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
@@ -5484,7 +5786,7 @@ static int sdebug_driver_probe(struct device * dev)
if (sdebug_mq_active)
hpnt->nr_hw_queues = submit_queues;
- sdbg_host->shost = hpnt;
+ sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
hpnt->max_id = sdebug_num_tgts + 1;
@@ -5542,12 +5844,12 @@ static int sdebug_driver_probe(struct device * dev)
sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
if (sdebug_every_nth) /* need stats counters for every_nth */
sdebug_statistics = true;
- error = scsi_add_host(hpnt, &sdbg_host->dev);
- if (error) {
+ error = scsi_add_host(hpnt, &sdbg_host->dev);
+ if (error) {
pr_err("scsi_add_host failed\n");
- error = -ENODEV;
+ error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else
scsi_scan_host(hpnt);
return error;
@@ -5555,7 +5857,7 @@ static int sdebug_driver_probe(struct device * dev)
static int sdebug_driver_remove(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = to_sdebug_host(dev);
@@ -5565,16 +5867,16 @@ static int sdebug_driver_remove(struct device * dev)
return -ENODEV;
}
- scsi_remove_host(sdbg_host->shost);
+ scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
- list_del(&sdbg_devinfo->dev_list);
- kfree(sdbg_devinfo);
- }
+ list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo);
+ }
- scsi_host_put(sdbg_host->shost);
- return 0;
+ scsi_host_put(sdbg_host->shost);
+ return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dfb8da83fa50..f3b117246d47 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -108,8 +108,8 @@ static struct {
* seagate controller, which causes SCSI code to reset bus.
*/
{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
- {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
- {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+ {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */
+ {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */
{"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
{"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
{"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
@@ -119,7 +119,7 @@ static struct {
{"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
{"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
{"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
- {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN},
{"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
@@ -158,8 +158,8 @@ static struct {
{"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
{"DELL", "PV530F", NULL, BLIST_SPARSELUN},
{"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
- {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
- {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
+ {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
+ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
@@ -181,15 +181,14 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
- {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "DF400", "*", BLIST_REPORTLUN2},
+ {"HP", "DF500", "*", BLIST_REPORTLUN2},
{"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -255,7 +254,6 @@ static struct {
{"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
{"SUN", "T300", "*", BLIST_SPARSELUN},
{"SUN", "T4", "*", BLIST_SPARSELUN},
- {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
@@ -353,7 +351,8 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, blist_flags_t flags, int key)
+ char *strflags, blist_flags_t flags,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -402,7 +401,7 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
- const char *model, int key)
+ const char *model, enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -485,7 +484,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
*
* Returns: 0 OK, -error on failure.
**/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *found;
@@ -587,20 +587,15 @@ blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key)
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
- int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
- err = PTR_ERR(devinfo);
- if (err != -ENOENT)
- return err;
-
- /* nothing found, return nothing */
+ /* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
@@ -774,7 +769,7 @@ void scsi_exit_devinfo(void)
* Adds the requested list, returns zero on success, -EEXIST if the
* key is already registered to a list, or other error on failure.
*/
-int scsi_dev_info_add_list(int key, const char *name)
+int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name)
{
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
@@ -806,7 +801,7 @@ EXPORT_SYMBOL(scsi_dev_info_add_list);
* frees the list itself. Returns 0 on success or -EINVAL if the key
* can't be found.
*/
-int scsi_dev_info_remove_list(int key)
+int scsi_dev_info_remove_list(enum scsi_devinfo_key key)
{
struct list_head *lh, *lh_next;
struct scsi_dev_info_list_table *devinfo_table =
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 2b785d09d5bd..b88b5dbbc444 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
+ {"IBM", "3542", "rdac", },
+ {"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
- {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
+ {"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 62b56de38ae8..d042915ce895 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,9 +61,10 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
struct scsi_cmnd *);
-/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
+ lockdep_assert_held(shost->host_lock);
+
if (atomic_read(&shost->host_busy) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
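Replacing the "called with shost->host_lock held" comment with
lockdep_assert_held() turns the locking contract into a runtime check: with
CONFIG_PROVE_LOCKING enabled, an unlocked caller now triggers a lockdep
splat instead of racing silently. Callers therefore keep the pattern used
by scsi_eh_inc_host_failed() below:

	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_eh_wakeup(shost);
	spin_unlock_irqrestore(shost->host_lock, flags);
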
@@ -220,6 +221,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
}
}
+static void scsi_eh_inc_host_failed(struct rcu_head *head)
+{
+ struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+ scsi_eh_wakeup(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
scsi_eh_reset(scmd);
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- shost->host_failed++;
- scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * Ensure that all tasks observe the host state change before the
+ * host_failed change.
+ */
+ call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
}
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d9ca1dfab154..976c936029cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -164,7 +164,7 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
* for a requeue after completion, which should only occur in this
* file.
*/
-static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
@@ -220,7 +220,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
- __scsi_queue_insert(cmd, reason, 1);
+ __scsi_queue_insert(cmd, reason, true);
}
@@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+/*
+ * Decrement the host_busy counter and wake up the error handler if necessary.
+ * To avoid a missed wakeup when shost->host_busy == shost->host_failed,
+ * call_rcu() in scsi_eh_scmd_add() is paired with an RCU read lock in
+ * this function: either this function finishes in its entirety before
+ * scsi_eh_scmd_add() increases the host_failed counter, or it notices
+ * the shost state change made by scsi_eh_scmd_add().
+ */
+static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
- struct Scsi_Host *shost = sdev->host;
- struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
+ rcu_read_lock();
atomic_dec(&shost->host_busy);
- if (starget->can_queue > 0)
- atomic_dec(&starget->target_busy);
-
- if (unlikely(scsi_host_in_recovery(shost) &&
- (shost->host_failed || shost->host_eh_scheduled))) {
+ if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
- scsi_eh_wakeup(shost);
+ if (shost->host_failed || shost->host_eh_scheduled)
+ scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
+ rcu_read_unlock();
+}
+
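
The guarantee relied on here is the standard RCU one: a call_rcu() callback
runs only after every read-side critical section that was in flight when
call_rcu() was invoked has completed. In schematic form (all names below
are illustrative, not the kernel's):

	static void deferred_update(struct rcu_head *head)
	{
		/* Runs after all pre-existing readers have unlocked, so a
		 * reader either finished first or will see the update. */
		update_shared_state();
		wake_up_waiter();
	}

	void reader_side(void)
	{
		rcu_read_lock();
		act_on(read_shared_state());  /* one indivisible section */
		rcu_read_unlock();
	}

	void writer_side(struct my_obj *obj)
	{
		publish_state_change();
		call_rcu(&obj->rcu, deferred_update);
	}
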
+void scsi_device_unbusy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
+
+ scsi_dec_host_busy(shost);
+
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
atomic_dec(&sdev->device_busy);
}
@@ -998,11 +1015,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
break;
case ACTION_RETRY:
/* Retry the same command immediately */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
break;
case ACTION_DELAYED_RETRY:
/* Retry the same command after a delay */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
break;
}
}
@@ -1128,7 +1145,7 @@ EXPORT_SYMBOL(scsi_init_io);
* Called from inside blk_get_request() for pass-through requests and from
* inside scsi_init_command() for filesystem requests.
*/
-void scsi_initialize_rq(struct request *rq)
+static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1136,7 +1153,6 @@ void scsi_initialize_rq(struct request *rq)
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
-EXPORT_SYMBOL(scsi_initialize_rq);
/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
@@ -1532,7 +1548,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
return 0;
}
@@ -2020,7 +2036,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a5946cd64caa..99f1db5e467e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
/* scsi_devinfo.c */
/* list of keys for the lists */
-enum {
+enum scsi_devinfo_key {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
};
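Typing the key as enum scsi_devinfo_key lets the compiler catch callers
that pass a stray int. A hedged sketch of the keyed-list lifecycle these
prototypes imply (vendor/model strings and flags are placeholders; the SPI
transport is the in-tree user of SCSI_DEVINFO_SPI):

	static blist_flags_t example_spi_lookup(struct scsi_device *sdev)
	{
		blist_flags_t flags = 0;

		if (scsi_dev_info_add_list(SCSI_DEVINFO_SPI, "SPI") == 0) {
			scsi_dev_info_list_add_keyed(0, "VENDOR", "MODEL",
						     NULL, 0,
						     SCSI_DEVINFO_SPI);
			flags = scsi_get_device_flags_keyed(sdev, "VENDOR",
							    "MODEL",
							    SCSI_DEVINFO_SPI);
			scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);
		}
		return flags;
	}
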
@@ -56,13 +56,15 @@ extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key);
+ enum scsi_devinfo_key key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- blist_flags_t flags, int key);
-extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
-extern int scsi_dev_info_add_list(int key, const char *name);
-extern int scsi_dev_info_remove_list(int key);
+ blist_flags_t flags,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
+extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
@@ -184,7 +186,6 @@ void scsi_dh_release_device(struct scsi_device *sdev);
static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
-static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
/*
* internal scsi timeout functions: for use by mid-layer and transport
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 26ce17178401..91b90f672d23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1278,7 +1278,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add device: %d\n", error);
- scsi_dh_remove_device(sdev);
return error;
}
@@ -1287,7 +1286,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add class device: %d\n", error);
- scsi_dh_remove_device(sdev);
device_del(&sdev->sdev_gendev);
return error;
}
@@ -1354,7 +1352,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
bsg_unregister_queue(sdev->request_queue);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
- scsi_dh_remove_device(sdev);
device_del(dev);
} else
put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4664024bd5d3..be3be0f9cb2d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,8 +267,8 @@ static const struct {
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
- { FC_PORTSPEED_64BIT, "64 Gbit" },
- { FC_PORTSPEED_128BIT, "128 Gbit" },
+ { FC_PORTSPEED_64GBIT, "64 Gbit" },
+ { FC_PORTSPEED_128GBIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a028ab3322a9..ce756d575aff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2172,7 +2172,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
/* Wait 1 second for next try */
msleep(1000);
- printk(".");
+ printk(KERN_CONT ".");
/*
* Wait for USB flash devices with slow firmware.
@@ -2202,9 +2202,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
if (spintime) {
if (scsi_status_is_good(the_result))
- printk("ready\n");
+ printk(KERN_CONT "ready\n");
else
- printk("not responding...\n");
+ printk(KERN_CONT "not responding...\n");
}
}
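Since the 4.9-era printk rework, continuation fragments are merged only
when explicitly marked; a bare printk(".") now lands on its own line at the
default log level. The intended single-line progress output therefore needs
KERN_CONT, roughly like this (disk_name and ready are illustrative):

	printk(KERN_NOTICE "%s: spinning up disk...", disk_name);
	while (!ready) {
		msleep(1000);
		printk(KERN_CONT ".");		/* append to the same line */
	}
	printk(KERN_CONT "ready\n");
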
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 11826c5c2dd4..62f04c0511cf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
}
static void ses_match_to_enclosure(struct enclosure_device *edev,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ int refresh)
{
+ struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent);
struct efd efd = {
.addr = 0,
};
- ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+ if (refresh)
+ ses_enclosure_data_process(edev, edev_sdev, 0);
if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
@@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev,
struct enclosure_device *prev = NULL;
while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
- ses_match_to_enclosure(edev, sdev);
+ ses_match_to_enclosure(edev, sdev, 1);
prev = edev;
}
return -ENODEV;
@@ -768,7 +771,7 @@ page2_not_supported:
shost_for_each_device(tmp_sdev, sdev->host) {
if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
continue;
- ses_match_to_enclosure(edev, tmp_sdev);
+ ses_match_to_enclosure(edev, tmp_sdev, 0);
}
return 0;
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index 0f42a225a664..e6b779930230 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I.
-obj-m += smartpqi.o
+obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b141d7641a2e..6c399480783d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4712,7 +4712,7 @@ static ssize_t read_byte_cnt_show(struct device *dev,
static DEVICE_ATTR_RO(read_byte_cnt);
/**
- * read_us_show - return read us - overall time spent waiting on reads in ns.
+ * read_ns_show - return read ns - overall time spent waiting on reads in ns.
* @dev: struct device
* @attr: attribute structure
* @buf: buffer to return formatted data in
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3b3d1d050cac..40fc7a590e81 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1834,8 +1834,10 @@ static int storvsc_probe(struct hv_device *device,
fc_host_node_name(host) = stor_device->node_name;
fc_host_port_name(host) = stor_device->port_name;
stor_device->rport = fc_remote_port_add(host, 0, &ids);
- if (!stor_device->rport)
+ if (!stor_device->rport) {
+ ret = -ENOMEM;
goto err_out4;
+ }
}
#endif
return 0;
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 277752b0fc6f..1a1b5d9fe514 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -157,6 +157,8 @@ enum {
#define UTP_TRANSFER_REQ_LIST_READY 0x2
#define UTP_TASK_REQ_LIST_READY 0x4
#define UIC_COMMAND_READY 0x8
+#define HOST_ERROR_INDICATOR 0x10
+#define DEVICE_ERROR_INDICATOR 0x20
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -185,6 +187,10 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
+#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
+#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
+#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
@@ -192,10 +198,20 @@ enum {
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR 0x80000000
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
+#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
/* UECT - Host UIC Error Code Transport Layer 44h */
#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
+#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
+#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
+#define UIC_TRANSPORT_BAD_TC 0x10
+#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
+#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
/* UECDME - Host UIC Error Code DME 48h */
#define UIC_DME_ERROR 0x80000000
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2a9da2e0ea6b..2ba2b7b47f41 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -803,7 +803,9 @@ static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
static int wd719x_board_found(struct Scsi_Host *sh)
{
struct wd719x *wd = shost_priv(sh);
- char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
+ static const char * const card_types[] = {
+ "Unknown card", "WD7193", "WD7197", "WD7296"
+ };
int ret;
INIT_LIST_HEAD(&wd->active_scbs);