Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 43
-rw-r--r--  drivers/scsi/aacraid/linit.c | 5
-rw-r--r--  drivers/scsi/aacraid/rx.c | 15
-rw-r--r--  drivers/scsi/aacraid/src.c | 20
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 10
-rw-r--r--  drivers/scsi/hpsa.c | 10
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 10
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 17
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 64
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 4
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 4
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 35
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf.h | 4
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_hsi.h | 68
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 35
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 12
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 8
-rw-r--r--  drivers/scsi/qedi/qedi_debugfs.c | 4
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 59
-rw-r--r--  drivers/scsi/qedi/qedi_fw_api.c | 139
-rw-r--r--  drivers/scsi/qedi/qedi_fw_iscsi.h | 2
-rw-r--r--  drivers/scsi/qedi/qedi_gbl.h | 5
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 9
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.h | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 29
-rw-r--r--  drivers/scsi/qedi/qedi_version.h | 8
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 46
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 33
-rw-r--r--  drivers/scsi/scsi_lib.c | 27
-rw-r--r--  drivers/scsi/scsi_scan.c | 13
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 10
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 28
-rw-r--r--  drivers/scsi/sd.c | 45
-rw-r--r--  drivers/scsi/sd.h | 11
-rw-r--r--  drivers/scsi/sd_zbc.c | 235
-rw-r--r--  drivers/scsi/sg.c | 18
-rw-r--r--  drivers/scsi/storvsc_drv.c | 3
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 7
46 files changed, 588 insertions(+), 539 deletions(-)
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3ab323198fb2..0095fcbd1c88 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1682,6 +1682,7 @@ struct aac_dev
struct aac_ciss_phys_luns_resp *safw_phys_luns;
u8 adapter_shutdown;
u32 handle_pci_error;
+ bool init_reset;
};
#define aac_adapter_interrupt(dev) \
@@ -1733,6 +1734,7 @@ struct aac_dev
#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010)
#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020)
#define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080)
/*
* Define the command values
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index fbf8b7ebd654..84858d5c8257 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -468,35 +468,6 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
return 0;
}
-#ifdef CONFIG_EEH
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
- /* Check for an EEH failure for the given
- * device node. Function eeh_dev_check_failure()
- * returns 0 if there has not been an EEH error
- * otherwise returns a non-zero value.
- *
- * Need to be called before any PCI operation,
- * i.e.,before aac_adapter_check_health()
- */
- struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);
-
- if (eeh_dev_check_failure(edev)) {
- /* The EEH mechanisms will handle this
- * error and reset the device if
- * necessary.
- */
- return 1;
- }
- return 0;
-}
-#else
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
- return 0;
-}
-#endif
-
/*
* Define the highest level of host to adapter communication routines.
* These routines will support host to adapter FS commuication. These
@@ -702,7 +673,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
return -ETIMEDOUT;
}
- if (aac_check_eeh_failure(dev))
+ if (unlikely(pci_channel_offline(dev->pdev)))
return -EFAULT;
if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -802,7 +773,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- if (aac_check_eeh_failure(dev))
+ if (unlikely(pci_channel_offline(dev->pdev)))
return -EFAULT;
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
@@ -1584,6 +1555,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* will ensure that i/o is queisced and the card is flushed in that
* case.
*/
+ aac_free_irq(aac);
aac_fib_map_free(aac);
dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
@@ -1591,7 +1563,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac->comm_phys = 0;
kfree(aac->queues);
aac->queues = NULL;
- aac_free_irq(aac);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
@@ -2531,8 +2502,8 @@ int aac_command_thread(void *data)
/* Synchronize our watches */
if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
&& (now.tv_nsec > (NSEC_PER_SEC / HZ)))
- difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
- + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
+ difference = HZ + HZ / 2 -
+ now.tv_nsec / (NSEC_PER_SEC / HZ);
else {
if (now.tv_nsec > NSEC_PER_SEC / 2)
++now.tv_sec;
@@ -2556,6 +2527,10 @@ int aac_command_thread(void *data)
if (kthread_should_stop())
break;
+ /*
+ * we probably want usleep_range() here instead of the
+ * jiffies computation
+ */
schedule_timeout(difference);
if (kthread_should_stop())
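The aac_fib_send()/aac_hba_send() hunks above replace the EEH-only helper with the generic pci_channel_offline() test, which covers the frozen and permanently-failed channel states on every architecture, not just ones with CONFIG_EEH. A minimal sketch of that pattern, using a hypothetical foo_send_cmd() caller that is not part of this patch:

#include <linux/pci.h>

/* Bail out before touching the adapter if the PCI channel is in
 * error recovery (frozen) or permanently failed; hypothetical caller. */
static int foo_send_cmd(struct aac_dev *dev)
{
	if (unlikely(pci_channel_offline(dev->pdev)))
		return -EFAULT;

	/* ... build and issue the FIB as before ... */
	return 0;
}
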
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index b730e8edb8b3..2664ea0df35f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
info = &aac->hba_map[bus][cid];
if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
- fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
}
}
@@ -1677,6 +1677,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
aac->cardtype = index;
INIT_LIST_HEAD(&aac->entry);
+ if (aac_reset_devices || reset_devices)
+ aac->init_reset = true;
+
aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
if (!aac->fibs)
goto out_free_host;
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 93ef7c37e568..620166694171 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -561,11 +561,16 @@ int _aac_rx_init(struct aac_dev *dev)
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
- if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
- !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
- /* Make sure the Hardware FIFO is empty */
- while ((++restart < 512) &&
- (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+
+ if (((status & 0x0c) != 0x0c) || dev->init_reset) {
+ dev->init_reset = false;
+ if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
+ /* Make sure the Hardware FIFO is empty */
+ while ((++restart < 512) &&
+ (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+ }
+ }
+
/*
* Check to see if the board panic'd while booting.
*/
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 0c9361c87ec8..fde6b6aa86e3 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -868,9 +868,13 @@ int aac_src_init(struct aac_dev *dev)
/* Failure to reset here is an option ... */
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
- if ((aac_reset_devices || reset_devices) &&
- !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
- ++restart;
+
+ if (dev->init_reset) {
+ dev->init_reset = false;
+ if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+ ++restart;
+ }
+
/*
* Check to see if the board panic'd while booting.
*/
@@ -1014,9 +1018,13 @@ int aac_srcv_init(struct aac_dev *dev)
/* Failure to reset here is an option ... */
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
- if ((aac_reset_devices || reset_devices) &&
- !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
- ++restart;
+
+ if (dev->init_reset) {
+ dev->init_reset = false;
+ if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+ ++restart;
+ }
+
/*
* Check to see if flash update is running.
* Wait for the adapter to be up and running. Wait up to 5 minutes
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 1d01dd610454..3976e787ba64 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3133,7 +3133,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
struct bfad_s *bfad = im_port->bfad;
void *payload_kbuf;
int rc = -EINVAL;
@@ -3348,7 +3349,8 @@ int
bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
struct bfad_s *bfad = im_port->bfad;
bfa_bsg_fcpt_t *bsg_fcpt;
struct bfad_fcxp *drv_fcxp;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 24e657a4ec80..c05d6e91e4bd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -546,6 +546,7 @@ int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
struct device *dev)
{
+ struct bfad_im_port_pointer *im_portp;
int error = 1;
mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
goto out_free_idr;
}
- im_port->shost->hostdata[0] = (unsigned long)im_port;
+ im_portp = shost_priv(im_port->shost);
+ im_portp->p = im_port;
im_port->shost->unique_id = im_port->idr_id;
im_port->shost->this_id = -1;
im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
sht->sg_tablesize = bfad->cfg_data.io_max_sge;
- return scsi_host_alloc(sht, sizeof(unsigned long));
+ return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
}
void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 7f7616c52814..af66275570c3 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
struct fc_vport *fc_vport;
};
+struct bfad_im_port_pointer {
+ struct bfad_im_port_s *p;
+};
+
+static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
+{
+ struct bfad_im_port_pointer *im_portp = shost_priv(host);
+ return im_portp->p;
+}
+
enum bfad_itnim_state {
ITNIM_STATE_NONE,
ITNIM_STATE_ONLINE,
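The bfad_get_im_port() helper added above pairs with the bfad_im.c hunk that now sizes the Scsi_Host private area for a small wrapper struct instead of stuffing a cast pointer into hostdata[0]. A sketch of how the two pieces are meant to be used together, with a hypothetical foo_sht host template that is not part of this patch:

/* Allocation side: reserve room for the wrapper in shost_priv(). */
shost = scsi_host_alloc(&foo_sht, sizeof(struct bfad_im_port_pointer));
if (shost) {
	struct bfad_im_port_pointer *im_portp = shost_priv(shost);

	im_portp->p = im_port;	/* remember the real im_port */
}

/* Lookup side, e.g. in a bsg handler: */
struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
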
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 87b260e403ec..5293e6827ce5 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -901,14 +901,14 @@ static ssize_t host_show_legacy_board(struct device *dev,
return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(lunid);
+static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
+static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
host_show_hp_ssd_smart_path_enabled, NULL);
-static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
+static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
host_show_hp_ssd_smart_path_status,
host_store_hp_ssd_smart_path_status);
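The DEVICE_ATTR_RO() conversions above (and the _RO/_WO/_RW conversions in the lpfc_attr.c hunks further down) rely on the macros deriving the callback names and permissions from the attribute name: _RO expects <name>_show with mode 0444, _WO expects <name>_store with mode 0200, and _RW expects both with mode 0644. A sketch with a hypothetical foo attribute, not one defined by this patch:

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}

/* Equivalent to DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);
 * declares dev_attr_foo with .show = foo_show and mode 0444. */
static DEVICE_ATTR_RO(foo);
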
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 5da46052e179..21be672679fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
case ELS_FLOGI:
if (!lport->point_to_multipoint)
fc_lport_recv_flogi_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
break;
case ELS_LOGO:
if (fc_frame_sid(fp) == FC_FID_FLOGI)
fc_lport_recv_logo_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
break;
case ELS_RSCN:
lport->tt.disc_recv_req(lport, fp);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 7444d40e261c..6a4f8198b78e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
struct domain_device *dev;
- unsigned int reslen = 0;
+ unsigned int rcvlen = 0;
int ret = -EINVAL;
/* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
job->reply_payload.sg_list);
- if (ret > 0) {
- /* positive number is the untransferred residual */
- reslen = ret;
+ if (ret >= 0) {
+ /* bsg_job_done() requires the length received */
+ rcvlen = job->reply_payload.payload_len - ret;
ret = 0;
}
out:
- bsg_job_done(job, ret, reslen);
+ bsg_job_done(job, ret, rcvlen);
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 626727207889..6de9681ace82 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -487,15 +487,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
- int res;
+ int res = TMF_RESP_FUNC_FAILED;
struct sas_task *task = TO_SAS_TASK(cmd);
struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
+ unsigned long flags;
if (!i->dft->lldd_abort_task)
return FAILED;
- res = i->dft->lldd_abort_task(task);
+ spin_lock_irqsave(host->host_lock, flags);
+ /* We cannot do async aborts for SATA devices */
+ if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return FAILED;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ if (task)
+ res = i->dft->lldd_abort_task(task);
+ else
+ SAS_DPRINTK("no task to abort\n");
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d188fb565a32..ac77081e6e9e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2294,8 +2294,8 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
-static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
-static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
+static DEVICE_ATTR_RO(lpfc_drvr_version);
+static DEVICE_ATTR_RO(lpfc_enable_fip);
static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@@ -2306,12 +2306,11 @@ static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
-static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
-static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
-static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
-static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
-static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
- lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR_RO(lpfc_temp_sensor);
+static DEVICE_ATTR_RO(lpfc_fips_level);
+static DEVICE_ATTR_RO(lpfc_fips_rev);
+static DEVICE_ATTR_RO(lpfc_dss);
+static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
@@ -2419,8 +2418,7 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
- lpfc_soft_wwn_enable_store);
+static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
/**
* lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
@@ -2519,8 +2517,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"reinit adapter - %d\n", stat2);
return (stat1 || stat2) ? -EIO : count;
}
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
- lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
+static DEVICE_ATTR_RW(lpfc_soft_wwpn);
/**
* lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
@@ -2583,8 +2580,7 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
- lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+static DEVICE_ATTR_RW(lpfc_soft_wwnn);
/**
* lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
@@ -3102,8 +3098,7 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 1 - poll with interrupts enabled"
" 3 - poll and disable FCP ring interrupts");
-static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
- lpfc_poll_show, lpfc_poll_store);
+static DEVICE_ATTR_RW(lpfc_poll);
int lpfc_no_hba_reset_cnt;
unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
@@ -3336,8 +3331,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
lpfc_vport_param_store(nodev_tmo)
-static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
- lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
+static DEVICE_ATTR_RW(lpfc_nodev_tmo);
/*
# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
@@ -3386,8 +3380,7 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
}
lpfc_vport_param_store(devloss_tmo)
-static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
- lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
+static DEVICE_ATTR_RW(lpfc_devloss_tmo);
/*
* lpfc_suppress_rsp: Enable suppress rsp feature is firmware supports it
@@ -3580,8 +3573,7 @@ lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_vport_param_store(restrict_login);
-static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
- lpfc_restrict_login_show, lpfc_restrict_login_store);
+static DEVICE_ATTR_RW(lpfc_restrict_login);
/*
# Some disk devices have a "select ID" or "select Target" capability.
@@ -3695,8 +3687,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
}
lpfc_param_show(topology)
-static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
- lpfc_topology_show, lpfc_topology_store);
+static DEVICE_ATTR_RW(lpfc_topology);
/**
* lpfc_static_vport_show: Read callback function for
@@ -3726,8 +3717,7 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
/*
* Sysfs attribute to control the statistical data collection.
*/
-static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
- lpfc_static_vport_show, NULL);
+static DEVICE_ATTR_RO(lpfc_static_vport);
/**
* lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -3954,8 +3944,7 @@ lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
/*
* Sysfs attribute to control the statistical data collection.
*/
-static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
- lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
+static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
/*
* lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
@@ -4194,8 +4183,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
return -EINVAL;
}
-static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
- lpfc_link_speed_show, lpfc_link_speed_store);
+static DEVICE_ATTR_RW(lpfc_link_speed);
/*
# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
@@ -4288,8 +4276,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
- lpfc_aer_support_show, lpfc_aer_support_store);
+static DEVICE_ATTR_RW(lpfc_aer_support);
/**
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
@@ -4436,8 +4423,7 @@ LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
"Enable PCIe device SR-IOV virtual fn");
lpfc_param_show(sriov_nr_virtfn)
-static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
- lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
/**
* lpfc_request_firmware_store - Request for Linux generic firmware upgrade
@@ -4611,8 +4597,7 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
return 0;
}
-static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
- lpfc_fcp_imax_show, lpfc_fcp_imax_store);
+static DEVICE_ATTR_RW(lpfc_fcp_imax);
/*
* lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
@@ -4772,8 +4757,7 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
return 0;
}
-static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
- lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
@@ -4859,9 +4843,7 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_vport_param_store(max_scsicmpl_time);
-static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
- lpfc_max_scsicmpl_time_show,
- lpfc_max_scsicmpl_time_store);
+static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
/*
# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 56faeb049b4a..87c08ff37ddd 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6409 Cannot post to RQ %d: %x %x\n",
rqb_entry->hrq->queue_id,
rqb_entry->hrq->host_index,
rqb_entry->hrq->hba_index);
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0f1d88f272be..a71ee67df084 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -7033,15 +7033,15 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
/**
* megasas_mgmt_poll - char node "poll" entry point
* */
-static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
poll_wait(file, &megasas_poll_wait, wait);
spin_lock_irqsave(&poll_aen_lock, flags);
if (megasas_poll_wait_aen)
- mask = (POLLIN | POLLRDNORM);
+ mask = (EPOLLIN | EPOLLRDNORM);
else
mask = 0;
megasas_poll_wait_aen = 0;
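This hunk and the mpt3sas_ctl.c one below follow the tree-wide switch of ->poll() methods to the __poll_t return type with the EPOLL* bit names; the bit values are unchanged, only the type annotation becomes stricter. A sketch for a hypothetical character device, not part of this patch:

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &foo_wait_queue, wait);
	if (foo_event_pending)			/* hypothetical driver state */
		mask = EPOLLIN | EPOLLRDNORM;	/* was POLLIN | POLLRDNORM */
	return mask;
}
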
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 1a6cddd0111a..523971aeb4c1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -534,7 +534,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
* @wait -
*
*/
-static unsigned int
+static __poll_t
_ctl_poll(struct file *filep, poll_table *wait)
{
struct MPT3SAS_ADAPTER *ioc;
@@ -546,7 +546,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
if (ioc->aen_event_read_flag) {
spin_unlock(&gioc_lock);
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
}
}
spin_unlock(&gioc_lock);
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index a4f28b7e4c65..e18877177f1b 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
return req;
for_each_bio(bio) {
- ret = blk_rq_append_bio(req, bio);
+ struct bio *bounce_bio = bio;
+
+ ret = blk_rq_append_bio(req, &bounce_bio);
if (ret)
return ERR_PTR(ret);
}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 7d91e53562f8..a980ef756a67 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
- struct fcoe_task_context *ctx = task_params->context;
+ struct e4_fcoe_task_context *ctx = task_params->context;
+ const u8 val_byte = ctx->ystorm_ag_context.byte0;
+ struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
- struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 io_size, val;
bool slow_sgl;
memset(ctx, 0, sizeof(*(ctx)));
+ ctx->ystorm_ag_context.byte0 = val_byte;
slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge);
io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
@@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
y_st_ctx = &ctx->ystorm_st_context;
y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
- y_st_ctx->task_type = task_params->task_type;
+ y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
/* Tstorm ctx */
t_st_ctx = &ctx->tstorm_st_context;
- t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
- FCOE_TASK_DEV_TYPE_TAPE :
- FCOE_TASK_DEV_TYPE_DISK);
+ t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
+ FCOE_TASK_DEV_TYPE_TAPE :
+ FCOE_TASK_DEV_TYPE_DISK);
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
- t_st_ctx->read_only.task_type = task_params->task_type;
+ t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
SET_FIELD(m_st_ctx->flags,
MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
(slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ m_st_ctx->sgl_params.sgl_num_sges =
+ cpu_to_le16(sgl_task_params->num_sges);
} else {
/* Tstorm ctx */
SET_FIELD(t_st_ctx->read_write.flags,
@@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
sgl_task_params);
}
+ /* Init Sqe */
init_common_sqe(task_params, SEND_FCOE_CMD);
+
return 0;
}
@@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
- struct fcoe_task_context *ctx = task_params->context;
+ struct e4_fcoe_task_context *ctx = task_params->context;
+ const u8 val_byte = ctx->ystorm_ag_context.byte0;
+ struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
- struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 val;
memset(ctx, 0, sizeof(*(ctx)));
+ ctx->ystorm_ag_context.byte0 = val_byte;
/* Init Ystorm */
y_st_ctx = &ctx->ystorm_st_context;
@@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
SET_FIELD(y_st_ctx->sgl_mode,
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
- y_st_ctx->task_type = task_params->task_type;
+ y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
@@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
- t_st_ctx->read_only.task_type = task_params->task_type;
+ t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
}
int init_initiator_sequence_recovery_fcoe_task(
- struct fcoe_task_params *task_params, u32 off)
+ struct fcoe_task_params *task_params, u32 desired_offset)
{
init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
- task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+ task_params->sqe->additional_info_union.seq_rec_updated_offset =
+ desired_offset;
return 0;
}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index f9c50faa748e..b5c236efd465 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -13,7 +13,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
- struct fcoe_task_context *context;
+ struct e4_fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 9bf7b227e69a..c105a2e48ac1 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -129,7 +129,7 @@ struct qedf_ioreq {
struct delayed_work timeout_work;
struct completion tm_done;
struct completion abts_done;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+ struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59c18ca4cda9..aa22b11436ba 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index 7faef80c5f7a..503c1ae3ccd0 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -225,19 +225,6 @@ enum fcoe_cqe_type {
MAX_FCOE_CQE_TYPE
};
-
-/*
- * FCoE device type
- */
-enum fcoe_device_type {
- FCOE_TASK_DEV_TYPE_DISK,
- FCOE_TASK_DEV_TYPE_TAPE,
- MAX_FCOE_DEVICE_TYPE
-};
-
-
-
-
/*
* FCoE fast path error codes
*/
@@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
MAX_FCOE_SP_ERROR_CODE
};
-
-/*
- * FCoE SQE request type
- */
-enum fcoe_sqe_request_type {
- SEND_FCOE_CMD,
- SEND_FCOE_MIDPATH,
- SEND_FCOE_ABTS_REQUEST,
- FCOE_EXCHANGE_CLEANUP,
- FCOE_SEQUENCE_RECOVERY,
- SEND_FCOE_XFER_RDY,
- SEND_FCOE_RSP,
- SEND_FCOE_RSP_WITH_SENSE_DATA,
- SEND_FCOE_TARGET_DATA,
- SEND_FCOE_INITIATOR_DATA,
- /*
- * Xfer Continuation (==1) ready to be sent. Previous XFERs data
- * received successfully.
- */
- SEND_FCOE_XFER_CONTINUATION_RDY,
- SEND_FCOE_TARGET_ABTS_RSP,
- MAX_FCOE_SQE_REQUEST_TYPE
-};
-
-
/*
* FCoE task TX state
*/
@@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
MAX_FCOE_TASK_TX_STATE
};
-
-/*
- * FCoE task type
- */
-enum fcoe_task_type {
- FCOE_TASK_TYPE_WRITE_INITIATOR,
- FCOE_TASK_TYPE_READ_INITIATOR,
- FCOE_TASK_TYPE_MIDPATH,
- FCOE_TASK_TYPE_UNSOLICITED,
- FCOE_TASK_TYPE_ABTS,
- FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
- FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
- FCOE_TASK_TYPE_WRITE_TARGET,
- FCOE_TASK_TYPE_READ_TARGET,
- FCOE_TASK_TYPE_RSP,
- FCOE_TASK_TYPE_RSP_SENSE_DATA,
- FCOE_TASK_TYPE_ABTS_TARGET,
- FCOE_TASK_TYPE_ENUM_SIZE,
- MAX_FCOE_TASK_TYPE
-};
-
-struct scsi_glbl_queue_entry {
- /* Start physical address for the RQ (receive queue) PBL. */
- struct regpair rq_pbl_addr;
- /* Start physical address for the CQ (completion queue) PBL. */
- struct regpair cq_pbl_addr;
- /* Start physical address for the CMDQ (command queue) PBL. */
- struct regpair cmdq_pbl_addr;
-};
-
#endif /* __QEDF_HSI__ */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ded386036c27..b15e69586a36 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+ struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -673,7 +673,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+ struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct fcoe_task_context *task_ctx;
+ struct e4_fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
u16 xid, rval;
- struct fcoe_task_context *task_ctx;
+ struct e4_fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
struct fcoe_cqe_rsp_info *fcp_rsp;
struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
struct qedf_rport *fcport;
struct qedf_ctx *qedf;
uint16_t xid;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
@@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct qedf_io_work *io_work;
u32 bdq_idx;
void *bdq_addr;
+ struct scsi_bd *p_bd_info;
+ p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
- "address.hi=%x address.lo=%x opaque_data.hi=%x "
- "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
- qedf->bdq_prod_idx, pktlen);
-
- bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+ "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
+ le32_to_cpu(p_bd_info->address.hi),
+ le32_to_cpu(p_bd_info->address.lo),
+ le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
+ le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
+ qedf->bdq_prod_idx, pktlen);
+
+ bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
if (bdq_idx >= QEDF_BDQ_SIZE) {
QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
bdq_idx);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 4809debc6110..ccd9a08ea030 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
@@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
for (i = 0; i < QEDF_BDQ_SIZE; i++) {
pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
- pbl->opaque.hi = 0;
+ pbl->opaque.fcoe_opaque.hi = 0;
/* Opaque lo data is an index into the BDQ array */
- pbl->opaque.lo = cpu_to_le32(i);
+ pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
pbl++;
}
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 397b3b8ee51a..c2478056356a 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
* this source tree.
*/
-#define QEDF_VERSION "8.20.5.0"
+#define QEDF_VERSION "8.33.0.20"
#define QEDF_DRIVER_MAJOR_VER 8
-#define QEDF_DRIVER_MINOR_VER 20
-#define QEDF_DRIVER_REV_VER 5
-#define QEDF_DRIVER_ENG_VER 0
+#define QEDF_DRIVER_MINOR_VER 33
+#define QEDF_DRIVER_REV_VER 0
+#define QEDF_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 39d77818a677..fd8a1eea3163 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
- struct status_block *sb = NULL;
+ struct status_block_e4 *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
- STATUS_BLOCK_PROD_INDEX_MASK);
+ STATUS_BLOCK_E4_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 03c772c223fa..d09afe1b567d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
@@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
(qedi->bdq_prod_idx % qedi->rq_num_entries));
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
}
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
- "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
- cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+ "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
@@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
struct scsi_bd *pbl;
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, idx);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
/* Increment producer to let f/w know we've handled the frame */
qedi->bdq_prod_idx += count;
@@ -1022,7 +1023,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
@@ -1042,8 +1043,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1124,7 +1126,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
@@ -1142,8 +1144,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1472,7 +1475,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
@@ -1495,8 +1498,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1610,7 +1614,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
@@ -1632,8 +1636,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1710,7 +1715,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
@@ -1730,8 +1735,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -2051,7 +2057,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2074,8 +2080,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
cmd->task_id = tid;
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 7df32a68bd54..a269da1a6c75 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
- struct iscsi_task_context *context;
- u16 index;
+ struct e4_iscsi_task_context *context;
u32 val;
+ u16 index;
+ u8 val_byte;
context = task_params->context;
+ val_byte = context->mstorm_ag_context.cdu_validation;
memset(context, 0, sizeof(*context));
+ context->mstorm_ag_context.cdu_validation = val_byte;
for (index = 0; index <
ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
@@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -252,10 +255,9 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
- struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
- u32 remaining_recv_len,
- u32 expected_data_transfer_len,
- u8 num_sges, bool tx_dif_conn_err_en)
+ struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ u32 remaining_recv_len, u32 expected_data_transfer_len,
+ u8 num_sges, bool tx_dif_conn_err_en)
{
u32 val;
@@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
- USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
-void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
@@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
dif_task_params->crc_seed ? 1 : 0);
- SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ RDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+ RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEGUARD,
+ RDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_HOSTINTERFACE,
+ RDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDGUARD,
+ RDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDREFTAG,
+ RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_INTERVALSIZE,
+ RDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
SET_FIELD(rdif_context->state,
- RDIF_TASK_CONTEXT_REFTAGMASK,
+ RDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
- SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag);
}
@@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
tdif_context->app_tag_value =
cpu_to_le16(dif_task_params->application_tag);
- tdif_context->partial_crc_valueB =
+ tdif_context->partial_crc_value_b =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
tdif_context->partial_crc_value_a =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
@@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+ TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
dif_task_params->tx_dif_conn_err_en ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
val = cpu_to_le32(dif_task_params->initial_ref_tag);
tdif_context->initial_ref_tag = val;
tdif_context->app_tag_mask =
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ TDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+ TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_REFTAGMASK,
+ TDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag ? 1 : 0);
}
}
-static void set_local_completion_context(struct iscsi_task_context *context)
+static void set_local_completion_context(struct e4_iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
@@ -494,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
cxt = task_params->context;
- val = cpu_to_le32(task_size);
- cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
- init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
- cmd_params);
- val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
- cxt->mstorm_st_context.sense_db.lo = val;
- val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
- cxt->mstorm_st_context.sense_db.hi = val;
+ if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
+ set_local_completion_context(cxt);
+ } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
+ val = cpu_to_le32(task_size +
+ ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
+ cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
+ cxt->mstorm_st_context.expected_itt =
+ cpu_to_le32(pdu_header->itt);
+ } else {
+ val = cpu_to_le32(task_size);
+ cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
+ val;
+ init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+ cmd_params);
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+ cxt->mstorm_st_context.sense_db.lo = val;
+
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+ cxt->mstorm_st_context.sense_db.hi = val;
+ }
if (task_params->tx_io_size) {
init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
dif_task_params);
+ init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
+ dif_task_params);
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
sgl_task_params);
@@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index b6f24f91849d..c3deb77ac388 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -13,7 +13,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
- struct iscsi_task_context *context;
+ struct e4_iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 63d793f46064..f5b5a31999aa 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
- struct async_data *data);
+ struct iscsi_eqe_data *data);
void qedi_start_conn_recovery(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn);
struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data);
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index a02b34ea5cab..7ec7f6e00fb8 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
conn_info->dup_ack_theshold = 3;
conn_info->rcv_wnd = 65535;
- conn_info->cwnd = DEF_MAX_CWND;
conn_info->ss_thresh = 65535;
conn_info->srtt = 300;
@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
(qedi_ep->ip_type == TCP_IPV6),
1, (qedi_ep->vlan_id != 0));
+ conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
conn_info->rcv_wnd_scale = 4;
- conn_info->ts_ticks_per_second = 1000;
conn_info->da_timeout_value = 200;
conn_info->ack_frequency = 2;
@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
return msg;
}
-void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
struct qedi_ctx *qedi;
@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 3247287cb0e7..ea1315189922 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct qedi_io_bdt io_tbl;
- struct iscsi_task_context request;
+ struct e4_iscsi_task_context request;
unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma;
u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 8b637d1fe5a4..f57a94b4f0d9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
struct qedi_ctx *qedi;
struct qedi_endpoint *qedi_ep;
- struct async_data *data;
+ struct iscsi_eqe_data *data;
int rval = 0;
if (!context || !fw_handle) {
@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
- data = (struct async_data *)fw_handle;
+ data = (struct iscsi_eqe_data *)fw_handle;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
- "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
- data->cid, data->itid, data->error_code,
- data->fw_debug_param);
+ "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+ data->icid, data->conn_id, data->error_code,
+ data->error_pdu_opcode_reserved);
- qedi_ep = qedi->ep_tbl[data->cid];
+ qedi_ep = qedi->ep_tbl[data->icid];
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
"Cannot process event, ep already disconnected, cid=0x%x\n",
- data->cid);
+ data->icid);
WARN_ON(1);
return -ENODEV;
}
@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block), &sb_phys,
+ sizeof(struct status_block_e4), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
- qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
err_alloc_mem:
return rval;
@@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -1017,7 +1016,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
@@ -1264,8 +1263,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, i);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
pbl++;
}
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index d61e3ac22e67..8a0e523fc089 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
* this source tree.
*/
-#define QEDI_MODULE_VERSION "8.10.4.0"
+#define QEDI_MODULE_VERSION "8.33.0.20"
#define QEDI_DRIVER_MAJOR_VER 8
-#define QEDI_DRIVER_MINOR_VER 10
-#define QEDI_DRIVER_REV_VER 4
-#define QEDI_DRIVER_ENG_VER 0
+#define QEDI_DRIVER_MINOR_VER 33
+#define QEDI_DRIVER_REV_VER 0
+#define QEDI_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 01f08c03f2c1..b784002ef0bd 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -4,13 +4,49 @@
#include <scsi/scsi_dbg.h>
#include "scsi_debugfs.h"
+#define SCSI_CMD_FLAG_NAME(name) [ilog2(SCMD_##name)] = #name
+static const char *const scsi_cmd_flags[] = {
+ SCSI_CMD_FLAG_NAME(TAGGED),
+ SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
+ SCSI_CMD_FLAG_NAME(INITIALIZED),
+};
+#undef SCSI_CMD_FLAG_NAME
+
+static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
+ const char *const *flag_name, int flag_name_count)
+{
+ bool sep = false;
+ int i;
+
+ for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
+ if (!(flags & BIT(i)))
+ continue;
+ if (sep)
+ seq_puts(m, "|");
+ sep = true;
+ if (i < flag_name_count && flag_name[i])
+ seq_puts(m, flag_name[i]);
+ else
+ seq_printf(m, "%d", i);
+ }
+ return 0;
+}
+
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
- int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
- char buf[80];
+ int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
+ int timeout_ms = jiffies_to_msecs(rq->timeout);
+ const u8 *const cdb = READ_ONCE(cmd->cmnd);
+ char buf[80] = "(?)";
- __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
- seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
- cmd->retries, msecs / 1000, msecs % 1000);
+ if (cdb)
+ __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
+ seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
+ cmd->retries, cmd->result);
+ scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
+ ARRAY_SIZE(scsi_cmd_flags));
+ seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago",
+ timeout_ms / 1000, timeout_ms % 1000,
+ alloc_ms / 1000, alloc_ms % 1000);
}
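
A minimal standalone sketch (userspace C, hypothetical names) of the technique the scsi_debugfs.c hunk above relies on: a flag-name table indexed by bit position via designated initializers, plus a printer that walks the set bits and falls back to the raw bit number when a bit has no name. This only mirrors the pattern; it is not the kernel code.

#include <stdio.h>

/* Bit positions, standing in for ilog2(SCMD_*) in the hunk above. */
enum { F_TAGGED = 0, F_ISA_DMA = 1, F_INITIALIZED = 2 };

static const char *const flag_names[] = {
	[F_TAGGED]      = "TAGGED",
	[F_ISA_DMA]     = "UNCHECKED_ISA_DMA",
	[F_INITIALIZED] = "INITIALIZED",
};

/* Print "NAME|NAME|7"-style output, like scsi_flags_show() does with seq_puts(). */
static void show_flags(unsigned long flags)
{
	int printed = 0;

	for (unsigned int i = 0; i < sizeof(flags) * 8; i++) {
		if (!(flags & (1UL << i)))
			continue;
		if (printed++)
			putchar('|');
		if (i < sizeof(flag_names) / sizeof(flag_names[0]) && flag_names[i])
			fputs(flag_names[i], stdout);
		else
			printf("%u", i);	/* unnamed bit: print its number */
	}
	putchar('\n');
}

int main(void)
{
	show_flags((1UL << F_TAGGED) | (1UL << F_INITIALIZED) | (1UL << 7));
	/* prints: TAGGED|INITIALIZED|7 */
	return 0;
}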
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 769fdcba61df..f3b117246d47 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
};
-static const char spaces[] = "                "; /* 16 of them */
static blist_flags_t scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
@@ -296,20 +295,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
size_t from_length;
from_length = strlen(from);
- strncpy(to, from, min(to_length, from_length));
- if (from_length < to_length) {
- if (compatible) {
- /*
- * NUL terminate the string if it is short.
- */
- to[from_length] = '\0';
- } else {
- /*
- * space pad the string if it is short.
- */
- strncpy(&to[from_length], spaces,
- to_length - from_length);
- }
+ /* This zero-pads the destination */
+ strncpy(to, from, to_length);
+ if (from_length < to_length && !compatible) {
+ /*
+ * space pad the string if it is short.
+ */
+ memset(&to[from_length], ' ', to_length - from_length);
}
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
@@ -381,10 +373,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
model, compatible);
if (strflags)
- devinfo->flags = simple_strtoul(strflags, NULL, 0);
- else
- devinfo->flags = flags;
-
+ flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+ devinfo->flags = flags;
devinfo->compatible = compatible;
if (compatible)
@@ -457,7 +447,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
/*
* vendor strings must be an exact match
*/
- if (vmax != strlen(devinfo->vendor) ||
+ if (vmax != strnlen(devinfo->vendor,
+ sizeof(devinfo->vendor)) ||
memcmp(devinfo->vendor, vskip, vmax))
continue;
@@ -465,7 +456,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* @model specifies the full string, and
* must be larger or equal to devinfo->model
*/
- mlen = strlen(devinfo->model);
+ mlen = strnlen(devinfo->model, sizeof(devinfo->model));
if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
continue;
return devinfo;
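
For reference, a small userspace sketch (hypothetical helper name copy_devinfo_field) of the copy semantics the scsi_strcpy_devinfo() hunk above switches to: strncpy() already zero-pads a short source into the fixed-width destination, and only the non-compatible case then space-pads the tail instead.

#include <stdio.h>
#include <string.h>

static void copy_devinfo_field(char *to, size_t to_len, const char *from,
			       int compatible)
{
	size_t from_len = strlen(from);

	strncpy(to, from, to_len);	/* zero-pads the destination if @from is short */
	if (from_len < to_len && !compatible)
		memset(&to[from_len], ' ', to_len - from_len);	/* space-pad instead */
	if (from_len > to_len)
		fprintf(stderr, "'%s' is too long, truncated\n", from);
}

int main(void)
{
	char vendor[8];

	copy_devinfo_field(vendor, sizeof(vendor), "HP", 0);
	printf("[%.*s]\n", (int)sizeof(vendor), vendor);	/* [HP      ] */
	return 0;
}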
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c9844043504e..c84f931388f2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -79,14 +79,15 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
- SCSI_SENSE_BUFFERSIZE, 0,
- SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+ SCSI_SENSE_BUFFERSIZE, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
if (!scsi_sense_isadma_cache)
ret = -ENOMEM;
} else {
scsi_sense_cache =
- kmem_cache_create("scsi_sense_cache",
- SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ kmem_cache_create_usercopy("scsi_sense_cache",
+ SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
+ 0, SCSI_SENSE_BUFFERSIZE, NULL);
if (!scsi_sense_cache)
ret = -ENOMEM;
}
@@ -1987,6 +1988,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
out_put_device:
put_device(&sdev->sdev_gendev);
out:
+ if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
return false;
}
@@ -2048,9 +2051,9 @@ out_put_budget:
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
- if (atomic_read(&sdev->device_busy) == 0 &&
- !scsi_device_blocked(sdev))
- blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
+ if (atomic_read(&sdev->device_busy) ||
+ scsi_device_blocked(sdev))
+ ret = BLK_STS_DEV_RESOURCE;
break;
default:
/*
@@ -2168,11 +2171,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
q->limits.cluster = 0;
/*
- * set a reasonable default alignment on word boundaries: the
- * host and device may alter it using
- * blk_queue_update_dma_alignment() later.
+ * Set a reasonable default alignment: The larger of 32-bit (dword),
+ * which is a common minimum for HBAs, and the minimum DMA alignment,
+ * which is set by the platform.
+ *
+ * Devices that require a bigger alignment can increase it later.
*/
- blk_queue_dma_alignment(q, 0x03);
+ blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
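
A quick userspace illustration, with assumed values, of the mask convention behind blk_queue_dma_alignment() in the hunk above: the argument is alignment minus one, i.e. a mask of address and length bits that must be zero, which is roughly how the block layer's alignment check treats it.

#include <stdint.h>
#include <stdio.h>

static int buffer_ok(uintptr_t addr, size_t len, unsigned long dma_mask)
{
	/* Both the start address and the length must clear the masked bits. */
	return ((addr | len) & dma_mask) == 0;
}

int main(void)
{
	unsigned long cache_align = 64;	/* pretend dma_get_cache_alignment() returned 64 */
	unsigned long mask = (cache_align > 4 ? cache_align : 4) - 1;

	printf("mask=0x%lx\n", mask);				/* 0x3f */
	printf("aligned:   %d\n", buffer_ok(0x1000, 512, mask));	/* 1 */
	printf("unaligned: %d\n", buffer_ok(0x1003, 512, mask));	/* 0 */
	return 0;
}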
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index be5e919db0e8..0880d975eed3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
- int *bflags, int async)
+ blist_flags_t *bflags, int async)
{
int ret;
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
* - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
- u64 lun, int *bflagsp,
+ u64 lun, blist_flags_t *bflagsp,
struct scsi_device **sdevp,
enum scsi_scan_mode rescan,
void *hostdata)
{
struct scsi_device *sdev;
unsigned char *result;
- int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+ blist_flags_t bflags;
+ int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
/*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* Modifies sdevscan->lun.
**/
static void scsi_sequential_lun_scan(struct scsi_target *starget,
- int bflags, int scsi_level,
+ blist_flags_t bflags, int scsi_level,
enum scsi_scan_mode rescan)
{
uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
* 0: scan completed (or no memory, so further scanning is futile)
* 1: could not scan with REPORT LUN
**/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
enum scsi_scan_mode rescan)
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
struct Scsi_Host *shost = dev_to_shost(parent);
- int bflags = 0;
+ blist_flags_t bflags = 0;
int res;
struct scsi_target *starget;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index cbc0fe2c5485..91b90f672d23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name) \
+ [ilog2((__force unsigned int)BLIST_##name)] = #name
static const char *const sdev_bflags_name[] = {
#include "scsi_devinfo_tbl.c"
};
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
const char *name = NULL;
- if (!(sdev->sdev_bflags & BIT(i)))
+ if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
continue;
if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
name = sdev_bflags_name[i];
@@ -1411,7 +1412,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
* check.
*/
if (sdev->channel != starget->channel ||
- sdev->id != starget->id ||
+ sdev->id != starget->id)
+ continue;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL ||
!get_device(&sdev->sdev_gendev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index d0219e36080c..871ea582029e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -50,14 +51,14 @@
/* Our blacklist flags */
enum {
- SPI_BLIST_NOIUS = 0x1,
+ SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
};
/* blacklist table, modelled on scsi_devinfo.c */
static struct {
char *vendor;
char *model;
- unsigned flags;
+ blist_flags_t flags;
} spi_static_device_list[] __initdata = {
{"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
{"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +222,11 @@ static int spi_device_configure(struct transport_container *tc,
{
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_target *starget = sdev->sdev_target;
- unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
- &sdev->inquiry[16],
- SCSI_DEVINFO_SPI);
+ blist_flags_t bflags;
+
+ bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+ &sdev->inquiry[16],
+ SCSI_DEVINFO_SPI);
/* Populate the target capability fields with the values
* gleaned from the device inquiry */
@@ -1007,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev)
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ /*
+ * Because this function and the power management code both call
+ * scsi_device_quiesce(), it is not safe to perform domain validation
+ * while suspend or resume is in progress. Hence the
+ * lock/unlock_system_sleep() calls.
+ */
+ lock_system_sleep();
+
if (unlikely(spi_dv_in_progress(starget)))
- return;
+ goto unlock;
if (unlikely(scsi_device_get(sdev)))
- return;
+ goto unlock;
+
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
@@ -1047,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev)
out_put:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
+unlock:
+ unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b45f7fc5620..3541caf3fceb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -851,16 +851,13 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
- int ret;
if (!(rq->cmd_flags & REQ_NOUNMAP)) {
switch (sdkp->zeroing_mode) {
case SD_ZERO_WS16_UNMAP:
- ret = sd_setup_write_same16_cmnd(cmd, true);
- goto out;
+ return sd_setup_write_same16_cmnd(cmd, true);
case SD_ZERO_WS10_UNMAP:
- ret = sd_setup_write_same10_cmnd(cmd, true);
- goto out;
+ return sd_setup_write_same10_cmnd(cmd, true);
}
}
@@ -868,15 +865,9 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
return BLKPREP_INVALID;
if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
- ret = sd_setup_write_same16_cmnd(cmd, false);
- else
- ret = sd_setup_write_same10_cmnd(cmd, false);
-
-out:
- if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
- return sd_zbc_write_lock_zone(cmd);
+ return sd_setup_write_same16_cmnd(cmd, false);
- return ret;
+ return sd_setup_write_same10_cmnd(cmd, false);
}
static void sd_config_write_same(struct scsi_disk *sdkp)
@@ -964,12 +955,6 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
- if (sd_is_zoned(sdkp)) {
- ret = sd_zbc_write_lock_zone(cmd);
- if (ret != BLKPREP_OK)
- return ret;
- }
-
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
@@ -1004,9 +989,6 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
ret = scsi_init_io(cmd);
rq->__data_len = nr_bytes;
- if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
- sd_zbc_write_unlock_zone(cmd);
-
return ret;
}
@@ -1036,19 +1018,12 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
sector_t threshold;
unsigned int this_count = blk_rq_sectors(rq);
unsigned int dif, dix;
- bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE;
int ret;
unsigned char protect;
- if (zoned_write) {
- ret = sd_zbc_write_lock_zone(SCpnt);
- if (ret != BLKPREP_OK)
- return ret;
- }
-
ret = scsi_init_io(SCpnt);
if (ret != BLKPREP_OK)
- goto out;
+ return ret;
WARN_ON_ONCE(SCpnt != rq->special);
/* from here on until we're complete, any goto out
@@ -1267,9 +1242,6 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
*/
ret = BLKPREP_OK;
out:
- if (zoned_write && ret != BLKPREP_OK)
- sd_zbc_write_unlock_zone(SCpnt);
-
return ret;
}
@@ -1312,17 +1284,16 @@ static int sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
-
- if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
- sd_zbc_write_unlock_zone(SCpnt);
+ u8 *cmnd;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
__free_page(rq->special_vec.bv_page);
if (SCpnt->cmnd != scsi_req(rq)->cmd) {
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ cmnd = SCpnt->cmnd;
SCpnt->cmnd = NULL;
SCpnt->cmd_len = 0;
+ mempool_free(cmnd, sd_cdb_pool);
}
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 320de758323e..0d663b5e45bb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -77,7 +77,6 @@ struct scsi_disk {
unsigned int nr_zones;
unsigned int zone_blocks;
unsigned int zone_shift;
- unsigned long *zones_wlock;
unsigned int zones_optimal_open;
unsigned int zones_optimal_nonseq;
unsigned int zones_max_open;
@@ -283,8 +282,6 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_remove(struct scsi_disk *sdkp);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd);
-extern void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
@@ -302,14 +299,6 @@ static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
-{
- /* Let the drive fail requests */
- return BLKPREP_OK;
-}
-
-static inline void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) {}
-
static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
return BLKPREP_INVALID;
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 9049a189c8e5..89cf4498f535 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -230,17 +230,6 @@ static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
}
/**
- * sd_zbc_zone_no - Get the number of the zone containing a sector.
- * @sdkp: The target disk
- * @sector: 512B sector address contained in the zone
- */
-static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
- sector_t sector)
-{
- return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
-}
-
-/**
* sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
* @cmd: the command to setup
*
@@ -279,78 +268,6 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
}
/**
- * sd_zbc_write_lock_zone - Write lock a sequential zone.
- * @cmd: write command
- *
- * Called from sd_init_cmd() for write requests (standard write, write same or
- * write zeroes operations). If the request target zone is not already locked,
- * the zone is locked and BLKPREP_OK returned, allowing the request to proceed
- * through dispatch in scsi_request_fn(). Otherwise, BLKPREP_DEFER is returned,
- * forcing the request to wait for the zone to be unlocked, that is, for the
- * previously issued write request targeting the same zone to complete.
- *
- * This is called from blk_peek_request() context with the queue lock held and
- * before the request is removed from the scheduler. As a result, multiple
- * contexts executing concurrently scsi_request_fn() cannot result in write
- * sequence reordering as only a single write request per zone is allowed to
- * proceed.
- */
-int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- sector_t sector = blk_rq_pos(rq);
- sector_t zone_sectors = sd_zbc_zone_sectors(sdkp);
- unsigned int zno = sd_zbc_zone_no(sdkp, sector);
-
- /*
- * Note: Checks of the alignment of the write command on
- * logical blocks is done in sd.c
- */
-
- /* Do not allow zone boundaries crossing on host-managed drives */
- if (blk_queue_zoned_model(sdkp->disk->queue) == BLK_ZONED_HM &&
- (sector & (zone_sectors - 1)) + blk_rq_sectors(rq) > zone_sectors)
- return BLKPREP_KILL;
-
- /*
- * Do not issue more than one write at a time per
- * zone. This solves write ordering problems due to
- * the unlocking of the request queue in the dispatch
- * path in the non scsi-mq case.
- */
- if (sdkp->zones_wlock &&
- test_and_set_bit(zno, sdkp->zones_wlock))
- return BLKPREP_DEFER;
-
- WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK);
- cmd->flags |= SCMD_ZONE_WRITE_LOCK;
-
- return BLKPREP_OK;
-}
-
-/**
- * sd_zbc_write_unlock_zone - Write unlock a sequential zone.
- * @cmd: write command
- *
- * Called from sd_uninit_cmd(). Unlocking the request target zone will allow
- * dispatching the next write request for the zone.
- */
-void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-
- if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) {
- unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
- WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
- cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
- clear_bit_unlock(zno, sdkp->zones_wlock);
- smp_mb__after_atomic();
- }
-}
-
-/**
* sd_zbc_complete - ZBC command post processing.
* @cmd: Completed command
* @good_bytes: Command reply bytes
@@ -581,8 +498,123 @@ out_free:
return ret;
}
+/**
+ * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
+ * @sdkp: The disk of the bitmap
+ */
+static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+
+ return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
+ * sizeof(unsigned long),
+ GFP_KERNEL, q->node);
+}
+
+/**
+ * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
+ * @sdkp: disk used
+ * @buf: report reply buffer
+ * @buflen: length of the report reply buffer @buf
+ * @seq_zones_bitmap: bitmap of sequential zones to set
+ *
+ * Parse reported zone descriptors in @buf to identify sequential zones and
+ * set the reported zone bit in @seq_zones_bitmap accordingly.
+ * Since read-only and offline zones cannot be written, do not
+ * mark them as sequential in the bitmap.
+ * Return the LBA after the last zone reported.
+ */
+static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ unsigned int buflen,
+ unsigned long *seq_zones_bitmap)
+{
+ sector_t lba, next_lba = sdkp->capacity;
+ unsigned int buf_len, list_length;
+ unsigned char *rec;
+ u8 type, cond;
+
+ list_length = get_unaligned_be32(&buf[0]) + 64;
+ buf_len = min(list_length, buflen);
+ rec = buf + 64;
+
+ while (rec < buf + buf_len) {
+ type = rec[0] & 0x0f;
+ cond = (rec[1] >> 4) & 0xf;
+ lba = get_unaligned_be64(&rec[16]);
+ if (type != ZBC_ZONE_TYPE_CONV &&
+ cond != ZBC_ZONE_COND_READONLY &&
+ cond != ZBC_ZONE_COND_OFFLINE)
+ set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
+ next_lba = lba + get_unaligned_be64(&rec[8]);
+ rec += 64;
+ }
+
+ return next_lba;
+}
+
+/**
+ * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
+ * @sdkp: target disk
+ *
+ * Allocate a zone bitmap and initialize it by identifying sequential zones.
+ */
+static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned long *seq_zones_bitmap;
+ sector_t lba = 0;
+ unsigned char *buf;
+ int ret = -ENOMEM;
+
+ seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
+ if (!seq_zones_bitmap)
+ return -ENOMEM;
+
+ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ while (lba < sdkp->capacity) {
+ ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
+ if (ret)
+ goto out;
+ lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+ seq_zones_bitmap);
+ }
+
+ if (lba != sdkp->capacity) {
+ /* Something went wrong */
+ ret = -EIO;
+ }
+
+out:
+ kfree(buf);
+ if (ret) {
+ kfree(seq_zones_bitmap);
+ return ret;
+ }
+
+ q->seq_zones_bitmap = seq_zones_bitmap;
+
+ return 0;
+}
+
+static void sd_zbc_cleanup(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+
+ kfree(q->seq_zones_bitmap);
+ q->seq_zones_bitmap = NULL;
+
+ kfree(q->seq_zones_wlock);
+ q->seq_zones_wlock = NULL;
+
+ q->nr_zones = 0;
+}
+
static int sd_zbc_setup(struct scsi_disk *sdkp)
{
+ struct request_queue *q = sdkp->disk->queue;
+ int ret;
/* READ16/WRITE16 is mandatory for ZBC disks */
sdkp->device->use_16_for_rw = 1;
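
The descriptor offsets used by sd_zbc_get_seq_zones() above imply the following REPORT ZONES layout: a 64-byte header whose first four bytes hold the big-endian list length, then 64-byte descriptors with the zone type in the low nibble of byte 0, the zone condition in the high nibble of byte 1, the zone length at bytes 8-15 and the start LBA at bytes 16-23. Below is a userspace sketch of that decoding with made-up values; the type/condition codes shown are assumptions from the ZBC spec, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const unsigned char *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	unsigned char rec[64] = {0};
	uint64_t start = 0x40000, len = 0x10000;

	rec[0] = 0x2;		/* assumed: sequential-write-required zone type */
	rec[1] = 0x1 << 4;	/* assumed: "empty" zone condition */
	for (int i = 0; i < 8; i++) {
		rec[8 + i]  = len   >> (8 * (7 - i));	/* zone length, big-endian */
		rec[16 + i] = start >> (8 * (7 - i));	/* start LBA, big-endian */
	}

	printf("type=%u cond=%u start=%llu len=%llu\n",
	       (unsigned)(rec[0] & 0x0f), (unsigned)((rec[1] >> 4) & 0xf),
	       (unsigned long long)get_be64(&rec[16]),
	       (unsigned long long)get_be64(&rec[8]));
	return 0;
}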
@@ -594,15 +626,36 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
sdkp->nr_zones =
round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
- if (!sdkp->zones_wlock) {
- sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!sdkp->zones_wlock)
- return -ENOMEM;
+ /*
+ * Initialize the device request queue information if the number
+ * of zones changed.
+ */
+ if (sdkp->nr_zones != q->nr_zones) {
+
+ sd_zbc_cleanup(sdkp);
+
+ q->nr_zones = sdkp->nr_zones;
+ if (sdkp->nr_zones) {
+ q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
+ if (!q->seq_zones_wlock) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
+ if (ret) {
+ sd_zbc_cleanup(sdkp);
+ goto err;
+ }
+ }
+
}
return 0;
+
+err:
+ sd_zbc_cleanup(sdkp);
+ return ret;
}
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
@@ -656,14 +709,14 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
err:
sdkp->capacity = 0;
+ sd_zbc_cleanup(sdkp);
return ret;
}
void sd_zbc_remove(struct scsi_disk *sdkp)
{
- kfree(sdkp->zones_wlock);
- sdkp->zones_wlock = NULL;
+ sd_zbc_cleanup(sdkp);
}
void sd_zbc_print_zones(struct scsi_disk *sdkp)
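
And a tiny userspace sketch (hypothetical sizes and macro names) of the one-bit-per-zone arithmetic behind sd_zbc_alloc_zone_bitmap() above: round the zone count up to whole longs, allocate zeroed storage, then set and test bits by zone number.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG_U   (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS_U(n) (((n) + BITS_PER_LONG_U - 1) / BITS_PER_LONG_U)

int main(void)
{
	unsigned int nr_zones = 51200;	/* hypothetical zone count */
	unsigned long *bitmap = calloc(BITS_TO_LONGS_U(nr_zones),
				       sizeof(unsigned long));
	unsigned int zno = 1234;

	if (!bitmap)
		return 1;

	/* Mark zone 1234 as sequential, like set_bit() in the hunk above. */
	bitmap[zno / BITS_PER_LONG_U] |= 1UL << (zno % BITS_PER_LONG_U);

	printf("zone %u is %s\n", zno,
	       (bitmap[zno / BITS_PER_LONG_U] >> (zno % BITS_PER_LONG_U)) & 1
	       ? "sequential" : "conventional");
	free(bitmap);
	return 0;
}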
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f098877eed4a..c198b96368dd 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1140,10 +1140,10 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon
}
#endif
-static unsigned int
+static __poll_t
sg_poll(struct file *filp, poll_table * wait)
{
- unsigned int res = 0;
+ __poll_t res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
@@ -1152,29 +1152,29 @@ sg_poll(struct file *filp, poll_table * wait)
sfp = filp->private_data;
if (!sfp)
- return POLLERR;
+ return EPOLLERR;
sdp = sfp->parentdp;
if (!sdp)
- return POLLERR;
+ return EPOLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
- res = POLLIN | POLLRDNORM;
+ res = EPOLLIN | EPOLLRDNORM;
++count;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (atomic_read(&sdp->detaching))
- res |= POLLHUP;
+ res |= EPOLLHUP;
else if (!sfp->cmd_q) {
if (0 == count)
- res |= POLLOUT | POLLWRNORM;
+ res |= EPOLLOUT | EPOLLWRNORM;
} else if (count < SG_MAX_QUEUE)
- res |= POLLOUT | POLLWRNORM;
+ res |= EPOLLOUT | EPOLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
- "sg_poll: res=0x%x\n", (int) res));
+ "sg_poll: res=0x%x\n", (__force u32) res));
return res;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 620510787763..8c51d628b52e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case TEST_UNIT_READY:
break;
default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_ERROR);
}
break;
case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8196976182c9..c7da2c185990 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6561,12 +6561,15 @@ static int ufshcd_config_vreg(struct device *dev,
struct ufs_vreg *vreg, bool on)
{
int ret = 0;
- struct regulator *reg = vreg->reg;
- const char *name = vreg->name;
+ struct regulator *reg;
+ const char *name;
int min_uV, uA_load;
BUG_ON(!vreg);
+ reg = vreg->reg;
+ name = vreg->name;
+
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);