Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aha152x.c | 235
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 6
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 3
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 2
-rw-r--r--  drivers/scsi/isci/host.c | 6
-rw-r--r--  drivers/scsi/libiscsi.c | 28
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 120
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 88
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 27
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 81
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 3
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 5
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 52
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 33
-rw-r--r--  drivers/scsi/pmcraid.c | 491
-rw-r--r--  drivers/scsi/pmcraid.h | 33
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 69
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 3
-rw-r--r--  drivers/scsi/scsi_debug.c | 205
-rw-r--r--  drivers/scsi/scsi_logging.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 5
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 239
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/scsi/sr_ioctl.c | 15
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 15
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 17
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r--  drivers/scsi/ufs/ufshpb.c | 11
-rw-r--r--  drivers/scsi/virtio_scsi.c | 8
-rw-r--r--  drivers/scsi/zorro7xx.c | 2
50 files changed, 754 insertions(+), 1138 deletions(-)
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 5f554a3a0f62..caeebfb67149 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -317,14 +317,18 @@ enum {
};
struct aha152x_cmd_priv {
- struct scsi_pointer scsi_pointer;
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int sent_command;
+ int phase;
};
-static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd)
+static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd)
{
- struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd);
-
- return &acmd->scsi_pointer;
+ return scsi_cmd_priv(cmd);
}
MODULE_AUTHOR("Jürgen Fischer");
@@ -890,17 +894,16 @@ void aha152x_release(struct Scsi_Host *shpnt)
static int setup_expected_interrupts(struct Scsi_Host *shpnt)
{
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
- scsi_pointer->phase |= 1 << 16;
+ acp->phase |= 1 << 16;
- if (scsi_pointer->phase & selecting) {
+ if (acp->phase & selecting) {
SETPORT(SSTAT1, SELTO);
SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
SETPORT(SIMODE1, ENSELTIMO);
} else {
- SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0);
+ SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0);
SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
}
} else if(STATE==seldi) {
@@ -924,17 +927,16 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
struct completion *complete, int phase)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt);
+ struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
- scsi_pointer->phase = not_issued | phase;
- scsi_pointer->Status = 0x1; /* Ilegal status by SCSI standard */
- scsi_pointer->Message = 0;
- scsi_pointer->have_data_in = 0;
- scsi_pointer->sent_command = 0;
+ acp->phase = not_issued | phase;
+ acp->status = 0x1; /* Illegal status by SCSI standard */
+ acp->message = 0;
+ acp->sent_command = 0;
- if (scsi_pointer->phase & (resetting | check_condition)) {
+ if (acp->phase & (resetting | check_condition)) {
if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
return FAILED;
@@ -957,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
SCp.phase : current state of the command */
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
- scsi_pointer->ptr = NULL;
- scsi_pointer->this_residual = 0;
+ acp->ptr = NULL;
+ acp->this_residual = 0;
scsi_set_resid(SCpnt, 0);
- scsi_pointer->buffer = NULL;
+ acp->buffer = NULL;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- scsi_pointer->buffer = scsi_sglist(SCpnt);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = scsi_sglist(SCpnt);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
DO_LOCK(flags);
@@ -1015,7 +1017,7 @@ static void reset_done(struct scsi_cmnd *SCpnt)
static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
{
- if (aha152x_scsi_pointer(SCpnt)->phase & resetting)
+ if (aha152x_priv(SCpnt)->phase & resetting)
reset_done(SCpnt);
else
scsi_done(SCpnt);
@@ -1101,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
DO_LOCK(flags);
- if (aha152x_scsi_pointer(SCpnt)->phase & resetted) {
+ if (aha152x_priv(SCpnt)->phase & resetted) {
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
@@ -1395,31 +1397,30 @@ static void busfree_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRBUSFREE);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
#if defined(AHA152X_STAT)
action++;
#endif
- scsi_pointer->phase &= ~syncneg;
+ acp->phase &= ~syncneg;
- if (scsi_pointer->phase & completed) {
+ if (acp->phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, scsi_pointer->Status, DID_OK);
+ done(shpnt, acp->status, DID_OK);
- } else if (scsi_pointer->phase & aborted) {
- done(shpnt, scsi_pointer->Status, DID_ABORT);
+ } else if (acp->phase & aborted) {
+ done(shpnt, acp->status, DID_ABORT);
- } else if (scsi_pointer->phase & resetted) {
- done(shpnt, scsi_pointer->Status, DID_RESET);
+ } else if (acp->phase & resetted) {
+ done(shpnt, acp->status, DID_RESET);
- } else if (scsi_pointer->phase & disconnected) {
+ } else if (acp->phase & disconnected) {
/* target sent DISCONNECT */
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->disconnections++;
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
- scsi_pointer->phase |= 1 << 16;
+ acp->phase |= 1 << 16;
CURRENT_SC = NULL;
} else {
@@ -1438,24 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
action++;
#endif
- if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) {
+ if (aha152x_priv(DONE_SC)->phase & check_condition) {
struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
struct aha152x_scdata *sc = SCDATA(cmd);
scsi_eh_restore_cmnd(cmd, &sc->ses);
- aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION;
+ aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- } else if (aha152x_scsi_pointer(DONE_SC)->Status ==
- SAM_STAT_CHECK_CONDITION) {
+ } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) {
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->busfree_with_check_condition++;
#endif
- if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) {
+ if (!(aha152x_priv(DONE_SC)->phase & not_issued)) {
struct aha152x_scdata *sc;
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -1480,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) {
+ if (!(aha152x_priv(ptr)->phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
@@ -1503,13 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
DO_UNLOCK(flags);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
#if defined(AHA152X_STAT)
action++;
#endif
- scsi_pointer->phase |= selecting;
+ acp->phase |= selecting;
/* clear selection timeout */
SETPORT(SSTAT1, SELTO);
@@ -1537,13 +1536,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
*/
static void seldo_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
SETPORT(SCSISIG, 0);
SETPORT(SSTAT1, CLRBUSFREE);
SETPORT(SSTAT1, CLRPHASECHG);
- scsi_pointer->phase &= ~(selecting | not_issued);
+ acp->phase &= ~(selecting | not_issued);
SETPORT(SCSISEQ, 0);
@@ -1558,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
- if (scsi_pointer->phase & aborting) {
+ if (acp->phase & aborting) {
ADDMSGO(ABORT);
- } else if (scsi_pointer->phase & resetting) {
+ } else if (acp->phase & resetting) {
ADDMSGO(BUS_DEVICE_RESET);
} else if (SYNCNEG==0 && SYNCHRONOUS) {
- scsi_pointer->phase |= syncneg;
+ acp->phase |= syncneg;
MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
SYNCNEG=1; /* negotiation in progress */
}
@@ -1578,7 +1577,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
*/
static void selto_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp;
SETPORT(SCSISEQ, 0);
SETPORT(SSTAT1, CLRSELTIMO);
@@ -1586,9 +1585,10 @@ static void selto_run(struct Scsi_Host *shpnt)
if (!CURRENT_SC)
return;
- scsi_pointer->phase &= ~selecting;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->phase &= ~selecting;
- if (scsi_pointer->phase & aborted)
+ if (acp->phase & aborted)
done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
@@ -1616,10 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRPHASECHG);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
- if (!(scsi_pointer->phase & not_issued))
+ if (!(acp->phase & not_issued))
scmd_printk(KERN_ERR, CURRENT_SC,
"command should not have been issued yet\n");
@@ -1676,7 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
static void msgi_run(struct Scsi_Host *shpnt)
{
for(;;) {
- struct scsi_pointer *scsi_pointer;
+ struct aha152x_cmd_priv *acp;
int sstat1 = GETPORT(SSTAT1);
if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
@@ -1714,9 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- scsi_pointer->Message = MSGI(0);
- scsi_pointer->phase &= ~disconnected;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
+ acp->phase &= ~disconnected;
MSGILEN=0;
@@ -1724,8 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- scsi_pointer->Message = MSGI(0);
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
switch (MSGI(0)) {
case DISCONNECT:
@@ -1733,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
scmd_printk(KERN_WARNING, CURRENT_SC,
"target was not allowed to disconnect\n");
- scsi_pointer->phase |= disconnected;
+ acp->phase |= disconnected;
break;
case COMMAND_COMPLETE:
- scsi_pointer->phase |= completed;
+ acp->phase |= completed;
break;
case MESSAGE_REJECT:
@@ -1867,11 +1866,9 @@ static void msgi_end(struct Scsi_Host *shpnt)
*/
static void msgo_init(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-
if(MSGOLEN==0) {
- if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 &&
- SYNCRATE==0) {
+ if ((aha152x_priv(CURRENT_SC)->phase & syncneg) &&
+ SYNCNEG == 2 && SYNCRATE == 0) {
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
} else {
scmd_printk(KERN_INFO, CURRENT_SC,
@@ -1888,7 +1885,7 @@ static void msgo_init(struct Scsi_Host *shpnt)
*/
static void msgo_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
while(MSGO_I<MSGOLEN) {
if (TESTLO(SSTAT0, SPIORDY))
@@ -1901,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
if (MSGO(MSGO_I) & IDENTIFY_BASE)
- scsi_pointer->phase |= identified;
+ acp->phase |= identified;
if (MSGO(MSGO_I)==ABORT)
- scsi_pointer->phase |= aborted;
+ acp->phase |= aborted;
if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
- scsi_pointer->phase |= resetted;
+ acp->phase |= resetted;
SETPORT(SCSIDAT, MSGO(MSGO_I++));
}
@@ -1936,7 +1933,7 @@ static void msgo_end(struct Scsi_Host *shpnt)
*/
static void cmd_init(struct Scsi_Host *shpnt)
{
- if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) {
+ if (aha152x_priv(CURRENT_SC)->sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
@@ -1967,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
"command sent incompletely (%d/%d)\n",
CMD_I, CURRENT_SC->cmd_len);
else
- aha152x_scsi_pointer(CURRENT_SC)->sent_command++;
+ aha152x_priv(CURRENT_SC)->sent_command++;
}
/*
@@ -1979,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SPIORDY))
return;
- aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT);
+ aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT);
}
@@ -2003,7 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt)
static void datai_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer;
+ struct aha152x_cmd_priv *acp;
unsigned long the_time;
int fifodata, data_count;
@@ -2041,36 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt)
fifodata = GETPORT(FIFOSTAT);
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- if (scsi_pointer->this_residual > 0) {
- while (fifodata > 0 && scsi_pointer->this_residual > 0) {
- data_count = fifodata > scsi_pointer->this_residual ?
- scsi_pointer->this_residual :
- fifodata;
+ acp = aha152x_priv(CURRENT_SC);
+ if (acp->this_residual > 0) {
+ while (fifodata > 0 && acp->this_residual > 0) {
+ data_count = fifodata > acp->this_residual ?
+ acp->this_residual : fifodata;
fifodata -= data_count;
if (data_count & 1) {
SETPORT(DMACNTRL0, ENDMA|_8BIT);
- *scsi_pointer->ptr++ = GETPORT(DATAPORT);
- scsi_pointer->this_residual--;
+ *acp->ptr++ = GETPORT(DATAPORT);
+ acp->this_residual--;
DATA_LEN++;
SETPORT(DMACNTRL0, ENDMA);
}
if (data_count > 1) {
data_count >>= 1;
- insw(DATAPORT, scsi_pointer->ptr, data_count);
- scsi_pointer->ptr += 2 * data_count;
- scsi_pointer->this_residual -= 2 * data_count;
+ insw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
DATA_LEN += 2 * data_count;
}
- if (scsi_pointer->this_residual == 0 &&
- !sg_is_last(scsi_pointer->buffer)) {
+ if (acp->this_residual == 0 &&
+ !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
}
} else if (fifodata > 0) {
@@ -2138,15 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt)
static void datao_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
unsigned long the_time;
int data_count;
/* until phase changes or all data sent */
- while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) {
+ while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) {
data_count = 128;
- if (data_count > scsi_pointer->this_residual)
- data_count = scsi_pointer->this_residual;
+ if (data_count > acp->this_residual)
+ data_count = acp->this_residual;
if(TESTLO(DMASTAT, DFIFOEMP)) {
scmd_printk(KERN_ERR, CURRENT_SC,
@@ -2157,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt)
if(data_count & 1) {
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
- SETPORT(DATAPORT, *scsi_pointer->ptr++);
- scsi_pointer->this_residual--;
+ SETPORT(DATAPORT, *acp->ptr++);
+ acp->this_residual--;
CMD_INC_RESID(CURRENT_SC, -1);
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
}
if(data_count > 1) {
data_count >>= 1;
- outsw(DATAPORT, scsi_pointer->ptr, data_count);
- scsi_pointer->ptr += 2 * data_count;
- scsi_pointer->this_residual -= 2 * data_count;
+ outsw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
- if (scsi_pointer->this_residual == 0 &&
- !sg_is_last(scsi_pointer->buffer)) {
+ if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
the_time=jiffies + 100*HZ;
@@ -2192,7 +2187,7 @@ static void datao_run(struct Scsi_Host *shpnt)
static void datao_end(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
if(TESTLO(DMASTAT, DFIFOEMP)) {
u32 datao_cnt = GETSTCNT();
@@ -2211,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt)
sg = sg_next(sg);
}
- scsi_pointer->buffer = sg;
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done;
- scsi_pointer->this_residual = scsi_pointer->buffer->length -
- done;
+ acp->buffer = sg;
+ acp->ptr = SG_ADDRESS(acp->buffer) + done;
+ acp->this_residual = acp->buffer->length - done;
}
SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2229,7 +2223,6 @@ static void datao_end(struct Scsi_Host *shpnt)
*/
static int update_state(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
int dataphase=0;
unsigned int stat0 = GETPORT(SSTAT0);
unsigned int stat1 = GETPORT(SSTAT1);
@@ -2244,7 +2237,7 @@ static int update_state(struct Scsi_Host *shpnt)
} else if (stat0 & SELDI && PREVSTATE == busfree) {
STATE=seldi;
} else if (stat0 & SELDO && CURRENT_SC &&
- (scsi_pointer->phase & selecting)) {
+ (aha152x_priv(CURRENT_SC)->phase & selecting)) {
STATE=seldo;
} else if(stat1 & SELTO) {
STATE=selto;
@@ -2376,8 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(SXFRCTL0, CH1);
SETPORT(DMACNTRL0, 0);
if(CURRENT_SC)
- aha152x_scsi_pointer(CURRENT_SC)->phase &=
- ~spiordy;
+ aha152x_priv(CURRENT_SC)->phase &= ~spiordy;
}
/*
@@ -2399,8 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(DMACNTRL0, 0);
SETPORT(SXFRCTL0, CH1|SPIOEN);
if(CURRENT_SC)
- aha152x_scsi_pointer(CURRENT_SC)->phase |=
- spiordy;
+ aha152x_priv(CURRENT_SC)->phase |= spiordy;
}
/*
@@ -2490,7 +2481,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
*/
static void show_command(struct scsi_cmnd *ptr)
{
- const int phase = aha152x_scsi_pointer(ptr)->phase;
+ const int phase = aha152x_priv(ptr)->phase;
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
@@ -2538,8 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt)
static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr);
- const int phase = scsi_pointer->phase;
+ struct aha152x_cmd_priv *acp = aha152x_priv(ptr);
+ const int phase = acp->phase;
int i;
seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2549,8 +2540,8 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
seq_printf(m, "0x%02x ", ptr->cmnd[i]);
seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
- scsi_get_resid(ptr), scsi_pointer->this_residual,
- sg_nents(scsi_pointer->buffer) - 1);
+ scsi_get_resid(ptr), acp->this_residual,
+ sg_nents(acp->buffer) - 1);
if (phase & not_issued)
seq_puts(m, "not issued|");
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 679a4fd13874..793fe19993a9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
/****************************** PCI-X definitions *****************************/
#define PCIXR_COMMAND 0x96
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 2f0bdb9225a4..5fad41b1ab58 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahd_compose_id(device,
vendor,
subdevice,
@@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
* Record if this is an HP board.
*/
subvendor = ahd_pci_read_config(ahd->dev_softc,
- PCIR_SUBVEND_0, /*bytes*/2);
+ PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
if (subvendor == SUBID_HP)
ahd->flags |= AHD_HP_BOARD;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 4782a304e93c..51d9f4de0734 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
typedef enum
{
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index dab3a6d12c4d..2d4c85426dc3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
/*
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 0103f811cc25..776544385598 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1169,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
ofld_kcqe->fcoe_conn_context_id);
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
goto ofld_cmpl_err;
}
/*
@@ -1226,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ERR PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mismatch\n");
return;
}
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
goto enbl_cmpl_err;
}
if (!ofld_kcqe->completion_status)
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5521469ce678..6c864b093ac9 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
break;
- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
@@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+ printk(KERN_ALERT "conn destroy- error hba mismatch\n");
return;
}
@@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+ printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n");
return;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fe86fd61a995..15fbd09baa94 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
/* Must suspend all rx queue activity for this ep */
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
}
/* CONN_DISCONNECT timeout may or may not be an issue depending
* on what transcribed in TCP layer, different targets behave
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 8c7d4dda4cf2..4365d52c6430 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, conn 0x%p.\n", csk, conn);
- if (unlikely(!conn || conn->suspend_rx)) {
+ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
csk, conn, conn ? conn->id : 0xFF,
- conn ? conn->suspend_rx : 0xFF);
+ conn ? conn->flags : 0xFF);
return;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 37d06f993b76..1d9be771f3ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA:
- return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
- return BLK_STS_AGAIN;
+ return BLK_STS_OK;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 461ef8a76c4c..4bda2f6cb352 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -442,7 +442,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_INTERNAL_ABORT:
hisi_sas_task_prep_abort(hisi_hba, slot);
break;
- fallthrough;
default:
return;
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 80238e6a3c98..eee1a24f7e15 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -36,7 +36,7 @@
#define IBMVSCSIS_VERSION "v0.2"
-#define INITIAL_SRP_LIMIT 800
+#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d690d9cf7eb1..35589b6af90d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -413,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for io request object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -428,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for remote device object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -462,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
} else
dev_err(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received event 0x%x "
- "for remote device object 0x%0x that doesnt "
+ "for remote device object 0x%0x that doesn't "
"exist.\n",
__func__,
ihost,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d09926e6c8a8..797abf4f5399 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task;
itt_t itt;
- if (session->state == ISCSI_STATE_TERMINATE)
+ if (session->state == ISCSI_STATE_TERMINATE ||
+ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
return NULL;
if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
return true;
}
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* Do this after dropping the extra ref because if this was a requeue
* it's removed from that list and cleanup_queued_task would miss it.
*/
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
/*
* Save the task and ref in case we weren't cleaning up this
* task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->frwd_lock);
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto fault;
}
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE << 16;
goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->frwd_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
flush_workqueue(ihost->workq);
}
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
iscsi_conn_queue_work(conn);
}
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
+ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
if (!is_active) {
/*
* if logout timed out before userspace could even send a PDU
@@ -3045,7 +3048,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
- memset(conn, 0, sizeof(*conn) + dd_size);
conn->dd_data = cls_conn->dd_data + sizeof(*conn);
conn->session = session;
@@ -3318,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
+
+ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
spin_unlock_bh(&session->frwd_lock);
/*
@@ -3330,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
/*
* Unblock xmitworker(), Login Phase will pass through.
*/
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2e9ffe3d1a55..883005757ddb 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
*/
conn->last_recv = jiffies;
- if (unlikely(conn->suspend_rx)) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");
*status = ISCSI_TCP_SUSPENDED;
return 0;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f0cf8ffdc5f3..0025760230e5 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -897,6 +897,11 @@ enum lpfc_irq_chann_mode {
NHT_MODE,
};
+enum lpfc_hba_bit_flags {
+ FABRIC_COMANDS_BLOCKED,
+ HBA_PCI_ERR,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -1043,7 +1048,6 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
@@ -1350,7 +1354,6 @@ struct lpfc_hba {
atomic_t fabric_iocb_count;
struct timer_list fabric_block_timer;
unsigned long bit_flags;
-#define FABRIC_COMANDS_BLOCKED 0
atomic_t num_rsrc_err;
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 96408cd6c4c8..9897a1aa387b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -670,3 +670,6 @@ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
uint32_t hash, uint8_t *buf);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ef6e8cd8c26a..872a26376ccb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1330,7 +1330,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
/* FLOGI needs to be 3 for WQE FCFI */
- ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1);
+ ct = SLI4_CT_FCFI;
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
/* Set the fcfi to the fcfi we registered with */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0144da30e3db..2b877dff5ed4 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
ndlp = rdata->pnode;
if (!rdata->pnode) {
- pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
- __func__, rport, rport->scsi_target_id);
+ pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
return -EINVAL;
}
@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d\n",
+ "load_flag x%x refcnt %d state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
- vport->load_flag, kref_read(&ndlp->kref));
+ vport->load_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down.
* The teardown process cleans up the node via lpfc_drop_node.
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ /* clear the NLP_XPT_REGD if the node is not registered
+ * with nvme-fc
+ */
+ if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
/* Remove the node reference from remote_port_add now.
* The driver will not call remote_port_delete.
@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- /* We need to hold the node by incrementing the reference
- * count until this queued work is done
- */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (phba->worker_thread) {
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3188 worker thread is stopped %s x%06x, "
+ " rport x%px flg x%x load_flag x%x refcnt "
+ "%d\n", __func__, ndlp->nlp_DID,
+ ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* Node is in dev loss. No further transaction. */
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ ndlp->nlp_state, ndlp->nlp_rpi,
+ kref_read(&ndlp->kref));
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
int free_evt;
int fcf_inuse;
uint32_t nlp_did;
+ bool hba_pci_err;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
spin_unlock_irq(&phba->hbalock);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- lpfc_els_retry_delay_handler(ndlp);
- free_evt = 0; /* evt is part of ndlp */
+ if (!hba_pci_err) {
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ }
/* decrement the node reference count held
* for this queued work
*/
@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_RECOVER_PORT:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
- free_evt = 0;
+ if (!hba_pci_err) {
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ }
/* decrement the node reference count held for
* this queued work
*/
@@ -859,14 +890,18 @@ lpfc_work_done(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
+ bool hba_pci_err;
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ if (hba_pci_err)
+ ha_copy = 0;
/* First, try to post the next mailbox command to SLI4 device */
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
if (ha_copy & HA_ERATT) {
@@ -886,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_latt(phba);
/* Handle VMID Events */
- if (lpfc_is_vmid_enabled(phba)) {
+ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
if (phba->pport->work_port_events &
WORKER_CHECK_VMID_ISSUE_QFPA) {
lpfc_check_vmid_qfpa_issue(phba);
@@ -936,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
+ if (hba_pci_err)
+ continue;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
@@ -1173,12 +1210,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
+ int offline;
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ offline = pci_channel_offline(phba->pcidev);
phba->defer_flogi_acc_flag = false;
@@ -1219,7 +1258,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any SLI3 firmware default rpi's */
- if (phba->sli_rev > LPFC_SLI_REV3)
+ if (phba->sli_rev > LPFC_SLI_REV3 || offline)
goto skip_unreg_did;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4712,6 +4751,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0999 %s Not regd: ndlp x%px rport x%px DID "
+ "x%x FLG x%x XPT x%x\n",
+ __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
return;
}
@@ -4722,6 +4766,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
+ } else if (!ndlp->rport) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+ " XPT x%x refcnt %d\n",
+ __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
}
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
@@ -5371,6 +5422,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
+ lpfc_nlp_put(ndlp);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
@@ -6097,12 +6149,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state. When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and transport notified that the node is gone.
+ * Return Code:
+ * none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
+}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
+ if (pci_channel_offline(vport->phba->pcidev))
+ lpfc_notify_xport_npr(vport);
}
/*****************************************************************************/
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index eed6464bd880..461d333b1b3a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1642,7 +1643,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
if (phba->link_state == LPFC_HBA_ERROR &&
- phba->hba_flag & HBA_PCI_ERR) {
+ test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1985,6 +1986,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
+ lpfc_sli_flush_io_rings(phba);
return;
}
@@ -2973,6 +2975,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
NLP_EVT_DEVICE_RM);
}
+ /* This is a special case flush to return all
+ * IOs before entering this loop. There are
+ * two points in the code where a flush is
+ * avoided if the FC_UNLOADING flag is set.
+ * one is in the multipool destroy,
+ * (this prevents a crash) and the other is
+ * in the nvme abort handler, ( also prevents
+ * a crash). Both of these exceptions are
+ * cases where the slot is still accessible.
+ * The flush here is only when the pci slot
+ * is offline.
+ */
+ if (vport->load_flag & FC_UNLOADING &&
+ pci_channel_offline(phba->pcidev))
+ lpfc_sli_flush_io_rings(vport->phba);
+
/* At this point, ALL ndlp's should be gone
* because of the previous NLP_EVT_DEVICE_RM.
* Lets wait for this to happen, if needed.
@@ -2985,7 +3003,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_TRACE_EVENT,
+ LOG_DISCOVERY,
"0282 did:x%x ndlp:x%px "
"refcnt:%d xflags x%x nflag x%x\n",
ndlp->nlp_DID, (void *)ndlp,
@@ -3682,7 +3700,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
- int offline = 0;
+ int offline;
+ bool hba_pci_err;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3692,6 +3711,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
offline = pci_channel_offline(phba->pcidev);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
@@ -3715,11 +3735,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
- if (offline) {
+ if (offline || hba_pci_err) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_UNREG_INP |
NLP_RPI_REGISTERED);
spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli_rpi_release(vports[i],
+ ndlp);
} else {
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -13354,8 +13377,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Abort all iocbs associated with the hba */
lpfc_sli_hba_iocb_abort(phba);
- /* Wait for completion of device XRI exchange busy */
- lpfc_sli4_xri_exchange_busy_wait(phba);
+ if (!pci_channel_offline(phba->pcidev))
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
if (phba->pport)
@@ -13374,15 +13398,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable FW logging to host memory */
lpfc_ras_stop_fwlog(phba);
- /* Unset the queues shared with the hardware then release all
- * allocated resources.
- */
- lpfc_sli4_queue_unset(phba);
- lpfc_sli4_queue_destroy(phba);
-
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ /* release all queue allocated resources. */
+ lpfc_sli4_queue_destroy(phba);
+
/* Free RAS DMA memory */
if (phba->ras_fwlog.ras_enabled)
lpfc_sli4_ras_dma_free(phba);
@@ -14262,6 +14283,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
"2711 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
+ lpfc_sli4_prep_dev_for_reset(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -15057,24 +15079,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2826 PCI channel disable preparing for reset\n");
+ int offline = pci_channel_offline(phba->pcidev);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2826 PCI channel disable preparing for reset offline"
+ " %d\n", offline);
/* Block any management I/Os to the device */
lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
+ /* HBA_PCI_ERR was set in io_error_detect */
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
/* Flush all driver's outstanding I/Os as we are to reset */
lpfc_sli_flush_io_rings(phba);
+ lpfc_offline(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Disable interrupt and pci device */
lpfc_sli4_disable_intr(phba);
- lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
}
@@ -15123,6 +15149,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ bool hba_pci_err;
switch (state) {
case pci_channel_io_normal:
@@ -15130,17 +15157,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Fatal error, prepare for slot reset */
- lpfc_sli4_prep_dev_for_reset(phba);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2832 Already handling PCI error "
+ "state: x%x\n", state);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- phba->hba_flag |= HBA_PCI_ERR;
+ set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
/* Unknown state, prepare and request slot reset */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
@@ -15174,17 +15208,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
uint32_t intr_mode;
+ bool hba_pci_err;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
printk(KERN_ERR "lpfc: Cannot re-enable "
- "PCI device after reset.\n");
+ "PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_restore_state(pdev);
- phba->hba_flag &= ~HBA_PCI_ERR;
+ hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ dev_info(&pdev->dev,
+ "hba_pci_err was not set, recovering slot reset.\n");
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -15198,6 +15236,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -15239,8 +15279,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
*/
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
/* Perform device reset */
- lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Bring the device back online */
lpfc_online(phba);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 1213a299f9aa..8d26f207ebd2 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+
+ if (!vport || vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
+
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
if (qhandle == NULL)
return -ENOMEM;
@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
return -EINVAL;
remoteport = lpfc_rport->remoteport;
- if (!vport->localport)
+ if (!vport->localport ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -EINVAL;
lport = vport->localport->private;
@@ -559,6 +565,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
+ if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
if (!vport->phba->sli4_hba.nvmels_wq)
return -ENOMEM;
@@ -662,7 +670,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
return -EINVAL;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
atomic_inc(&lport->fc4NvmeLsRequests);
@@ -1516,7 +1525,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (unlikely(vport->load_flag & FC_UNLOADING)) {
+ if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -2169,8 +2179,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
- if (!vport || !vport->localport ||
- !qp || !qp->io_wq)
+ if (!vport->localport || !qp || !qp->io_wq)
return;
pring = qp->io_wq->pring;
@@ -2180,8 +2189,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_scsi += qp->abts_scsi_io_bufs;
abts_nvme += qp->abts_nvme_io_bufs;
}
- if (!vport || !vport->localport ||
- vport->phba->hba_flag & HBA_PCI_ERR)
+ if (!vport->localport ||
+ test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+ vport->load_flag & FC_UNLOADING)
return;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2541,8 +2551,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING ||
- unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
+ if (vport->load_flag & FC_UNLOADING)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3c132604fd91..ba9dbb51b75f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5929,13 +5929,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
lpfc_cmd->waitq = &waitq;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
spin_unlock(&pring_s4->ring_lock);
- else
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+ lpfc_sli_abort_fcp_cmpl);
+ } else {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
-
- ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
- lpfc_sli_abort_fcp_cmpl);
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ lpfc_sli_abort_fcp_cmpl);
+ }
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 20d40957a385..6adaf79e67cc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2828,6 +2828,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ __lpfc_sli_rpi_release(vport, ndlp);
+}
+
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
@@ -3715,7 +3721,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflag;
u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
ulp_command = get_job_cmnd(phba, saveq);
ulp_status = get_job_ulpstatus(phba, saveq);
@@ -4052,10 +4066,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
break;
}
- spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
@@ -4536,42 +4548,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- LIST_HEAD(completions);
+ LIST_HEAD(tx_completions);
+ LIST_HEAD(txcmplq_completions);
struct lpfc_iocbq *iocb, *next_iocb;
+ int offline;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
+ offline = pci_channel_offline(phba->pcidev);
/* Error everything on txq and txcmplq
* First do the txq.
*/
if (phba->sli_rev >= LPFC_SLI_REV4) {
spin_lock_irq(&pring->ring_lock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
- spin_lock_irq(&phba->hbalock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
- spin_unlock_irq(&phba->hbalock);
+ if (offline) {
+ list_splice_init(&pring->txcmplq,
+ &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
+ spin_unlock_irq(&pring->ring_lock);
} else {
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+ if (offline) {
+ list_splice_init(&pring->txcmplq, &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
spin_unlock_irq(&phba->hbalock);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (offline) {
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ } else {
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
@@ -4624,11 +4656,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_IOQ_FLUSH ||
- !phba->sli4_hba.hdwq) {
- spin_unlock_irq(&phba->hbalock);
- return;
- }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -10693,10 +10720,10 @@ __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
/* Words 0 - 2 */
bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
- bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
- bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->addr_low = bpl->addr_low;
+ bde->addr_high = bpl->addr_high;
bde->type_size = cpu_to_le32(xmit_len);
- bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
/* Word 3 */
cmdwqe->gen_req.request_payload_len = xmit_len;
@@ -10997,6 +11024,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
unsigned long iflags;
int rc;
+ /* If the PCI channel is in offline state, do not post iocbs. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IOCB_ERROR;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli_prep_wqe(phba, piocb);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e52f37e5d896..a4d3259b8c52 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.0"
+#define LPFC_DRIVER_VERSION "14.2.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 611871ef15b5..4919ea54b827 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2560,6 +2560,9 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LUN_VALID(sdev) \
+ (((sdev)->lun == 0) ? 1 : 0)
+
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 8bf72dbc33b7..db6793608447 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
goto scan_target;
}
return -ENXIO;
+ } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return -ENXIO;
}
scan_target:
@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
instance = megasas_lookup_instance(sdev->host->host_no);
if (MEGASAS_IS_LOGICAL(sdev)) {
+ if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return;
+ }
ld_tgt_id = MEGASAS_TARGET_ID(sdev);
instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
if (megasas_dbg_lvl & LD_PD_DEBUG)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b57f1803371e..538d2c0cd971 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5716,13 +5716,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
* having same upper 32bits in their base memory address.
- * @reply_pool_start_address: Base address of a reply queue set
+ * @start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
*
* Return: 1 if reply queues in a set have a same upper 32bits in their base
* memory address, else 0.
*/
-
static int
mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 0563078227de..a8dd14c91efd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
- if ((ioc->shost_recovery) || (ioc->config_cmds.status &
- MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ if (ioc->config_cmds.status & MPT3_CMD_RESET)
goto retry_config;
- issue_host_reset = 1;
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ issue_host_reset = 0;
+ r = -EFAULT;
+ } else
+ issue_host_reset = 1;
goto free_mem;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 00792767c620..7e476f50935b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
{
struct _sas_port *mpt3sas_port, *next;
unsigned long flags;
+ int port_id;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_port->hba_port);
}
+ port_id = sas_expander->port->port_id;
+
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
sas_expander->sas_address_parent, sas_expander->port);
@@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
"expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
sas_expander->handle, (unsigned long long)
sas_expander->sas_address,
- sas_expander->port->port_id);
+ port_id);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7ac63eb5ccd3..2fde496fff5f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -647,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index c4a838635893..5d7dfefd6f6c 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -192,10 +192,11 @@ struct sym53c500_data {
int fast_pio;
};
-static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd)
-{
- return scsi_cmd_priv(cmd);
-}
+struct sym53c500_cmd_priv {
+ int status;
+ int message;
+ int phase;
+};
enum Phase {
idle,
@@ -356,7 +357,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
struct scsi_cmnd *curSC = data->current_SC;
- struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC);
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC);
int fast_pio = data->fast_pio;
spin_lock_irqsave(dev->host_lock, flags);
@@ -403,12 +404,11 @@ SYM53C500_intr(int irq, void *dev_id)
if (int_reg & 0x20) { /* Disconnect */
DEB(printk("SYM53C500: disconnect intr received\n"));
- if (scsi_pointer->phase != message_in) { /* Unexpected disconnect */
+ if (scp->phase != message_in) { /* Unexpected disconnect */
curSC->result = DID_NO_CONNECT << 16;
} else { /* Command complete, return status and message */
- curSC->result = (scsi_pointer->Status & 0xff) |
- ((scsi_pointer->Message & 0xff) << 8) |
- (DID_OK << 16);
+ curSC->result = (scp->status & 0xff) |
+ ((scp->message & 0xff) << 8) | (DID_OK << 16);
}
goto idle_out;
}
@@ -419,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- scsi_pointer->phase = data_out;
+ scp->phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -438,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- scsi_pointer->phase = data_in;
+ scp->phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -453,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id)
break;
case 0x02: /* COMMAND */
- scsi_pointer->phase = command_ph;
+ scp->phase = command_ph;
printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
break;
case 0x03: /* STATUS */
- scsi_pointer->phase = status_ph;
+ scp->phase = status_ph;
VDEB(printk("SYM53C500: Status phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
@@ -471,24 +471,22 @@ SYM53C500_intr(int irq, void *dev_id)
case 0x06: /* MESSAGE-OUT */
DEB(printk("SYM53C500: Message-Out phase\n"));
- scsi_pointer->phase = message_out;
+ scp->phase = message_out;
outb(SET_ATN, port_base + CMD_REG); /* Reject the message */
outb(MSG_ACCEPT, port_base + CMD_REG);
break;
case 0x07: /* MESSAGE-IN */
VDEB(printk("SYM53C500: Message-In phase\n"));
- scsi_pointer->phase = message_in;
+ scp->phase = message_in;
- scsi_pointer->Status = inb(port_base + SCSI_FIFO);
- scsi_pointer->Message = inb(port_base + SCSI_FIFO);
+ scp->status = inb(port_base + SCSI_FIFO);
+ scp->message = inb(port_base + SCSI_FIFO);
VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
- DEB(printk("Status = %02x Message = %02x\n",
- scsi_pointer->Status, scsi_pointer->Message));
+ DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message));
- if (scsi_pointer->Message == SAVE_POINTERS ||
- scsi_pointer->Message == DISCONNECT) {
+ if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) {
outb(SET_ATN, port_base + CMD_REG); /* Reject message */
DEB(printk("Discarding SAVE_POINTERS message\n"));
}
@@ -500,7 +498,7 @@ out:
return IRQ_HANDLED;
idle_out:
- scsi_pointer->phase = idle;
+ scp->phase = idle;
scsi_done(curSC);
goto out;
}
@@ -548,7 +546,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
{
- struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(SCpnt);
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt);
int i;
int port_base = SCpnt->device->host->io_port;
struct sym53c500_data *data =
@@ -565,9 +563,9 @@ static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
VDEB(printk("\n"));
data->current_SC = SCpnt;
- scsi_pointer->phase = command_ph;
- scsi_pointer->Status = 0;
- scsi_pointer->Message = 0;
+ scp->phase = command_ph;
+ scp->status = 0;
+ scp->message = 0;
/* We are locked here already by the mid layer */
REG0(port_base);
@@ -682,7 +680,7 @@ static struct scsi_host_template sym53c500_driver_template = {
.this_id = 7,
.sg_tablesize = 32,
.shost_groups = SYM53C500_shost_groups,
- .cmd_size = sizeof(struct scsi_pointer),
+ .cmd_size = sizeof(struct sym53c500_cmd_priv),
};
static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index f90b707c190b..01c5e8ff4cc5 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+ /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+ if (pm8001_ha->max_q_num > 32)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ 1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (0x0000 != gst_len_mpistate)
return -EBUSY;
+ /*
+ * As per the controller datasheet, a minimum 500ms delay is
+ * required after successful MPI initialization before issuing
+ * commands.
+ */
+ msleep(500);
+
return 0;
}
@@ -1727,10 +1738,11 @@ static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- mask = (u32)(1 << vec);
-
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1746,12 +1758,15 @@ static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- if (vec == 0xFF)
- mask = 0xFFFFFFFF;
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
- mask = (u32)(1 << vec);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 928532180d32..fd674ed1febe 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3182,124 +3182,6 @@ static int pmcraid_build_ioadl(
}
/**
- * pmcraid_free_sglist - Frees an allocated SG buffer list
- * @sglist: scatter/gather list pointer
- *
- * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
- *
- * Return value:
- * none
- */
-static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
-{
- sgl_free_order(sglist->scatterlist, sglist->order);
- kfree(sglist);
-}
-
-/**
- * pmcraid_alloc_sglist - Allocates memory for a SG list
- * @buflen: buffer length
- *
- * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
- * list.
- *
- * Return value
- * pointer to sglist / NULL on failure
- */
-static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
-{
- struct pmcraid_sglist *sglist;
- int sg_size;
- int order;
-
- sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
- order = (sg_size > 0) ? get_order(sg_size) : 0;
-
- /* Allocate a scatter/gather list for the DMA */
- sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
- if (sglist == NULL)
- return NULL;
-
- sglist->order = order;
- sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
- &sglist->num_sg);
-
- return sglist;
-}
-
-/**
- * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
- * @sglist: scatter/gather list pointer
- * @buffer: buffer pointer
- * @len: buffer length
- * @direction: data transfer direction
- *
- * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
- *
- * Return value:
- * 0 on success / other on failure
- */
-static int pmcraid_copy_sglist(
- struct pmcraid_sglist *sglist,
- void __user *buffer,
- u32 len,
- int direction
-)
-{
- struct scatterlist *sg;
- void *kaddr;
- int bsize_elem;
- int i;
- int rc = 0;
-
- /* Determine the actual number of bytes per element */
- bsize_elem = PAGE_SIZE * (1 << sglist->order);
-
- sg = sglist->scatterlist;
-
- for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, bsize_elem);
-
- kunmap(page);
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- return -EFAULT;
- }
-
- sg->length = bsize_elem;
- }
-
- if (len % bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
-
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, len % bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, len % bsize_elem);
-
- kunmap(page);
-
- sg->length = len % bsize_elem;
- }
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-/**
* pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
*
@@ -3454,365 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
return rc;
}
-
-/**
- * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
- * commands sent over IOCTL interface
- *
- * @cmd : pointer to struct pmcraid_cmd
- * @buflen : length of the request buffer
- * @direction : data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static int pmcraid_build_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = NULL;
- struct scatterlist *sg = NULL;
- struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
- struct pmcraid_ioadl_desc *ioadl;
- int i;
-
- sglist = pmcraid_alloc_sglist(buflen);
-
- if (!sglist) {
- pmcraid_err("can't allocate memory for passthrough SGls\n");
- return -ENOMEM;
- }
-
- sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg, direction);
-
- if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
- dev_err(&cmd->drv_inst->pdev->dev,
- "Failed to map passthrough buffer!\n");
- pmcraid_free_sglist(sglist);
- return -EIO;
- }
-
- cmd->sglist = sglist;
- ioarcb->request_flags0 |= NO_LINK_DESCS;
-
- ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
-
- /* Initialize IOADL descriptor addresses */
- for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
- ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
- ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
- ioadl[i].flags = 0;
- }
-
- /* setup the last descriptor */
- ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
-
- return 0;
-}
-
-
-/**
- * pmcraid_release_passthrough_ioadls - release passthrough ioadls
- *
- * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
- * @buflen: size of the request buffer
- * @direction: data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static void pmcraid_release_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = cmd->sglist;
-
- if (buflen > 0) {
- dma_unmap_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg,
- direction);
- pmcraid_free_sglist(sglist);
- cmd->sglist = NULL;
- }
-}
-
-/**
- * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
- *
- * @pinstance: pointer to adapter instance structure
- * @ioctl_cmd: ioctl code
- * @buflen: unused
- * @arg: pointer to pmcraid_passthrough_buffer user buffer
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static long pmcraid_ioctl_passthrough(
- struct pmcraid_instance *pinstance,
- unsigned int ioctl_cmd,
- unsigned int buflen,
- void __user *arg
-)
-{
- struct pmcraid_passthrough_ioctl_buffer *buffer;
- struct pmcraid_ioarcb *ioarcb;
- struct pmcraid_cmd *cmd;
- struct pmcraid_cmd *cancel_cmd;
- void __user *request_buffer;
- unsigned long request_offset;
- unsigned long lock_flags;
- void __user *ioasa;
- u32 ioasc;
- int request_size;
- int buffer_size;
- u8 direction;
- int rc = 0;
-
- /* If IOA reset is in progress, wait 10 secs for reset to complete */
- if (pinstance->ioa_reset_in_progress) {
- rc = wait_event_interruptible_timeout(
- pinstance->reset_wait_q,
- !pinstance->ioa_reset_in_progress,
- msecs_to_jiffies(10000));
-
- if (!rc)
- return -ETIMEDOUT;
- else if (rc < 0)
- return -ERESTARTSYS;
- }
-
- /* If adapter is not in operational state, return error */
- if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
- pmcraid_err("IOA is not operational\n");
- return -ENOTTY;
- }
-
- buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
-
- if (!buffer) {
- pmcraid_err("no memory for passthrough buffer\n");
- return -ENOMEM;
- }
-
- request_offset =
- offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
-
- request_buffer = arg + request_offset;
-
- rc = copy_from_user(buffer, arg,
- sizeof(struct pmcraid_passthrough_ioctl_buffer));
-
- ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
-
- if (rc) {
- pmcraid_err("ioctl: can't copy passthrough buffer\n");
- rc = -EFAULT;
- goto out_free_buffer;
- }
-
- request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
-
- if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
- direction = DMA_TO_DEVICE;
- } else {
- direction = DMA_FROM_DEVICE;
- }
-
- if (request_size < 0) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- /* check if we have any additional command parameters */
- if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
- > PMCRAID_ADD_CMD_PARAM_LEN) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- cmd = pmcraid_get_free_cmd(pinstance);
-
- if (!cmd) {
- pmcraid_err("free command block is not available\n");
- rc = -ENOMEM;
- goto out_free_buffer;
- }
-
- cmd->scsi_cmd = NULL;
- ioarcb = &(cmd->ioa_cb->ioarcb);
-
- /* Copy the user-provided IOARCB stuff field by field */
- ioarcb->resource_handle = buffer->ioarcb.resource_handle;
- ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
- ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
- ioarcb->request_type = buffer->ioarcb.request_type;
- ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
- ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
- memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
-
- if (buffer->ioarcb.add_cmd_param_length) {
- ioarcb->add_cmd_param_length =
- buffer->ioarcb.add_cmd_param_length;
- ioarcb->add_cmd_param_offset =
- buffer->ioarcb.add_cmd_param_offset;
- memcpy(ioarcb->add_data.u.add_cmd_params,
- buffer->ioarcb.add_data.u.add_cmd_params,
- le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
- }
-
- /* set hrrq number where the IOA should respond to. Note that all cmds
- * generated internally uses hrrq_id 0, exception to this is the cmd
- * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
- * hrrq_id assigned here in queuecommand
- */
- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
- pinstance->num_hrrq;
-
- if (request_size) {
- rc = pmcraid_build_passthrough_ioadls(cmd,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_cmd;
- }
- }
-
- /* If data is being written into the device, copy the data from user
- * buffers
- */
- if (direction == DMA_TO_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- goto out_free_sglist;
- }
- }
-
- /* passthrough ioctl is a blocking command so, put the user to sleep
- * until timeout. Note that a timeout value of 0 means, do timeout.
- */
- cmd->cmd_done = pmcraid_internal_done;
- init_completion(&cmd->wait_for_completion);
- cmd->completion_req = 1;
-
- pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0],
- le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- _pmcraid_fire_command(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- /* NOTE ! Remove the below line once abort_task is implemented
- * in firmware. This line disables ioctl command timeout handling logic
- * similar to IO command timeout handling, making ioctl commands to wait
- * until the command completion regardless of timeout value specified in
- * ioarcb
- */
- buffer->ioarcb.cmd_timeout = 0;
-
- /* If command timeout is specified put caller to wait till that time,
- * otherwise it would be blocking wait. If command gets timed out, it
- * will be aborted.
- */
- if (buffer->ioarcb.cmd_timeout == 0) {
- wait_for_completion(&cmd->wait_for_completion);
- } else if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
-
- pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0]);
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- cancel_cmd = pmcraid_abort_cmd(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- if (cancel_cmd) {
- wait_for_completion(&cancel_cmd->wait_for_completion);
- ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
- pmcraid_return_cmd(cancel_cmd);
-
- /* if abort task couldn't find the command i.e it got
- * completed prior to aborting, return good completion.
- * if command got aborted successfully or there was IOA
- * reset due to abort task itself getting timedout then
- * return -ETIMEDOUT
- */
- if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
- PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
- if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
- rc = -ETIMEDOUT;
- goto out_handle_response;
- }
- }
-
- /* no command block for abort task or abort task failed to abort
- * the IOARCB, then wait for 150 more seconds and initiate reset
- * sequence after timeout
- */
- if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(150 * 1000))) {
- pmcraid_reset_bringup(cmd->drv_inst);
- rc = -ETIMEDOUT;
- }
- }
-
-out_handle_response:
- /* copy entire IOASA buffer and return IOCTL success.
- * If copying IOASA to user-buffer fails, return
- * EFAULT
- */
- if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
- sizeof(struct pmcraid_ioasa))) {
- pmcraid_err("failed to copy ioasa buffer to user\n");
- rc = -EFAULT;
- }
-
- /* If the data transfer was from device, copy the data onto user
- * buffers
- */
- else if (direction == DMA_FROM_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- rc = -EFAULT;
- }
- }
-
-out_free_sglist:
- pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
-
-out_free_cmd:
- pmcraid_return_cmd(cmd);
-
-out_free_buffer:
- kfree(buffer);
-
- return rc;
-}
-
-
-
-
/**
* pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
*
@@ -3922,20 +3445,6 @@ static long pmcraid_chr_ioctl(
switch (_IOC_TYPE(cmd)) {
- case PMCRAID_PASSTHROUGH_IOCTL:
- /* If ioctl code is to download microcode, we need to block
- * mid-layer requests.
- */
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_block_requests(pinstance->host);
-
- retval = pmcraid_ioctl_passthrough(pinstance, cmd,
- hdr->buffer_length, argp);
-
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_unblock_requests(pinstance->host);
- break;
-
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
retval = pmcraid_ioctl_driver(pinstance, cmd,
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index bbb75318f1e7..9f59930e8b4f 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1023,40 +1023,15 @@ struct pmcraid_ioctl_header {
#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
/*
- * pmcraid_passthrough_ioctl_buffer - structure given as argument to
- * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
- * 32-byte alignment so, it is necessary to pack this structure to avoid any
- * holes between ioctl_header and passthrough buffer
- *
- * .ioactl_header : ioctl header
- * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
- * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
- * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
- * the transfer directions passed in ioarcb.flags0. Contents
- * of this buffer are valid only when ioarcb.data_transfer_len
- * is not zero.
- */
-struct pmcraid_passthrough_ioctl_buffer {
- struct pmcraid_ioctl_header ioctl_header;
- struct pmcraid_ioarcb ioarcb;
- struct pmcraid_ioasa ioasa;
- u8 request_buffer[];
-} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
-
-/*
* keys to differentiate between driver handled IOCTLs and passthrough
* IOCTLs passed to IOA. driver determines the ioctl type using macro
* _IOC_TYPE
*/
#define PMCRAID_DRIVER_IOCTL 'D'
-#define PMCRAID_PASSTHROUGH_IOCTL 'F'
#define DRV_IOCTL(n, size) \
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
-#define FMW_IOCTL(n, size) \
- _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
-
/*
* _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
* This is to facilitate applications avoiding un-necessary memory allocations.
@@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer {
#define PMCRAID_IOCTL_RESET_ADAPTER \
DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
-/* passthrough/firmware handled commands */
-#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
- FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
- FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-
#endif /* _PMCRAID_H */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8196f89f404e..31ec429104e2 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
return qedi_iscsi_send_ioreq(task);
}
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 5 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
@@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ flush_work(&qedi_ep->offload_work);
+
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
- flush_work(&qedi_ep->offload_work);
-
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
abrt_conn = qedi_conn->abrt_conn;
@@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
return rc;
}
-static void qedi_offload_work(struct work_struct *work)
-{
- struct qedi_endpoint *qedi_ep =
- container_of(work, struct qedi_endpoint, offload_work);
- struct qedi_ctx *qedi;
- int wait_delay = 5 * HZ;
- int ret;
-
- qedi = qedi_ep->qedi;
-
- ret = qedi_iscsi_offload_conn(qedi_ep);
- if (ret) {
- QEDI_ERR(&qedi->dbg_ctx,
- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
- qedi_ep->iscsi_cid, qedi_ep, ret);
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- return;
- }
-
- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
- (qedi_ep->state ==
- EP_STATE_OFLDCONN_COMPL),
- wait_delay);
- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- QEDI_ERR(&qedi->dbg_ctx,
- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
- qedi_ep->iscsi_cid, qedi_ep);
- }
-}
-
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
@@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
qedi_ep->dst_addr, qedi_ep->dst_port);
}
- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 85dbf81f3204..6dfcfd8e7337 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3826,6 +3826,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c607755cce00..592a290e6cfa 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -32,7 +32,6 @@
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
@@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static atomic_t sdebug_num_hosts;
-static DEFINE_MUTEX(add_host_mutex);
-
+static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
@@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
-static bool sdebug_deflect_incoming;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
@@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("Exit early due to deflect_incoming\n");
- return 1;
- }
if (devip == NULL) {
devip = find_build_dev_info(sdp);
if (devip == NULL)
@@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
}
/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(bool done_with_no_conn)
+static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
@@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn)
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
- struct scsi_cmnd *scp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
spin_lock_irqsave(&sqp->qc_lock, iflags);
for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
if (test_bit(k, sqp->in_use_bm)) {
sqcp = &sqp->qc_arr[k];
- scp = sqcp->a_cmnd;
- if (!scp)
+ if (sqcp->a_cmnd == NULL)
continue;
devip = (struct sdebug_dev_info *)
sqcp->a_cmnd->device->hostdata;
@@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn)
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
stop_qc_helper(sd_dp, l_defer_t);
- if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
- scp->result = DID_NO_CONNECT << 16;
- scsi_done(scp);
- }
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
}
}
spin_unlock(&sdebug_host_list_lock);
- stop_all_queued(false);
+ stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: %d device(s) found\n", __func__, k);
@@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
}
}
-static void sdeb_block_all_queues(void)
-{
- int j;
- struct sdebug_queue *sqp;
-
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)true);
-}
-
-static void sdeb_unblock_all_queues(void)
+static void block_unblock_all_queues(bool block)
{
int j;
struct sdebug_queue *sqp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)false);
-}
-
-static void
-sdeb_add_n_hosts(int num_hosts)
-{
- if (num_hosts < 1)
- return;
- do {
- bool found;
- unsigned long idx;
- struct sdeb_store_info *sip;
- bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
-
- found = false;
- if (want_phs) {
- xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
- sdeb_most_recent_idx = (int)idx;
- found = true;
- break;
- }
- if (found) /* re-use case */
- sdebug_add_host_helper((int)idx);
- else
- sdebug_do_add_host(true /* make new store */);
- } else {
- sdebug_do_add_host(false);
- }
- } while (--num_hosts);
+ atomic_set(&sqp->blocked, (int)block);
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void)
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
static void clear_queue_stats(void)
@@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void)
return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
-static int process_deflect_incoming(struct scsi_cmnd *scp)
-{
- u8 opcode = scp->cmnd[0];
-
- if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
- return 0;
- return DID_NO_CONNECT << 16;
-}
-
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
/* Complete the processing of the thread that queued a SCSI command to this
@@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp)
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int scsi_result,
- int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
+ int (*pfp)(struct scsi_cmnd *,
+ struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
bool new_sd_dp;
@@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
sdp = cmnd->device;
- if (delta_jiff == 0) {
- sqp = get_queue(cmnd);
- if (atomic_read(&sqp->blocked)) {
- if (smp_load_acquire(&sdebug_deflect_incoming))
- return process_deflect_incoming(cmnd);
- else
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ if (delta_jiff == 0)
goto respond_in_thread;
- }
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- scsi_result = process_deflect_incoming(cmnd);
- goto respond_in_thread;
- }
- if (sdebug_verbose)
- pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
respond_in_thread: /* call back to mid-layer using invocation thread */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
cmnd->result &= ~SDEG_RES_IMMED_MASK;
- if (cmnd->result == 0 && scsi_result != 0) {
+ if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result;
- if (sdebug_verbose)
- pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
- blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
- }
scsi_done(cmnd);
return 0;
}
@@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
k = 0;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
@@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
atomic_set(&retired_max_queue, k + 1);
else
atomic_set(&retired_max_queue, 0);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return count;
}
return -EINVAL;
@@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
/* absolute number of hosts currently active is what is shown */
- return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-/*
- * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
- * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
- * Returns -EBUSY if another add_host sysfs invocation is active.
- */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
- if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
+ if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
- if (sdebug_verbose)
- pr_info("prior num_hosts=%d, num_to_add=%d\n",
- atomic_read(&sdebug_num_hosts), delta_hosts);
- if (delta_hosts == 0)
- return count;
- if (mutex_trylock(&add_host_mutex) == 0)
- return -EBUSY;
if (delta_hosts > 0) {
- sdeb_add_n_hosts(delta_hosts);
- } else if (delta_hosts < 0) {
- smp_store_release(&sdebug_deflect_incoming, true);
- sdeb_block_all_queues();
- if (delta_hosts >= atomic_read(&sdebug_num_hosts))
- stop_all_queued(true);
do {
- if (atomic_read(&sdebug_num_hosts) < 1) {
- free_all_queued();
- break;
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
}
+ } while (--delta_hosts);
+ } else if (delta_hosts < 0) {
+ do {
sdebug_do_remove_host(false);
} while (++delta_hosts);
- sdeb_unblock_all_queues();
- smp_store_release(&sdebug_deflect_incoming, false);
}
- mutex_unlock(&add_host_mutex);
- if (sdebug_verbose)
- pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
return count;
}
static DRIVER_ATTR_RW(add_host);
@@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void)
sdebug_add_host = 0;
for (k = 0; k < hosts_to_add; k++) {
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("exit early as sdebug_deflect_incoming is set\n");
- return 0;
- }
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
if (ret < 0) {
@@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void)
}
}
if (sdebug_verbose)
- pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
- /*
- * Even though all the hosts have been established, due to async device (LU) scanning
- * by the scsi mid-level, there may still be devices (LUs) being set up.
- */
return 0;
bus_unreg:
@@ -7131,17 +7041,12 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k;
+ int k = sdebug_num_hosts;
- /* Possible race with LUs still being set up; stop them asap */
- sdeb_block_all_queues();
- smp_store_release(&sdebug_deflect_incoming, true);
- stop_all_queued(false);
- for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
+ stop_all_queued();
+ for (; k; k--)
sdebug_do_remove_host(true);
free_all_queued();
- if (sdebug_verbose)
- pr_info("removed %d hosts\n", k);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
if (error)
goto clean;
- atomic_inc(&sdebug_num_hosts);
+ ++sdebug_num_hosts;
return 0;
clean:
@@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end)
return;
device_unregister(&sdbg_host->dev);
- atomic_dec(&sdebug_num_hosts);
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
int num_in_q = 0;
struct sdebug_dev_info *devip;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
devip = (struct sdebug_dev_info *)sdev->hostdata;
if (NULL == devip) {
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return -ENODEV;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
__func__, qdepth, num_in_q);
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return sdev->queue_depth;
}
@@ -7519,12 +7424,13 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct sdebug_defer *sd_dp;
sqp = sdebug_q_arr + queue_num;
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (qc_idx >= sdebug_max_queue)
- return 0;
spin_lock_irqsave(&sqp->qc_lock, iflags);
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (qc_idx >= sdebug_max_queue)
+ goto unlock;
+
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
if (first) {
first = false;
@@ -7589,6 +7495,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
break;
}
+unlock:
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (num_entries > 0)
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index ff89de86545d..b02af340c2d3 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
- if (!rq->q->disk)
+ if (!rq->q || !rq->q->disk)
return NULL;
return rq->q->disk->disk_name;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index f4e6c68ac99e..2ef78083f1ef 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
int ret;
struct sbitmap sb_backup;
+ depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
/*
* realloc if new shift is calculated, which is caused by setting
* up one new default queue depth after calling ->slave_configure
@@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
scsi_device_max_queue_depth(sdev),
new_shift, GFP_KERNEL,
sdev->request_queue->node, false, true);
+ if (!ret)
+ sbitmap_resize(&sdev->budget_map, depth);
+
if (need_free) {
if (ret)
sdev->budget_map = sb_backup;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 226a50944c00..dc6872e352bd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1384,10 +1384,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
sdev->bsg_dev = scsi_bsg_register_queue(sdev);
if (IS_ERR(sdev->bsg_dev)) {
- /*
- * We're treating error on bsg register as non-fatal, so
- * pretend nothing went wrong.
- */
error = PTR_ERR(sdev->bsg_dev);
sdev_printk(KERN_INFO, sdev,
"Failed to register bsg queue, errno=%d\n",
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 27951ea05dd4..2c0dd64159b0 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,6 +86,9 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
static void iscsi_endpoint_release(struct device *dev)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, ep->id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
kfree(ep);
}
@@ -180,7 +188,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+ return sysfs_emit(buf, "%d\n", ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = {
.attrs = iscsi_endpoint_attrs,
};
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- const uint64_t *epid = data;
-
- return *epid == ep->id;
-}
-
struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
- struct device *dev;
struct iscsi_endpoint *ep;
- uint64_t id;
- int err;
-
- for (id = 1; id < ISCSI_MAX_EPID; id++) {
- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
- iscsi_match_epid);
- if (!dev)
- break;
- else
- put_device(dev);
- }
- if (id == ISCSI_MAX_EPID) {
- printk(KERN_ERR "Too many connections. Max supported %u\n",
- ISCSI_MAX_EPID - 1);
- return NULL;
- }
+ int err, id;
ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
if (!ep)
return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+ if (id < 0) {
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+ id);
+ goto free_ep;
+ }
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+ dev_set_name(&ep->dev, "ep-%d", id);
err = device_register(&ep->dev);
if (err)
- goto free_ep;
+ goto free_id;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
if (err)
@@ -248,6 +240,10 @@ unregister_dev:
device_unregister(&ep->dev);
return NULL;
+free_id:
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
free_ep:
kfree(ep);
return NULL;
@@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
*/
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct device *dev;
+ struct iscsi_endpoint *ep;
- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
- iscsi_match_epid);
- if (!dev)
- return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ ep = idr_find(&iscsi_ep_idr, handle);
+ if (!ep)
+ goto unlock;
- return iscsi_dev_to_endpoint(dev);
+ get_device(&ep->dev);
+unlock:
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
@@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
break;
case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
break;
default:
iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+ struct iscsi_endpoint *ep,
+ bool is_active)
+{
+ /* Check if this was a conn error and the kernel took ownership */
+ spin_lock_irq(&conn->lock);
+ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_ep_disconnect(conn, is_active);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ mutex_unlock(&conn->ep_mutex);
+
+ flush_work(&conn->cleanup_work);
+ /*
+ * Userspace is now done with the EP so we can release the ref
+ * iscsi_cleanup_conn_work_fn took.
+ */
+ iscsi_put_endpoint(ep);
+ mutex_lock(&conn->ep_mutex);
+ }
+}
+
static int iscsi_if_stop_conn(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
@@ -2238,11 +2280,24 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
iscsi_stop_conn(conn, flag);
} else {
/*
+ * For offload, when iscsid is restarted it won't know about
+ * existing endpoints so it can't do an ep_disconnect. We clean
+ * it up here for userspace.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* Figure out if it was the kernel or userspace initiating this.
*/
+ spin_lock_irq(&conn->lock);
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
iscsi_stop_conn(conn, flag);
} else {
+ spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn,
"flush kernel conn cleanup.\n");
flush_work(&conn->cleanup_work);
@@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
* Only clear for recovery to avoid extra cleanup runs during
* termination.
*/
+ spin_lock_irq(&conn->lock);
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ spin_unlock_irq(&conn->lock);
}
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
return 0;
}
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
- struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
- struct iscsi_endpoint *ep;
-
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
- conn->state = ISCSI_CONN_FAILED;
-
- if (!conn->ep || !session->transport->ep_disconnect)
- return;
-
- ep = conn->ep;
- conn->ep = NULL;
-
- session->transport->unbind_conn(conn, is_active);
- session->transport->ep_disconnect(ep);
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
mutex_lock(&conn->ep_mutex);
/*
- * If we are not at least bound there is nothing for us to do. Userspace
- * will do a ep_disconnect call if offload is used, but will not be
- * doing a stop since there is nothing to clean up, so we have to clear
- * the cleanup bit here.
+ * Get a ref to the ep, so we don't release its ID until after
+ * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
*/
- if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
- ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
- clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
- mutex_unlock(&conn->ep_mutex);
- return;
- }
-
+ if (conn->ep)
+ get_device(&conn->ep->dev);
iscsi_ep_disconnect(conn, false);
if (system_state != SYSTEM_RUNNING) {
@@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
+ spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+ int state;
- if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
- queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+ spin_lock_irqsave(&conn->lock, flags);
+ /*
+ * Userspace will only do a stop call if we are at least bound, and we
+ * only need to do the in-kernel cleanup if in the UP state so cmds can
+ * be released to upper layers. In other states, just wait for userspace
+ * to avoid races that can leave the cleanup_work queued.
+ */
+ state = READ_ONCE(conn->state);
+ switch (state) {
+ case ISCSI_CONN_BOUND:
+ case ISCSI_CONN_UP:
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+ &conn->flags)) {
+ queue_work(iscsi_conn_cleanup_workq,
+ &conn->cleanup_work);
+ }
+ break;
+ default:
+ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+ state);
+ break;
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- int err = 0, value = 0;
+ int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- if ((conn->state == ISCSI_CONN_BOUND) ||
- (conn->state == ISCSI_CONN_UP)) {
+ state = READ_ONCE(conn->state);
+ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
}
mutex_lock(&conn->ep_mutex);
- /* Check if this was a conn error and the kernel took ownership */
- if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
- ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
- mutex_unlock(&conn->ep_mutex);
-
- flush_work(&conn->cleanup_work);
- goto put_ep;
- }
-
- iscsi_ep_disconnect(conn, false);
+ iscsi_if_disconnect_bound_ep(conn, ep, false);
mutex_unlock(&conn->ep_mutex);
put_ep:
iscsi_put_endpoint(ep);
@@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
return -EINVAL;
mutex_lock(&conn->ep_mutex);
+ spin_lock_irq(&conn->lock);
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
mutex_unlock(&conn->ep_mutex);
ev->r.retcode = -ENOTCONN;
return 0;
}
+ spin_unlock_irq(&conn->lock);
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_BIND_CONN:
- if (conn->ep) {
- /*
- * For offload boot support where iscsid is restarted
- * during the pivot root stage, the ep will be intact
- * here when the new iscsid instance starts up and
- * reconnects.
- */
- iscsi_ep_disconnect(conn, true);
- }
-
session = iscsi_session_lookup(ev->u.b_conn.sid);
if (!session) {
err = -EINVAL;
@@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
+ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_START_CONN:
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
+ WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
break;
case ISCSI_UEVENT_SEND_PDU:
pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev,
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
+ int conn_state = READ_ONCE(conn->state);
- if (conn->state >= 0 &&
- conn->state < ARRAY_SIZE(connection_state_names))
- state = connection_state_names[conn->state];
+ if (conn_state >= 0 &&
+ conn_state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
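
The scsi_transport_iscsi.c hunks above stop writing conn->state with plain stores: writers use WRITE_ONCE(), and the sysfs/netlink paths take a single READ_ONCE() snapshot before comparing, while a new conn->lock serializes the cleanup flag. A minimal sketch of that read/write pattern follows, with hypothetical example_* names.

	/* Sketch only: the enum, struct, and helpers are made up. */
	#include <linux/compiler.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	enum example_state { EX_DOWN, EX_BOUND, EX_UP, EX_FAILED };

	struct example_conn {
		spinlock_t lock;	/* used by the driver for its cleanup flag; unused here */
		int state;		/* written with WRITE_ONCE(), read with READ_ONCE() */
	};

	static void example_set_state(struct example_conn *conn, int new_state)
	{
		/* Single aligned store; lockless readers never see a torn value. */
		WRITE_ONCE(conn->state, new_state);
	}

	static bool example_can_set_param(struct example_conn *conn)
	{
		/* Snapshot once so both comparisons test the same value. */
		int state = READ_ONCE(conn->state);

		return state == EX_BOUND || state == EX_UP;
	}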
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a390679cf458..dc6e55761fd1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3216,6 +3216,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
+ sd_read_cpr(sdkp);
}
sd_print_capacity(sdkp, old_capacity);
@@ -3225,7 +3226,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
- sd_read_cpr(sdkp);
}
/*
@@ -3475,6 +3475,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
put_device(&sdkp->disk_dev);
+ blk_cleanup_disk(gd);
goto out;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 5ba9df334968..cbd92891a762 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -535,7 +535,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
scsi_autopm_get_device(sdev);
- if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
+ if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
goto put;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ddd00efc4882..fbdb5124d7f7 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tochdr->cdth_trk0 = buffer[2];
tochdr->cdth_trk1 = buffer[3];
+err:
kfree(buffer);
return result;
}
@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tocentry->cdte_ctrl = buffer[5] & 0xf;
tocentry->cdte_adr = buffer[5] >> 4;
@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ buffer[10]) << 8) + buffer[11];
+err:
kfree(buffer);
return result;
}
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
- char *buffer = kmalloc(32, GFP_KERNEL);
+ char *buffer = kzalloc(32, GFP_KERNEL);
int result;
if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
cgc.data_direction = DMA_FROM_DEVICE;
cgc.timeout = IOCTL_TIMEOUT;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
memcpy(mcn->medium_catalog_number, buffer + 9, 13);
mcn->medium_catalog_number[13] = 0;
+err:
kfree(buffer);
return result;
}
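
The sr_ioctl.c hunks above zero the command buffer (kzalloc() instead of kmalloc()) and jump past the parsing step when sr_do_ioctl() fails, so stale or uninitialized bytes are never copied back to the caller. A small sketch of the same pattern, with a hypothetical do_example_cmd() helper and device type:

	/* Sketch only: struct example_dev and do_example_cmd() are hypothetical. */
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct example_dev;
	int do_example_cmd(struct example_dev *dev, unsigned char *buf, int len);

	static int example_read_header(struct example_dev *dev, u8 *trk0, u8 *trk1)
	{
		unsigned char *buffer;
		int result;

		buffer = kzalloc(32, GFP_KERNEL);	/* zeroed, unlike kmalloc() */
		if (!buffer)
			return -ENOMEM;

		result = do_example_cmd(dev, buffer, 32);
		if (result)
			goto err;			/* never parse a failed reply */

		*trk0 = buffer[2];
		*trk1 = buffer[3];
	err:
		kfree(buffer);
		return result;
	}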
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 0d2e950d0865..586c0e567ff9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -957,18 +957,6 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
.deassert = ufs_qcom_reset_deassert,
};
-#define ANDROID_BOOT_DEV_MAX 30
-static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
-
-#ifndef MODULE
-static int __init get_android_boot_dev(char *str)
-{
- strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
- return 1;
-}
-__setup("androidboot.bootdevice=", get_android_boot_dev);
-#endif
-
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -988,9 +976,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
struct resource *res;
struct ufs_clk_info *clki;
- if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
- return -ENODEV;
-
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
err = -ENOMEM;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index f76692053ca1..e892b9feffb1 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
+static int ufs_intel_mtl_init(struct ufs_hba *hba)
+{
+ hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+ return ufs_intel_common_init(hba);
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
+static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_mtl_init,
+ .exit = ufs_intel_common_exit,
+ .hce_enable_notify = ufs_intel_hce_enable_notify,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
@@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 88c20f3608c2..94f545be183a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -820,8 +820,6 @@ struct ufs_hba {
enum ufs_pm_level rpm_lvl;
/* Desired UFS power management level during system PM */
enum ufs_pm_level spm_lvl;
- struct device_attribute rpm_lvl_attr;
- struct device_attribute spm_lvl_attr;
int pm_op_in_progress;
/* Auto-Hibernate Idle Timer register value */
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index b2bec19022cd..81099b68bbfb 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -867,12 +867,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
struct ufshpb_region *rgn, *victim_rgn = NULL;
list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
- if (!rgn) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: no region allocated\n",
- __func__);
- return NULL;
- }
if (ufshpb_check_srgns_issue_state(hpb, rgn))
continue;
@@ -888,6 +882,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
break;
}
+ if (!victim_rgn)
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: no region allocated\n",
+ __func__);
+
return victim_rgn;
}
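
The ufshpb.c hunk above drops the NULL test on the loop cursor, which list_for_each_entry() never sets to NULL, and instead reports the "no region" case after the loop when victim_rgn is still unset. A sketch of that shape, with made-up names:

	/* Sketch only: example_item and example_pick_victim() are made up. */
	#include <linux/list.h>
	#include <linux/printk.h>
	#include <linux/types.h>

	struct example_item {
		struct list_head node;
		bool busy;
	};

	static struct example_item *example_pick_victim(struct list_head *head)
	{
		struct example_item *item, *victim = NULL;

		list_for_each_entry(item, head, node) {
			if (item->busy)
				continue;
			victim = item;
			break;
		}

		if (!victim)
			pr_err("example: no idle item found\n");

		return victim;
	}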
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 0e6110da69e7..578c4b6d0f7d 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -988,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = {
.remove = virtscsi_remove,
};
-static int __init init(void)
+static int __init virtio_scsi_init(void)
{
int ret = -ENOMEM;
@@ -1020,14 +1020,14 @@ error:
return ret;
}
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 27b9e2baab1a..7acf9193a9e8 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
scsi_remove_host(host);
NCR_700_release(host);
+ if (host->base > 0x01000000)
+ iounmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
zorro_release_device(z);