author		Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 13:41:41 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 13:41:41 -0800
commit		8762069330316392331e693befd8a5b632833618 (patch)
tree		26716f1064b9b0b376d4bf77604cdcdc600efdb2 /drivers
parent		6861eaf79155f0a5544ff989754159f806795c31 (diff)
parent		833f7d4819a88f027033e0033ea44f7ae3e45a9b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "Updates to the usual drivers (ufs, lpfc, qla2xxx, libsas). The major
  core change is a rework to remove the two helpers around
  scsi_execute_cmd and use it as the only submission interface along
  with other minor fixes and updates"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (142 commits)
  scsi: ufs: core: Fix an error handling path in ufshcd_read_desc_param()
  scsi: ufs: core: Fix device management cmd timeout flow
  scsi: aic94xx: Add missing check for dma_map_single()
  scsi: smartpqi: Replace one-element array with flexible-array member
  scsi: mpt3sas: Fix a memory leak
  scsi: qla2xxx: Remove the unused variable wwn
  scsi: ufs: core: Fix kernel-doc syntax
  scsi: ufs: core: Add hibernation callbacks
  scsi: snic: Fix memory leak with using debugfs_lookup()
  scsi: ufs: core: Limit DMA alignment check
  scsi: Documentation: Correct spelling
  scsi: Documentation: Correct spelling
  scsi: target: Documentation: Correct spelling
  scsi: aacraid: Allocate cmd_priv with scsicmd
  scsi: ufs: qcom: dt-bindings: Add SM8550 compatible string
  scsi: ufs: ufs-qcom: Clear qunipro_g4_sel for HW version major 5
  scsi: ufs: qcom: fix platform_msi_domain_free_irqs() reference
  scsi: ufs: core: Enable DMA clustering
  scsi: ufs: exynos: Fix the maximum segment size
  scsi: ufs: exynos: Fix DMA alignment for PAGE_SIZE != 4096
  ...
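The core rework visible throughout the diff below replaces the scsi_execute()/scsi_execute_req() pair with a single scsi_execute_cmd() entry point: the data direction moves from an enum dma_data_direction argument into the request op (REQ_OP_DRV_IN or REQ_OP_DRV_OUT, OR-ed with any REQ_FAILFAST_* flags), and the less common parameters move into an optional struct scsi_exec_args. A minimal before/after sketch of the conversion pattern, using hypothetical caller-side names (sdev, cdb, buf, len); the signature and exec_args fields are taken from the hunks below:

    struct scsi_sense_hdr sshdr;
    const struct scsi_exec_args exec_args = {
            .sshdr = &sshdr,        /* optional: parsed sense data */
    };
    int result;

    /* Before: direction as a DMA enum, sense/flags as trailing arguments */
    result = scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buf, len,
                          NULL, &sshdr, 10 * HZ, 5, 0, 0, NULL);

    /* After: direction encoded in the op, extras gathered in exec_args */
    result = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, len,
                              10 * HZ, 5, &exec_args);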
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-scsi.c | 23
-rw-r--r--  drivers/hwmon/drivetemp.c | 11
-rw-r--r--  drivers/scsi/3w-sas.c | 12
-rw-r--r--  drivers/scsi/3w-sas.h | 4
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 5
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 3
-rw-r--r--  drivers/scsi/ch.c | 30
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 34
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 32
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 26
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 13
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 22
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 12
-rw-r--r--  drivers/scsi/elx/libefc_sli/sli4.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 9
-rw-r--r--  drivers/scsi/ips.c | 11
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 88
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 125
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 63
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 17
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 94
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 83
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vmid.c | 41
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 16
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 3
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2
-rw-r--r--  drivers/scsi/mvumi.c | 4
-rw-r--r--  drivers/scsi/mvumi.h | 6
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 46
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 51
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.c | 96
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif_bsg.h | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 407
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 97
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 110
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 107
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 302
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 38
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 61
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 106
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 13
-rw-r--r--  drivers/scsi/scsi.c | 12
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 7
-rw-r--r--  drivers/scsi/scsi_lib.c | 78
-rw-r--r--  drivers/scsi/scsi_scan.c | 26
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 31
-rw-r--r--  drivers/scsi/sd.c | 83
-rw-r--r--  drivers/scsi/sd_zbc.c | 8
-rw-r--r--  drivers/scsi/ses.c | 14
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 3
-rw-r--r--  drivers/scsi/snic/snic_debugfs.c | 4
-rw-r--r--  drivers/scsi/sr.c | 11
-rw-r--r--  drivers/scsi/sr_ioctl.c | 17
-rw-r--r--  drivers/scsi/virtio_scsi.c | 4
-rw-r--r--  drivers/target/target_core_pscsi.c | 12
-rw-r--r--  drivers/ufs/core/Makefile | 2
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 431
-rw-r--r--  drivers/ufs/core/ufs_bsg.c | 144
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h | 109
-rw-r--r--  drivers/ufs/core/ufshcd.c | 863
-rw-r--r--  drivers/ufs/core/ufshpb.c | 4
-rw-r--r--  drivers/ufs/host/Kconfig | 19
-rw-r--r--  drivers/ufs/host/Makefile | 1
-rw-r--r--  drivers/ufs/host/ufs-exynos.c | 10
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 548
-rw-r--r--  drivers/ufs/host/ufs-qcom.h | 97
-rw-r--r--  drivers/ufs/host/ufs-sprd.c | 458
-rw-r--r--  drivers/ufs/host/ufs-sprd.h | 85
88 files changed, 3710 insertions(+), 1761 deletions(-)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index aa338f10cef2..e093c7a7deeb 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -383,8 +383,12 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[4], *argbuf = NULL;
int argsize = 0;
- enum dma_data_direction data_dir;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .sense = sensebuf,
+ .sense_len = sizeof(sensebuf),
+ };
int cmd_result;
if (arg == NULL)
@@ -407,11 +411,9 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
block count in sector count field */
- data_dir = DMA_FROM_DEVICE;
} else {
scsi_cmd[1] = (3 << 1); /* Non-data */
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
- data_dir = DMA_NONE;
}
scsi_cmd[0] = ATA_16;
@@ -429,9 +431,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
- cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
- sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
-
+ cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, argbuf,
+ argsize, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
@@ -491,6 +492,11 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
u8 args[7];
struct scsi_sense_hdr sshdr;
int cmd_result;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .sense = sensebuf,
+ .sense_len = sizeof(sensebuf),
+ };
if (arg == NULL)
return -EINVAL;
@@ -513,9 +519,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
- cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
- sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
-
+ cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, NULL,
+ 0, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 5bac2b0fc7bb..8e5759b42390 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -164,7 +164,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
u8 lba_low, u8 lba_mid, u8 lba_high)
{
u8 scsi_cmd[MAX_COMMAND_SIZE];
- int data_dir;
+ enum req_op op;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
@@ -175,7 +175,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
* field.
*/
scsi_cmd[2] = 0x06;
- data_dir = DMA_TO_DEVICE;
+ op = REQ_OP_DRV_OUT;
} else {
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
/*
@@ -183,7 +183,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
* field.
*/
scsi_cmd[2] = 0x0e;
- data_dir = DMA_FROM_DEVICE;
+ op = REQ_OP_DRV_IN;
}
scsi_cmd[4] = feature;
scsi_cmd[6] = 1; /* 1 sector */
@@ -192,9 +192,8 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[12] = lba_high;
scsi_cmd[14] = ata_command;
- return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
- st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
- NULL);
+ return scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+ ATA_SECT_SIZE, HZ, 5, NULL);
}
static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 3ebe66151dcb..f41c93454f0c 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -690,7 +690,7 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
newcommand->request_id__lunl =
cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
if (length) {
- newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
}
newcommand->sgl_entries__lunh =
@@ -702,7 +702,7 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
/* Load the sg list */
sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
- sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
sgl->length = TW_CPU_TO_SGL(length);
oldcommand->size += pae;
oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
@@ -748,7 +748,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
retval = -ENOMEM;
goto out2;
@@ -757,7 +757,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache)))
goto out3;
/* See which ioctl we are doing */
@@ -815,11 +815,11 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
}
/* Now copy the entire response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), cpu_addr, dma_handle);
out2:
mutex_unlock(&tw_dev->ioctl_lock);
out:
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index b0508039a280..096dec29e2ac 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -335,7 +335,7 @@ typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
char padding[488];
TW_Command_Full firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_Ioctl_Buf_Apache;
/* GetParam descriptor */
@@ -344,7 +344,7 @@ typedef struct {
unsigned short parameter_id;
unsigned short parameter_size_bytes;
unsigned short actual_parameter_size_bytes;
- unsigned char data[1];
+ unsigned char data[];
} TW_Param_Apache;
/* Compatibility information structure */
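The flexible-array conversion above is what eliminates the scattered "- 1" arithmetic in the 3w-sas.c hunks: with a one-element trailing array, sizeof() already counts one byte of payload, so every size calculation had to subtract it back out; a flexible-array member contributes nothing to sizeof(). A sketch of the general pattern with a hypothetical struct:

    struct msg_old { char hdr[4]; char data[1]; };  /* sizeof() == 5 */
    struct msg_new { char hdr[4]; char data[]; };   /* sizeof() == 4 */

    /* old: alloc_len = sizeof(struct msg_old) + payload_len - 1; */
    /* new: alloc_len = sizeof(struct msg_new) + payload_len;     */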
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 4d4cb47b3846..24c049eff157 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -818,8 +818,8 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
int aac_probe_container(struct aac_dev *dev, int cid)
{
- struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
- struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
+ struct aac_cmd_priv *cmd_priv;
+ struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd) + sizeof(*cmd_priv), GFP_KERNEL);
struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
int status;
@@ -838,6 +838,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
while (scsicmd->device == scsidev)
schedule();
kfree(scsidev);
+ cmd_priv = aac_priv(scsicmd);
status = cmd_priv->status;
kfree(scsicmd);
return status;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index ed119a3f6f2e..7f0208300110 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -50,6 +50,9 @@ static int asd_map_scatterlist(struct sas_task *task,
dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
task->total_xfer_len,
task->data_dir);
+ if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
+ return -ENOMEM;
+
sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
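The added check follows the DMA API contract: a streaming mapping created by dma_map_single() can fail and must be validated with dma_mapping_error() before the address is handed to hardware. The general pattern, with hypothetical dev/buf/len names:

    dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, dma))
            return -ENOMEM;         /* do not program 'dma' into the device */
    /* ... start I/O; on completion: */
    dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);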
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 7ab29eaec6f3..72fe6df78bc5 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -184,20 +184,21 @@ static int ch_find_errno(struct scsi_sense_hdr *sshdr)
static int
ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
- void *buffer, unsigned buflength,
- enum dma_data_direction direction)
+ void *buffer, unsigned int buflength, enum req_op op)
{
int errno, retries = 0, timeout, result;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move;
retry:
errno = 0;
- result = scsi_execute_req(ch->device, cmd, direction, buffer,
- buflength, &sshdr, timeout * HZ,
- MAX_RETRIES, NULL);
+ result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength,
+ timeout * HZ, MAX_RETRIES, &exec_args);
if (result < 0)
return result;
if (scsi_sense_valid(&sshdr)) {
@@ -254,7 +255,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
cmd[5] = 1;
cmd[9] = 255;
if (0 == (result = ch_do_scsi(ch, cmd, 12,
- buffer, 256, DMA_FROM_DEVICE))) {
+ buffer, 256, REQ_OP_DRV_IN))) {
if (((buffer[16] << 8) | buffer[17]) != elem) {
DPRINTK("asked for element 0x%02x, got 0x%02x\n",
elem,(buffer[16] << 8) | buffer[17]);
@@ -284,7 +285,7 @@ ch_init_elem(scsi_changer *ch)
memset(cmd,0,sizeof(cmd));
cmd[0] = INITIALIZE_ELEMENT_STATUS;
cmd[1] = (ch->device->lun & 0x7) << 5;
- err = ch_do_scsi(ch, cmd, 6, NULL, 0, DMA_NONE);
+ err = ch_do_scsi(ch, cmd, 6, NULL, 0, REQ_OP_DRV_IN);
VPRINTK(KERN_INFO, "... finished\n");
return err;
}
@@ -306,10 +307,10 @@ ch_readconfig(scsi_changer *ch)
cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = 0x1d;
cmd[4] = 255;
- result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
+ result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN);
if (0 != result) {
cmd[1] |= (1<<3);
- result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
+ result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN);
}
if (0 == result) {
ch->firsts[CHET_MT] =
@@ -434,7 +435,7 @@ ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
cmd[4] = (elem >> 8) & 0xff;
cmd[5] = elem & 0xff;
cmd[8] = rotate ? 1 : 0;
- return ch_do_scsi(ch, cmd, 10, NULL, 0, DMA_NONE);
+ return ch_do_scsi(ch, cmd, 10, NULL, 0, REQ_OP_DRV_IN);
}
static int
@@ -455,7 +456,7 @@ ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
cmd[6] = (dest >> 8) & 0xff;
cmd[7] = dest & 0xff;
cmd[10] = rotate ? 1 : 0;
- return ch_do_scsi(ch, cmd, 12, NULL,0, DMA_NONE);
+ return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN);
}
static int
@@ -481,7 +482,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
cmd[9] = dest2 & 0xff;
cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);
- return ch_do_scsi(ch, cmd, 12, NULL, 0, DMA_NONE);
+ return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN);
}
static void
@@ -531,7 +532,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
memcpy(buffer,tag,32);
ch_check_voltag(buffer);
- result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE);
+ result = ch_do_scsi(ch, cmd, 12, buffer, 256, REQ_OP_DRV_OUT);
kfree(buffer);
return result;
}
@@ -799,8 +800,7 @@ static long ch_ioctl(struct file *file,
ch_cmd[5] = 1;
ch_cmd[9] = 255;
- result = ch_do_scsi(ch, ch_cmd, 12,
- buffer, 256, DMA_FROM_DEVICE);
+ result = ch_do_scsi(ch, ch_cmd, 12, buffer, 256, REQ_OP_DRV_IN);
if (!result) {
cge.cge_status = buffer[18];
cge.cge_flags = 0;
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index df0ebabbf387..22cfc2e1dfb9 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -308,19 +308,19 @@ out:
* @lli: LUN destined for capacity request.
*
* The READ_CAP16 can take quite a while to complete. Should an EEH occur while
- * in scsi_execute(), the EEH handler will attempt to recover. As part of the
- * recovery, the handler drains all currently running ioctls, waiting until they
- * have completed before proceeding with a reset. As this routine is used on the
- * ioctl path, this can create a condition where the EEH handler becomes stuck,
- * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
- * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
- * This will allow the EEH handler to proceed with a recovery while this thread
- * is still running. Once the scsi_execute() returns, reacquire the ioctl read
- * semaphore and check the adapter state in case it changed while inside of
- * scsi_execute(). The state check will wait if the adapter is still being
- * recovered or return a failure if the recovery failed. In the event that the
- * adapter reset failed, simply return the failure as the ioctl would be unable
- * to continue.
+ * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
+ * the recovery, the handler drains all currently running ioctls, waiting until
+ * they have completed before proceeding with a reset. As this routine is used
+ * on the ioctl path, this can create a condition where the EEH handler becomes
+ * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
+ * temporarily unmark this thread as an ioctl thread by releasing the ioctl
+ * read semaphore. This will allow the EEH handler to proceed with a recovery
+ * while this thread is still running. Once the scsi_execute_cmd() returns,
+ * reacquire the ioctl read semaphore and check the adapter state in case it
+ * changed while inside of scsi_execute_cmd(). The state check will wait if the
+ * adapter is still being recovered or return a failure if the recovery failed.
+ * In the event that the adapter reset failed, simply return the failure as the
+ * ioctl would be unable to continue.
*
* Note that the above puts a requirement on this routine to only be called on
* an ioctl thread.
@@ -333,6 +333,9 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
struct device *dev = &cfg->dev->dev;
struct glun_info *gli = lli->parent;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
int rc = 0;
@@ -357,9 +360,8 @@ retry:
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
- result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
- CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
- 0, 0, NULL);
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
+ CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 5c74dc7c2288..9caabf550436 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -397,19 +397,19 @@ static int init_vlun(struct llun_info *lli)
* @nblks: Number of logical blocks to write same.
*
* The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
- * while in scsi_execute(), the EEH handler will attempt to recover. As part of
- * the recovery, the handler drains all currently running ioctls, waiting until
- * they have completed before proceeding with a reset. As this routine is used
- * on the ioctl path, this can create a condition where the EEH handler becomes
- * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
- * temporarily unmark this thread as an ioctl thread by releasing the ioctl read
- * semaphore. This will allow the EEH handler to proceed with a recovery while
- * this thread is still running. Once the scsi_execute() returns, reacquire the
- * ioctl read semaphore and check the adapter state in case it changed while
- * inside of scsi_execute(). The state check will wait if the adapter is still
- * being recovered or return a failure if the recovery failed. In the event that
- * the adapter reset failed, simply return the failure as the ioctl would be
- * unable to continue.
+ * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As
+ * part of the recovery, the handler drains all currently running ioctls,
+ * waiting until they have completed before proceeding with a reset. As this
+ * routine is used on the ioctl path, this can create a condition where the
+ * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To
+ * avoid this behavior, temporarily unmark this thread as an ioctl thread by
+ * releasing the ioctl read semaphore. This will allow the EEH handler to
+ * proceed with a recovery while this thread is still running. Once the
+ * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the
+ * adapter state in case it changed while inside of scsi_execute_cmd(). The
+ * state check will wait if the adapter is still being recovered or return a
+ * failure if the recovery failed. In the event that the adapter reset failed,
+ * simply return the failure as the ioctl would be unable to continue.
*
* Note that the above puts a requirement on this routine to only be called on
* an ioctl thread.
@@ -450,9 +450,9 @@ static int write_same16(struct scsi_device *sdev,
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
- result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
- CMD_BUFSIZE, NULL, NULL, to,
- CMD_RETRIES, 0, 0, NULL);
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT,
+ cmd_buf, CMD_BUFSIZE, to,
+ CMD_RETRIES, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 29a2865b8e2e..362fa631f39b 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -127,8 +127,11 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
u8 cdb[MAX_COMMAND_SIZE];
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
/* Prepare the command. */
memset(cdb, 0x0, MAX_COMMAND_SIZE);
@@ -139,9 +142,9 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
cdb[1] = MI_REPORT_TARGET_PGS;
put_unaligned_be32(bufflen, &cdb[6]);
- return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL,
- sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
+ return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen,
+ ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, &exec_args);
}
/*
@@ -157,8 +160,11 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
u8 cdb[MAX_COMMAND_SIZE];
unsigned char stpg_data[8];
int stpg_len = 8;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
/* Prepare the data buffer */
memset(stpg_data, 0, stpg_len);
@@ -171,9 +177,9 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
cdb[1] = MO_SET_TARGET_PGS;
put_unaligned_be32(stpg_len, &cdb[6]);
- return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL,
- sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
+ return scsi_execute_cmd(sdev, cdb, opf, stpg_data,
+ stpg_len, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, &exec_args);
}
static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 2e21ab447873..3cf88db2d5b2 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -239,8 +239,11 @@ static int send_trespass_cmd(struct scsi_device *sdev,
unsigned char cdb[MAX_COMMAND_SIZE];
int err, res = SCSI_DH_OK, len;
struct scsi_sense_hdr sshdr;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
page22 = short_trespass;
@@ -263,9 +266,9 @@ static int send_trespass_cmd(struct scsi_device *sdev,
BUG_ON((len > CLARIION_BUFFER_SIZE));
memcpy(csdev->buffer, page22, len);
- err = scsi_execute(sdev, cdb, DMA_TO_DEVICE, csdev->buffer, len, NULL,
- &sshdr, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
- req_flags, 0, NULL);
+ err = scsi_execute_cmd(sdev, cdb, opf, csdev->buffer, len,
+ CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
+ &exec_args);
if (err) {
if (scsi_sense_valid(&sshdr))
res = trespass_endio(sdev, &sshdr);
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 0d2cfa60aa06..5f2f943d926c 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -83,12 +83,15 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
unsigned char cmd[6] = { TEST_UNIT_READY };
struct scsi_sense_hdr sshdr;
int ret = SCSI_DH_OK, res;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
retry:
- res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
+ res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
+ HP_SW_RETRIES, &exec_args);
if (res) {
if (scsi_sense_valid(&sshdr))
ret = tur_done(sdev, h, &sshdr);
@@ -121,12 +124,15 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_device *sdev = h->sdev;
int res, rc = SCSI_DH_OK;
int retry_cnt = HP_SW_RETRIES;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
retry:
- res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
+ res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
+ HP_SW_RETRIES, &exec_args);
if (res) {
if (!scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_WARNING, sdev,
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index bf8754741f85..c5538645057a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -536,8 +536,11 @@ static void send_mode_select(struct work_struct *work)
unsigned char cdb[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
unsigned int data_size;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -555,9 +558,8 @@ static void send_mode_select(struct work_struct *work)
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
- if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select,
- data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ,
- RDAC_RETRIES, req_flags, 0, NULL)) {
+ if (scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
+ RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args)) {
err = mode_select_handle_sense(sdev, &sshdr);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index b8c048cdb17f..8f96049f62dd 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -4,7 +4,7 @@
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*/
-/**
+/*
* All common (i.e. transport-independent) SLI-4 functions are implemented
* in this file.
*/
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 1c8040d250ea..198d3f20d682 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3912,7 +3912,6 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
{
int bsize_elem, i, result = 0;
struct scatterlist *sg;
- void *kaddr;
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
@@ -3923,9 +3922,7 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
buffer += bsize_elem) {
struct page *page = sg_page(sg);
- kaddr = kmap(page);
- memcpy(kaddr, buffer, bsize_elem);
- kunmap(page);
+ memcpy_to_page(page, 0, buffer, bsize_elem);
sg->length = bsize_elem;
@@ -3938,9 +3935,7 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
if (len % bsize_elem) {
struct page *page = sg_page(sg);
- kaddr = kmap(page);
- memcpy(kaddr, buffer, len % bsize_elem);
- kunmap(page);
+ memcpy_to_page(page, 0, buffer, len % bsize_elem);
sg->length = len % bsize_elem;
}
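memcpy_to_page() folds the kmap/memcpy/kunmap triple into one helper that uses a local (per-thread) mapping for the duration of the copy. Roughly, the open-coded equivalent is:

    /* memcpy_to_page(page, offset, from, len) is approximately: */
    void *kaddr = kmap_local_page(page);

    memcpy(kaddr + offset, from, len);
    flush_dcache_page(page);        /* keep D-cache coherent on aliasing caches */
    kunmap_local(kaddr);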
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 16419aeec02d..bb206509265e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1499,17 +1499,16 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
struct scatterlist *sg = scsi_sglist(SC);
char *buffer;
- /* kmap_atomic() ensures addressability of the user buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg_page(sg)) + sg->offset;
- if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
- buffer[2] == 'P' && buffer[3] == 'P') {
- kunmap_atomic(buffer - sg->offset);
+ buffer = kmap_local_page(sg_page(sg)) + sg->offset;
+ if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+ buffer[2] == 'P' && buffer[3] == 'P') {
+ kunmap_local(buffer);
local_irq_restore(flags);
return 1;
}
- kunmap_atomic(buffer - sg->offset);
+ kunmap_local(buffer);
local_irq_restore(flags);
}
return 0;
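Unlike kmap_atomic(), kmap_local_page() does not disable preemption or pagefaults as a side effect; the mapping is thread-local and survives a context switch. kunmap_local() also accepts any address inside the mapping, which is why the manual "buffer - sg->offset" rewind disappears above. The resulting idiom, with a hypothetical scatterlist entry sg:

    char *buf = kmap_local_page(sg_page(sg)) + sg->offset;

    /* ... inspect buf ... */
    kunmap_local(buf);      /* any address within the mapped page is fine */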
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 12d9bdd11a4c..77714a495cbb 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -238,7 +238,17 @@ static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
return to_sas_internal(dev->port->ha->core.shost->transportt);
}
-static int sas_get_ata_command_set(struct domain_device *dev);
+static int sas_get_ata_command_set(struct domain_device *dev)
+{
+ struct ata_taskfile tf;
+
+ if (dev->dev_type == SAS_SATA_PENDING)
+ return ATA_DEV_UNKNOWN;
+
+ ata_tf_from_fis(dev->frame_rcvd, &tf);
+
+ return ata_dev_classify(&tf);
+}
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
@@ -636,20 +646,6 @@ void sas_ata_task_abort(struct sas_task *task)
complete(waiting);
}
-static int sas_get_ata_command_set(struct domain_device *dev)
-{
- struct dev_to_host_fis *fis =
- (struct dev_to_host_fis *) dev->frame_rcvd;
- struct ata_taskfile tf;
-
- if (dev->dev_type == SAS_SATA_PENDING)
- return ATA_DEV_UNKNOWN;
-
- ata_tf_from_fis((const u8 *)fis, &tf);
-
- return ata_dev_classify(&tf);
-}
-
void sas_probe_sata(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
@@ -678,6 +674,68 @@ void sas_probe_sata(struct asd_sas_port *port)
}
+int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id)
+{
+ struct sas_rphy *rphy;
+ int ret;
+
+ if (child->linkrate > parent->min_linkrate) {
+ struct sas_phy *cphy = child->phy;
+ enum sas_linkrate min_prate = cphy->minimum_linkrate,
+ parent_min_lrate = parent->min_linkrate,
+ min_linkrate = (min_prate > parent_min_lrate) ?
+ parent_min_lrate : 0;
+ struct sas_phy_linkrates rates = {
+ .maximum_linkrate = parent->min_linkrate,
+ .minimum_linkrate = min_linkrate,
+ };
+
+ pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ ret = sas_smp_phy_control(parent, phy_id,
+ PHY_FUNC_LINK_RESET, &rates);
+ if (ret) {
+ pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
+ SAS_ADDR(child->sas_addr), phy_id, ret);
+ return ret;
+ }
+ pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ child->linkrate = child->min_linkrate;
+ }
+ ret = sas_get_ata_info(child, phy);
+ if (ret)
+ return ret;
+
+ sas_init_dev(child);
+ ret = sas_ata_init(child);
+ if (ret)
+ return ret;
+
+ rphy = sas_end_device_alloc(phy->port);
+ if (!rphy)
+ return -ENOMEM;
+
+ rphy->identify.phy_identifier = phy_id;
+ child->rphy = rphy;
+ get_device(&rphy->dev);
+
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
+
+ ret = sas_discover_sata(child);
+ if (ret) {
+ pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, ret);
+ sas_rphy_free(child->rphy);
+ list_del(&child->disco_list_node);
+ return ret;
+ }
+
+ return 0;
+}
+
static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
struct domain_device *dev, *n;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d5bc1314c341..72fdb2e5d047 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -455,14 +455,8 @@ static void sas_discover_domain(struct work_struct *work)
break;
case SAS_SATA_DEV:
case SAS_SATA_PM:
-#ifdef CONFIG_SCSI_SAS_ATA
error = sas_discover_sata(dev);
break;
-#else
- pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
- fallthrough;
-#endif
- /* Fall through - only for the #else condition above. */
default:
error = -ENXIO;
pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index a04cad620e93..dc670304f181 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -751,13 +751,46 @@ static void sas_ex_get_linkrate(struct domain_device *parent,
child->pathways = min(child->pathways, parent->pathways);
}
+static int sas_ex_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id)
+{
+ struct sas_rphy *rphy;
+ int res;
+
+ child->dev_type = SAS_END_DEVICE;
+ rphy = sas_end_device_alloc(phy->port);
+ if (!rphy)
+ return -ENOMEM;
+
+ child->tproto = phy->attached_tproto;
+ sas_init_dev(child);
+
+ child->rphy = rphy;
+ get_device(&rphy->dev);
+ rphy->identify.phy_identifier = phy_id;
+ sas_fill_in_rphy(child, rphy);
+
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
+
+ res = sas_notify_lldd_dev_found(child);
+ if (res) {
+ pr_notice("notify lldd for device %016llx at %016llx:%02d returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, res);
+ sas_rphy_free(child->rphy);
+ list_del(&child->disco_list_node);
+ return res;
+ }
+
+ return 0;
+}
+
static struct domain_device *sas_ex_discover_end_dev(
struct domain_device *parent, int phy_id)
{
struct expander_device *parent_ex = &parent->ex_dev;
struct ex_phy *phy = &parent_ex->ex_phy[phy_id];
struct domain_device *child = NULL;
- struct sas_rphy *rphy;
int res;
if (phy->attached_sata_host || phy->attached_sata_ps)
@@ -785,99 +818,23 @@ static struct domain_device *sas_ex_discover_end_dev(
sas_ex_get_linkrate(parent, child, phy);
sas_device_set_phy(child, phy->port);
-#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
- if (child->linkrate > parent->min_linkrate) {
- struct sas_phy *cphy = child->phy;
- enum sas_linkrate min_prate = cphy->minimum_linkrate,
- parent_min_lrate = parent->min_linkrate,
- min_linkrate = (min_prate > parent_min_lrate) ?
- parent_min_lrate : 0;
- struct sas_phy_linkrates rates = {
- .maximum_linkrate = parent->min_linkrate,
- .minimum_linkrate = min_linkrate,
- };
- int ret;
-
- pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
- SAS_ADDR(child->sas_addr), phy_id);
- ret = sas_smp_phy_control(parent, phy_id,
- PHY_FUNC_LINK_RESET, &rates);
- if (ret) {
- pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
- SAS_ADDR(child->sas_addr), phy_id, ret);
- goto out_free;
- }
- pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
- SAS_ADDR(child->sas_addr), phy_id);
- child->linkrate = child->min_linkrate;
- }
- res = sas_get_ata_info(child, phy);
- if (res)
- goto out_free;
-
- sas_init_dev(child);
- res = sas_ata_init(child);
- if (res)
- goto out_free;
- rphy = sas_end_device_alloc(phy->port);
- if (!rphy)
- goto out_free;
- rphy->identify.phy_identifier = phy_id;
-
- child->rphy = rphy;
- get_device(&rphy->dev);
-
- list_add_tail(&child->disco_list_node, &parent->port->disco_list);
-
- res = sas_discover_sata(child);
- if (res) {
- pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
- SAS_ADDR(child->sas_addr),
- SAS_ADDR(parent->sas_addr), phy_id, res);
- goto out_list_del;
- }
- } else
-#endif
- if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
- child->dev_type = SAS_END_DEVICE;
- rphy = sas_end_device_alloc(phy->port);
- /* FIXME: error handling */
- if (unlikely(!rphy))
- goto out_free;
- child->tproto = phy->attached_tproto;
- sas_init_dev(child);
-
- child->rphy = rphy;
- get_device(&rphy->dev);
- rphy->identify.phy_identifier = phy_id;
- sas_fill_in_rphy(child, rphy);
-
- list_add_tail(&child->disco_list_node, &parent->port->disco_list);
-
- res = sas_discover_end_dev(child);
- if (res) {
- pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n",
- SAS_ADDR(child->sas_addr),
- SAS_ADDR(parent->sas_addr), phy_id, res);
- goto out_list_del;
- }
+ res = sas_ata_add_dev(parent, phy, child, phy_id);
+ } else if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
+ res = sas_ex_add_dev(parent, phy, child, phy_id);
} else {
pr_notice("target proto 0x%x at %016llx:0x%x not handled\n",
phy->attached_tproto, SAS_ADDR(parent->sas_addr),
phy_id);
- goto out_free;
+ res = -ENODEV;
}
+ if (res)
+ goto out_free;
+
list_add_tail(&child->siblings, &parent_ex->children);
return child;
- out_list_del:
- sas_rphy_free(child->rphy);
- list_del(&child->disco_list_node);
- spin_lock_irq(&parent->port->dev_list_lock);
- list_del(&child->dev_list_node);
- spin_unlock_irq(&parent->port->dev_list_lock);
out_free:
sas_port_delete(phy->port);
out_err:
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9ad233b40a9e..cf55f8e3bd9f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1592,8 +1592,6 @@ struct lpfc_hba {
struct timer_list cpuhp_poll_timer;
struct list_head poll_list; /* slowpath eq polling list */
#define LPFC_POLL_HB 1 /* slowpath heartbeat */
-#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
-#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
char os_host_name[MAXHOSTNAMELEN];
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 77e1b2911cb4..76c3434f8976 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1905,8 +1905,7 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
goto out_free_rdp;
}
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16);
- chbuf[16] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16);
len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf);
len += scnprintf(buf + len, PAGE_SIZE - len,
@@ -1914,17 +1913,13 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
(uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI],
(uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1],
(uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]);
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16);
- chbuf[16] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16);
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf);
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16);
- chbuf[16] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16);
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf);
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4);
- chbuf[4] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4);
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf);
- strncpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8);
- chbuf[8] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8);
len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf);
len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n",
(uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]);
@@ -1941,33 +1936,25 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
&rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7];
len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t");
- if (*(uint8_t *)trasn_code_byte7 == 0) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Unknown\n");
- } else {
- if (trasn_code_byte7->fc_sp_100MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "1 ");
- if (trasn_code_byte7->fc_sp_200mb)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "2 ");
- if (trasn_code_byte7->fc_sp_400MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "4 ");
- if (trasn_code_byte7->fc_sp_800MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "8 ");
- if (trasn_code_byte7->fc_sp_1600MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "16 ");
- if (trasn_code_byte7->fc_sp_3200MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "32 ");
- if (trasn_code_byte7->speed_chk_ecc)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "64 ");
- len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n");
- }
+ if (*(uint8_t *)trasn_code_byte7 == 0) {
+ len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n");
+ } else {
+ if (trasn_code_byte7->fc_sp_100MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "1 ");
+ if (trasn_code_byte7->fc_sp_200mb)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "2 ");
+ if (trasn_code_byte7->fc_sp_400MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "4 ");
+ if (trasn_code_byte7->fc_sp_800MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "8 ");
+ if (trasn_code_byte7->fc_sp_1600MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "16 ");
+ if (trasn_code_byte7->fc_sp_3200MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "32 ");
+ if (trasn_code_byte7->speed_chk_ecc)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "64 ");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n");
+ }
temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 |
rdp_context->page_a2[SFF_TEMPERATURE_B0]);
vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8928f016d09e..976fd5ee7f7e 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -253,7 +253,6 @@ int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
uint32_t len);
void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
-int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
void lpfc_sli4_poll_hbtimer(struct timer_list *t);
void lpfc_sli4_start_polling(struct lpfc_queue *q);
void lpfc_sli4_stop_polling(struct lpfc_queue *q);
@@ -684,6 +683,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
union lpfc_vmid_io_tag *tag);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+void lpfc_reinit_vmid(struct lpfc_vport *vport);
void lpfc_sli_rpi_release(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 919741bbe267..569639dc8b2c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1123,6 +1123,9 @@ stop_rr_fcf_flogi:
if (sp->cmn.priority_tagging)
vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
LPFC_VMID_TYPE_PRIO);
+ /* reinitialize the VMID datastructure before returning */
+ if (lpfc_is_vmid_enabled(phba))
+ lpfc_reinit_vmid(vport);
/*
* Address a timing race with dev_loss. If dev_loss is active on
@@ -2373,15 +2376,30 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI failed */
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
- "data: x%x\n",
+ "data: x%x x%x\n",
ndlp->nlp_DID, ulp_status,
- ulp_word4, ndlp->fc4_prli_sent);
+ ulp_word4, ndlp->nlp_state,
+ ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+ /* The following condition catches an inflight transition
+ * mismatch typically caused by an RSCN. Skip any
+ * processing to allow recovery.
+ */
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "2784 PRLI cmpl: state mismatch "
+ "DID x%06x nstate x%x nflag x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ goto out;
+ }
+
/*
* For P2P topology, retain the node so that PLOGI can be
* attempted on it again.
@@ -4673,6 +4691,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* the nameserver fails */
maxretry = 0;
delay = 100;
+ } else if (cmd == ELS_CMD_PRLI &&
+ ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
+ /* State-command disagreement. The PRLI was
+ * failed with an invalid rpi meaning there
+ * some unexpected state change. Don't retry.
+ */
+ maxretry = 0;
+ retry = 0;
+ break;
}
retry = 1;
break;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 80375d73b732..a6df0a5b4006 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1129,21 +1129,6 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct lpfc_nodelist *ndlp, *next_ndlp;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* It's possible the FLOGI to the fabric node never
- * successfully completed and never registered with the
- * transport. In this case there is no way to clean up
- * the node.
- */
- if (ndlp->nlp_DID == Fabric_DID) {
- if (ndlp->nlp_prev_state ==
- NLP_STE_UNUSED_NODE &&
- !ndlp->fc4_xpt_flags)
- lpfc_nlp_put(ndlp);
- }
- continue;
- }
-
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
((vport->port_type == LPFC_NPIV_PORT) &&
((ndlp->nlp_DID == NameServer_DID) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index fb3504dbb899..58fa39c403a0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -4201,6 +4201,8 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5
#define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6
#define LPFC_FC_LA_TYPE_TRUNKING_EVENT 0x7
+#define LPFC_FC_LA_TYPE_ACTIVATE_FAIL 0x8
+#define LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT 0x9
#define lpfc_acqe_fc_la_port_type_SHIFT 6
#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
#define lpfc_acqe_fc_la_port_type_WORD word0
@@ -4242,6 +4244,9 @@ struct lpfc_acqe_fc_la {
#define lpfc_acqe_fc_la_fault_SHIFT 0
#define lpfc_acqe_fc_la_fault_MASK 0x000000FF
#define lpfc_acqe_fc_la_fault_WORD word1
+#define lpfc_acqe_fc_la_link_status_SHIFT 8
+#define lpfc_acqe_fc_la_link_status_MASK 0x0000007F
+#define lpfc_acqe_fc_la_link_status_WORD word1
#define lpfc_acqe_fc_la_trunk_fault_SHIFT 0
#define lpfc_acqe_fc_la_trunk_fault_MASK 0x0000000F
#define lpfc_acqe_fc_la_trunk_fault_WORD word1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 389a35308be3..6eb4085a3a22 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -5190,16 +5190,25 @@ static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
struct lpfc_acqe_link *acqe_link)
{
- switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
- case LPFC_ASYNC_LINK_FAULT_NONE:
- case LPFC_ASYNC_LINK_FAULT_LOCAL:
- case LPFC_ASYNC_LINK_FAULT_REMOTE:
- case LPFC_ASYNC_LINK_FAULT_LR_LRR:
+ switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
+ case LPFC_FC_LA_TYPE_LINK_DOWN:
+ case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
+ case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
+ case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0398 Unknown link fault code: x%x\n",
- bf_get(lpfc_acqe_link_fault, acqe_link));
+ switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+ case LPFC_ASYNC_LINK_FAULT_NONE:
+ case LPFC_ASYNC_LINK_FAULT_LOCAL:
+ case LPFC_ASYNC_LINK_FAULT_REMOTE:
+ case LPFC_ASYNC_LINK_FAULT_LR_LRR:
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0398 Unknown link fault code: x%x\n",
+ bf_get(lpfc_acqe_link_fault, acqe_link));
+ break;
+ }
break;
}
}
@@ -6282,6 +6291,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
+ char *log_level;
int rc;
if (bf_get(lpfc_trailer_type, acqe_fc) !=
@@ -6313,25 +6323,70 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
phba->sli4_hba.link_state.fault =
bf_get(lpfc_acqe_link_fault, acqe_fc);
+ phba->sli4_hba.link_state.link_status =
+ bf_get(lpfc_acqe_fc_la_link_status, acqe_fc);
- if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
- LPFC_FC_LA_TYPE_LINK_DOWN)
- phba->sli4_hba.link_state.logical_speed = 0;
- else if (!phba->sli4_hba.conf_trunk)
- phba->sli4_hba.link_state.logical_speed =
+ /*
+ * Only select attention types need logical speed modification to what
+ * was previously set.
+ */
+ if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
+ phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
+ if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+ LPFC_FC_LA_TYPE_LINK_DOWN)
+ phba->sli4_hba.link_state.logical_speed = 0;
+ else if (!phba->sli4_hba.conf_trunk)
+ phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+ }
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2896 Async FC event - Speed:%dGBaud Topology:x%x "
"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
- "%dMbps Fault:%d\n",
+ "%dMbps Fault:x%x Link Status:x%x\n",
phba->sli4_hba.link_state.speed,
phba->sli4_hba.link_state.topology,
phba->sli4_hba.link_state.status,
phba->sli4_hba.link_state.type,
phba->sli4_hba.link_state.number,
phba->sli4_hba.link_state.logical_speed,
- phba->sli4_hba.link_state.fault);
+ phba->sli4_hba.link_state.fault,
+ phba->sli4_hba.link_state.link_status);
+
+ /*
+ * The following attention types are informational only, providing
+ * further details about link status. Overwrite the value of
+ * link_state.status appropriately. No further action is required.
+ */
+ if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
+ switch (phba->sli4_hba.link_state.status) {
+ case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
+ log_level = KERN_WARNING;
+ phba->sli4_hba.link_state.status =
+ LPFC_FC_LA_TYPE_LINK_DOWN;
+ break;
+ case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
+ /*
+ * During bb credit recovery establishment, receiving
+ * this attention type is normal. Link Up attention
+ * type is expected to occur before this informational
+ * attention type so keep the Link Up status.
+ */
+ log_level = KERN_INFO;
+ phba->sli4_hba.link_state.status =
+ LPFC_FC_LA_TYPE_LINK_UP;
+ break;
+ default:
+ log_level = KERN_INFO;
+ break;
+ }
+ lpfc_log_msg(phba, log_level, LOG_SLI,
+ "2992 Async FC event - Informational Link "
+ "Attention Type x%x\n",
+ bf_get(lpfc_acqe_fc_la_att_type, acqe_fc));
+ return;
+ }
+
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -13918,6 +13973,13 @@ fcponly:
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+ rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6400 Can't set dma maximum segment size\n");
+ return rc;
+ }
+
/*
* Check whether the adapter supports an embedded copy of the
* FCP CMD IU within the WQE for FCP_Ixxx commands. In order
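
For context, dma_set_max_seg_size() stores the device's largest supported DMA segment in dev->dma_parms, and the DMA mapping layer then refuses to build scatterlist segments beyond it; it fails with -EIO only when dma_parms is absent (the PCI core provides it for PCI devices). A minimal, hypothetical probe-time sketch; the helper and parameter names below are illustrative, not lpfc code:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical helper: cap per-segment DMA size to the HW-reported SGE limit. */
    static int example_cap_dma_seg(struct device *dev, unsigned int max_sge_len)
    {
            int rc;

            rc = dma_set_max_seg_size(dev, max_sge_len);
            if (rc)
                    dev_info(dev, "can't set DMA maximum segment size\n");
            return rc;
    }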
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7a1563564df7..e989f130434e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1689,7 +1689,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct lpfc_pde6 *pde6 = NULL;
struct lpfc_pde7 *pde7 = NULL;
dma_addr_t dataphysaddr, protphysaddr;
- unsigned short curr_data = 0, curr_prot = 0;
+ unsigned short curr_prot = 0;
unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
@@ -1858,7 +1858,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bpl->tus.w = le32_to_cpu(bpl->tus.w);
num_bde++;
- curr_data++;
if (split_offset)
break;
@@ -2119,7 +2118,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgpe = NULL; /* s/g prot entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t dataphysaddr, protphysaddr;
- unsigned short curr_data = 0, curr_prot = 0;
+ unsigned short curr_prot = 0;
unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
@@ -2364,7 +2363,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_offset += dma_len;
num_sge++;
- curr_data++;
if (split_offset) {
sgl++;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 182aaae60386..edbd81c3b643 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -11270,6 +11270,30 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
}
}
+inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ /*
+	 * Unlocking an irq is one of the entry points to check
+	 * for re-scheduling, but the io submission path is safe
+	 * because the midlayer does a get_cpu() to glue us in.
+	 * Flush out the invalidate queue so we can see the
+	 * updated value of the mode flag.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+	/* We will likely not see the caller's completion during
+	 * this iteration, which is acceptable: future I/Os coming
+	 * in on this eq will pick it up. Lone I/Os are handled by
+	 * a poll scheduled from the timer function, which
+	 * currently fires every 1 msec.
+	 */
+ lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+}
+
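
The refactor above collapses the old fastpath/slowpath split: both the submission paths and the heartbeat timer now call the same argument-free poll routine, and the 1 msec timer guarantees forward progress for lone I/Os. A rough, self-contained sketch of this polled-EQ shape, using hypothetical names rather than the lpfc types:

    #include <linux/compiler.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>

    enum example_eq_mode { EXAMPLE_EQ_INTERRUPT, EXAMPLE_EQ_POLL };

    struct example_eq {
            enum example_eq_mode mode;      /* flipped by IRQ/CPU-hotplug code */
            struct timer_list poll_timer;
    };

    /* Called from both the submit path and the timer; a no-op in IRQ mode. */
    static void example_poll_eq(struct example_eq *eq)
    {
            smp_rmb();      /* observe the latest mode flip */
            if (READ_ONCE(eq->mode) == EXAMPLE_EQ_POLL) {
                    /* drain completions here without re-arming the interrupt */
            }
    }

    static void example_poll_timer_fn(struct timer_list *t)
    {
            struct example_eq *eq = from_timer(eq, t, poll_timer);

            example_poll_eq(eq);
            mod_timer(&eq->poll_timer, jiffies + msecs_to_jiffies(1));
    }

    static void example_eq_start_polling(struct example_eq *eq)
    {
            timer_setup(&eq->poll_timer, example_poll_timer_fn, 0);
            mod_timer(&eq->poll_timer, jiffies + msecs_to_jiffies(1));
    }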
/**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
@@ -11309,7 +11333,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(eq);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -15625,12 +15649,11 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
struct lpfc_queue *eq;
- int i = 0;
rcu_read_lock();
list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
- i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ lpfc_sli4_poll_eq(eq);
if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
@@ -15638,33 +15661,6 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
rcu_read_unlock();
}
-inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
-{
- struct lpfc_hba *phba = eq->phba;
- int i = 0;
-
- /*
- * Unlocking an irq is one of the entry point to check
- * for re-schedule, but we are good for io submission
- * path as midlayer does a get_cpu to glue us in. Flush
- * out the invalidate queue so we can see the updated
- * value for flag.
- */
- smp_rmb();
-
- if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
- /* We will not likely get the completion for the caller
- * during this iteration but i guess that's fine.
- * Future io's coming on this eq should be able to
- * pick it up. As for the case of single io's, they
- * will be handled through a sched from polling timer
- * function which is currently triggered every 1msec.
- */
- i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
-
- return i;
-}
-
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
struct lpfc_hba *phba = eq->phba;
@@ -20819,6 +20815,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
+ int mbox_status = 0;
uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
uint32_t shdr_change_status = 0, shdr_csf = 0;
uint32_t mbox_tmo;
@@ -20864,11 +20861,15 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
wr_object->u.request.bde_count = i;
bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
+
+	/* The mbox status needs to be maintained to detect MBX_TIMEOUT. */
+ rc = mbox_status;
+
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status,
&wr_object->header.cfg_shdr.response);
@@ -20883,10 +20884,6 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
&wr_object->u.response);
}
- if (!phba->sli4_hba.intr_enable)
- mempool_free(mbox, phba->mbox_mem_pool);
- else if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3025 Write Object mailbox failed with "
@@ -20904,6 +20901,12 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
shdr_add_status_2, shdr_change_status,
shdr_csf);
+
+ if (!phba->sli4_hba.intr_enable)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ else if (mbox_status != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+
return rc;
}
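
The mbox_status rework above fixes an ownership bug: rc is later merged with the firmware sub-header status, so the old code could consult the wrong value when deciding whether to free the mailbox after a timeout (when the firmware may still complete it, freeing is a use-after-free; never freeing is a leak). Keeping the raw submission status separate answers 'who owns the buffer' independently of 'what do we report'. A runnable userspace sketch of the pattern, with stubbed-out names:

    #include <stdio.h>

    #define EXAMPLE_MBX_SUCCESS 0
    #define EXAMPLE_MBX_TIMEOUT 1

    struct example_mbox { int fw_status; int freed; };

    static int example_submit_and_wait(struct example_mbox *m) { (void)m; return EXAMPLE_MBX_SUCCESS; }
    static void example_free_mbox(struct example_mbox *m) { m->freed = 1; }

    /* Keep the raw transport status for the ownership decision; merge the
     * firmware status into the value reported to the caller. */
    static int example_issue_cmd(struct example_mbox *m)
    {
            int mbox_status = example_submit_and_wait(m);
            int rc = mbox_status;

            if (m->fw_status)
                    rc = -1;        /* report a firmware-level failure */

            if (mbox_status != EXAMPLE_MBX_TIMEOUT)
                    example_free_mbox(m);   /* on timeout the HW still owns it */

            return rc;
    }

    int main(void)
    {
            struct example_mbox m = { .fw_status = 1, .freed = 0 };

            printf("rc=%d freed=%d\n", example_issue_cmd(&m), m.freed);
            return 0;
    }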
@@ -21276,7 +21279,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
@@ -21298,7 +21301,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
@@ -21328,7 +21331,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
return WQE_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index f927c2a25d54..3b62c4032c31 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -291,8 +291,9 @@ struct lpfc_sli4_link {
uint8_t type;
uint8_t number;
uint8_t fault;
- uint32_t logical_speed;
+ uint8_t link_status;
uint16_t topology;
+ uint32_t logical_speed;
};
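
Note the field reorder: adding the u8 link_status while moving logical_speed below topology groups the narrow members together, so the new field lands in what would otherwise be padding. A standalone illustration; 'speed' and the extra u8s stand in for the struct members outside this hunk:

    #include <stdio.h>
    #include <stdint.h>

    struct naive {          /* link_status appended, order unchanged */
            uint32_t speed;
            uint8_t duplex, status, type, number, fault, link_status;
            uint32_t logical_speed;
            uint16_t topology;
    };                      /* typically 20 bytes: two 2-byte padding holes */

    struct reordered {      /* u8 run, then u16, then u32 */
            uint32_t speed;
            uint8_t duplex, status, type, number, fault, link_status;
            uint16_t topology;
            uint32_t logical_speed;
    };                      /* typically 16 bytes: no internal padding */

    int main(void)
    {
            printf("naive=%zu reordered=%zu\n",
                   sizeof(struct naive), sizeof(struct reordered));
            return 0;
    }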
struct lpfc_fcf_rec {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41a1128f8651..0238208cdd11 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.9"
+#define LPFC_DRIVER_VERSION "14.2.0.10"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2022 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2023 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index ed1d7f7b88a3..cf8ba840d0ea 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -284,3 +284,42 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
}
return rc;
}
+
+/**
+ * lpfc_reinit_vmid - Reinitialize the VMID data structures
+ * @vport: pointer to vport data structure
+ *
+ * This routine reinitializes the VMID structures after FLOGI completion.
+ *
+ * Return codes
+ * None
+ */
+void
+lpfc_reinit_vmid(struct lpfc_vport *vport)
+{
+ u32 bucket, i, cpu;
+ struct lpfc_vmid *cur;
+ struct lpfc_vmid *vmp = NULL;
+ struct hlist_node *tmp;
+
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt = 0;
+
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = &vport->vmid[i];
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ memset(vmp->host_vmid, 0, sizeof(vmp->host_vmid));
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+
+ if (vmp->last_io_time)
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(vmp->last_io_time, cpu) = 0;
+ }
+
+ /* for all elements in the hash table */
+ if (!hash_empty(vport->hash_table))
+ hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
+ hash_del(&cur->hnode);
+ write_unlock(&vport->vmid_lock);
+}
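
lpfc_reinit_vmid empties the vport hash table with hash_for_each_safe(), the variant that caches the next node so the current one can be unlinked mid-walk. A minimal sketch of that <linux/hashtable.h> idiom, with an illustrative heap-allocated entry type (lpfc's entries live in the vmid array, so it only unlinks):

    #include <linux/hashtable.h>
    #include <linux/slab.h>

    struct example_entry {
            int key;
            struct hlist_node hnode;
    };

    static DEFINE_HASHTABLE(example_table, 8);      /* 2^8 buckets */

    static void example_drain(void)
    {
            struct example_entry *cur;
            struct hlist_node *tmp;
            unsigned int bkt;

            /* _safe: tmp holds the next node before cur is unlinked */
            hash_for_each_safe(example_table, bkt, tmp, cur, hnode) {
                    hash_del(&cur->hnode);
                    kfree(cur);
            }
    }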
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 4d171f5c213f..6c7559cf1a4b 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -534,7 +534,7 @@ disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+ struct lpfc_nodelist *ndlp = NULL;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* Can't disable during an outstanding delete. */
@@ -546,17 +546,7 @@ disable_vport(struct fc_vport *fc_vport)
(void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
-
- /* Mark all nodes for discovery so we can remove them by
- * calling lpfc_cleanup_rpis(vport, 1)
- */
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- continue;
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
- }
- lpfc_cleanup_rpis(vport, 1);
+ lpfc_cleanup_rpis(vport, 0);
lpfc_stop_vport_timers(vport);
lpfc_unreg_all_rpis(vport);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index fe70f8f11435..6597e118c805 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3323,7 +3323,7 @@ static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
/* copy the io request frame as well as 8 SGEs data for r1 command*/
memcpy(r1_cmd->io_request, cmd->io_request,
(sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
- memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
+ memcpy(r1_cmd->io_request->SGLs, cmd->io_request->SGLs,
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 49e9a9048ee7..b677d80e5874 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -526,7 +526,10 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
__le32 Control; /* 0x3C */
union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
- union MPI2_SGE_IO_UNION SGL; /* 0x80 */
+ union {
+ union MPI2_SGE_IO_UNION SGL; /* 0x80 */
+ DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs);
+ };
};
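
DECLARE_FLEX_ARRAY() is needed here because C does not allow a bare flexible-array member directly inside a union; the macro wraps it so it is legal yet still lands at offset 0 of the union. The result: SGL keeps the old single-element view for existing code, while SGLs gives the multi-SGE memcpy() in megasas_prepare_secondRaid1_IO() a properly typed flexible array that bounds checkers such as FORTIFY_SOURCE can reason about. A condensed sketch of the pattern with simplified types:

    #include <linux/stddef.h>       /* DECLARE_FLEX_ARRAY() */
    #include <linux/string.h>
    #include <linux/types.h>

    struct example_sge { u64 addr; u32 len; u32 flags; };

    struct example_request {
            u32 header;
            union {
                    struct example_sge sgl; /* legacy fixed-size view */
                    DECLARE_FLEX_ARRAY(struct example_sge, sgls);
            };
    };

    /* Copying through ->sgls is understood to run past sizeof(->sgl). */
    static void example_copy_sges(struct example_request *dst,
                                  const struct example_request *src, u32 nr)
    {
            memcpy(dst->sgls, src->sgls, nr * sizeof(struct example_sge));
    }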
/*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 69061545d9d2..2ee9ea57554d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5849,6 +5849,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
+ kfree(ioc->pcie_sg_lookup);
+ ioc->pcie_sg_lookup = NULL;
+
if (ioc->config_page) {
dexitprintk(ioc,
ioc_info(ioc, "config_page(0x%p): free\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 0d8b1e942ded..efdb8178db32 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1884,7 +1884,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
diag_register.requested_buffer_size>>10);
else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
& MPT3_DIAG_BUFFER_IS_REGISTERED) {
- ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
+ ioc_info(ioc, "Trace buffer memory %d KB allocated\n",
diag_register.requested_buffer_size>>10);
if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
ioc->diag_buffer_status[
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3dcb8918618..60c65586f30e 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1841,7 +1841,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
cmd->frame->request_id = mhba->io_seq++;
cmd->request_id = cmd->frame->request_id;
mhba->tag_cmd[cmd->frame->tag] = cmd;
- frame_len = sizeof(*ib_frame) - 4 +
+ frame_len = sizeof(*ib_frame) +
ib_frame->sg_counts * sizeof(struct mvumi_sgl);
if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
struct mvumi_dyn_list_entry *dle;
@@ -2387,7 +2387,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
struct Scsi_Host *host = mhba->shost;
struct scsi_device *sdev = NULL;
int ret;
- unsigned int max_sg = (mhba->ib_max_size + 4 -
+ unsigned int max_sg = (mhba->ib_max_size -
sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
host->irq = mhba->pdev->irq;
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index a88c58787b68..1306a4abf19a 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -279,7 +279,7 @@ struct mvumi_msg_frame {
u16 request_id;
u16 reserved1;
u8 cdb[MAX_COMMAND_SIZE];
- u32 payload[1];
+ u32 payload[];
};
/*
@@ -294,7 +294,7 @@ struct mvumi_rsp_frame {
u8 req_status;
u8 rsp_flag; /* Indicates the type of Data_Payload.*/
u16 request_id;
- u32 payload[1];
+ u32 payload[];
};
struct mvumi_ob_data {
@@ -380,7 +380,7 @@ struct mvumi_hs_header {
u8 page_code;
u8 checksum;
u16 frame_length;
- u32 frame_content[1];
+ u32 frame_content[];
};
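
Turning payload[1] (and friends) into true flexible arrays is what removes the compensating '- 4'/'+ 4' byte arithmetic in mvumi.c above: sizeof(struct mvumi_msg_frame) no longer counts a phantom first element. When such a frame is allocated, the idiomatic overflow-safe size comes from struct_size() in <linux/overflow.h>; a short hedged sketch with a simplified frame type:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_frame {
            u16 frame_length;
            u32 payload[];          /* flexible array member */
    };

    static struct example_frame *example_alloc_frame(u32 nr_words)
    {
            struct example_frame *f;

            /* struct_size() == sizeof(*f) + nr_words * sizeof(u32),
             * saturating instead of wrapping on overflow. */
            f = kzalloc(struct_size(f, payload, nr_words), GFP_KERNEL);
            return f;
    }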
/*
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 73f036bed128..5c26a13ffbd2 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -61,10 +61,10 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
}
}
@@ -86,7 +86,7 @@ static ssize_t controller_fatal_error_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->controller_fatal_error);
}
static DEVICE_ATTR_RO(controller_fatal_error);
@@ -107,13 +107,13 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
} else {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
@@ -138,7 +138,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id != chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8),
@@ -164,7 +164,7 @@ static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id != chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8),
@@ -191,10 +191,10 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
}
}
@@ -215,13 +215,11 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
- );
+ return sysfs_emit(buf, "%04d\n",
+ (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16));
} else {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
- );
+ return sysfs_emit(buf, "%04d\n",
+ (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16));
}
}
static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
@@ -242,13 +240,11 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
- );
+ return sysfs_emit(buf, "%04d\n",
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF);
} else {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
- );
+ return sysfs_emit(buf, "%04d\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF);
}
}
static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
@@ -315,7 +311,7 @@ static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ return sysfs_emit(buf, "0x%016llx\n",
be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr));
}
static DEVICE_ATTR(host_sas_address, S_IRUGO,
@@ -336,7 +332,7 @@ static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level);
+ return sysfs_emit(buf, "%08xh\n", pm8001_ha->logging_level);
}
static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
@@ -517,7 +513,7 @@ static ssize_t event_log_size_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
}
static DEVICE_ATTR_RO(event_log_size);
@@ -604,7 +600,7 @@ static ssize_t non_fatal_count_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%08x",
+ return sysfs_emit(buf, "%08x\n",
pm8001_ha->non_fatal_count);
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b67ad30d56e6..70cfc94c3d43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2732,7 +2732,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_lock_irqsave(host->host_lock, flags);
/* Confirm port has not reappeared before clearing pointers. */
if (rport->port_state != FC_PORTSTATE_ONLINE) {
- fcport->rport = fcport->drport = NULL;
+ fcport->rport = NULL;
*((fc_port_t **)rport->dd_data) = NULL;
}
spin_unlock_irqrestore(host->host_lock, flags);
@@ -3171,8 +3171,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
set_bit(VPORT_DELETE, &vha->dpc_flags);
- while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
- test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
+ while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
msleep(1000);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index cd75b179410d..dba7bba788d7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -278,8 +278,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
const char *type;
int req_sg_cnt, rsp_sg_cnt;
int rval = (DID_ERROR << 16);
- uint16_t nextlid = 0;
uint32_t els_cmd = 0;
+ int qla_port_allocated = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
@@ -329,9 +329,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
/* make sure the rport is logged in,
* if not perform fabric login
*/
- if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
ql_dbg(ql_dbg_user, vha, 0x7003,
- "Failed to login port %06X for ELS passthru.\n",
+ "Port %06X is not online for ELS passthru.\n",
fcport->d_id.b24);
rval = -EIO;
goto done;
@@ -348,6 +348,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
goto done;
}
+ qla_port_allocated = 1;
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->d_id.b.al_pa =
@@ -432,7 +433,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_request->msgcode != FC_BSG_RPT_ELS)
+ if (qla_port_allocated)
qla2x00_free_fcport(fcport);
done:
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a26a373be9da..ec0e987b71fa 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -384,6 +384,13 @@ struct els_reject {
struct req_que;
struct qla_tgt_sess;
+struct qla_buf_dsc {
+ u16 tag;
+#define TAG_FREED 0xffff
+ void *buf;
+ dma_addr_t buf_dma;
+};
+
/*
* SCSI Request Block
*/
@@ -392,14 +399,16 @@ struct srb_cmd {
uint32_t request_sense_length;
uint32_t fw_sense_length;
uint8_t *request_sense_ptr;
- struct ct6_dsd *ct6_ctx;
struct crc_context *crc_ctx;
+ struct ct6_dsd ct6_ctx;
+ struct qla_buf_dsc buf_dsc;
};
/*
* SRB flag definitions
*/
#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_GOT_BUF BIT_1
#define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */
#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
@@ -660,7 +669,7 @@ enum {
struct iocb_resource {
u8 res_type;
- u8 pad;
+ u8 exch_cnt;
u16 iocb_cnt;
};
@@ -2485,7 +2494,6 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
- DSC_GNN_ID,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
@@ -2596,7 +2604,7 @@ typedef struct fc_port {
int login_retry;
- struct fc_rport *rport, *drport;
+ struct fc_rport *rport;
u32 supported_classes;
uint8_t fc4_type;
@@ -2699,7 +2707,6 @@ extern const char *const port_state_str[5];
static const char *const port_dstate_str[] = {
[DSC_DELETED] = "DELETED",
- [DSC_GNN_ID] = "GNN_ID",
[DSC_GNL] = "GNL",
[DSC_LOGIN_PEND] = "LOGIN_PEND",
[DSC_LOGIN_FAILED] = "LOGIN_FAILED",
@@ -3462,6 +3469,7 @@ struct qla_msix_entry {
int have_irq;
int in_use;
uint32_t vector;
+ uint32_t vector_base0;
uint16_t entry;
char name[30];
void *handle;
@@ -3479,7 +3487,6 @@ enum qla_work_type {
QLA_EVT_ASYNC_ADISC,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
- QLA_EVT_GPNID,
QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
@@ -3493,7 +3500,6 @@ enum qla_work_type {
QLA_EVT_GPNFT,
QLA_EVT_GPNFT_DONE,
QLA_EVT_GNNFT_DONE,
- QLA_EVT_GNNID,
QLA_EVT_GFPNID,
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
@@ -3536,15 +3542,12 @@ struct qla_work_evt {
} iosb;
struct {
port_id_t id;
- } gpnid;
- struct {
- port_id_t id;
u8 port_name[8];
u8 node_name[8];
void *pla;
u8 fc4_type;
} new_sess;
- struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
+ struct { /*Get PDB, Get Speed, update fcport, gnl */
fc_port_t *fcport;
u8 opt;
} fcport;
@@ -3721,10 +3724,27 @@ struct qla_fw_resources {
u16 iocbs_limit;
u16 iocbs_qp_limit;
u16 iocbs_used;
+ u16 exch_total;
+ u16 exch_limit;
+ u16 exch_used;
+ u16 pad;
};
#define QLA_IOCB_PCT_LIMIT 95
+struct qla_buf_pool {
+ u16 num_bufs;
+ u16 num_active;
+ u16 max_used;
+ u16 num_alloc;
+ u16 prev_max;
+ u16 pad;
+ uint32_t take_snapshot:1;
+ unsigned long *buf_map;
+ void **buf_array;
+ dma_addr_t *dma_array;
+};
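
qla_buf_pool pairs a bitmap (buf_map) with parallel arrays of virtual addresses and DMA handles, so the qla_get_buf()/qla_put_buf() helpers introduced in this series reduce to a bitmap search plus an index; the tag stored in qla_buf_dsc is that index, and TAG_FREED marks a descriptor that holds nothing. The qla2xxx implementation is not shown in this hunk, so the following is only a sketch of how such a pool typically works (locking omitted; a caller would hold the qpair lock):

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define EXAMPLE_TAG_FREED 0xffff

    struct example_pool {
            u16 num_bufs;
            unsigned long *map;     /* one bit per buffer */
            void **bufs;
            dma_addr_t *dmas;
    };

    struct example_dsc { u16 tag; void *buf; dma_addr_t buf_dma; };

    static int example_get_buf(struct example_pool *p, struct example_dsc *d)
    {
            unsigned long tag = find_first_zero_bit(p->map, p->num_bufs);

            if (tag >= p->num_bufs)
                    return -ENOMEM;         /* pool exhausted */
            set_bit(tag, p->map);
            d->tag = tag;
            d->buf = p->bufs[tag];
            d->buf_dma = p->dmas[tag];
            return 0;
    }

    static void example_put_buf(struct example_pool *p, struct example_dsc *d)
    {
            if (d->tag == EXAMPLE_TAG_FREED)
                    return;                 /* descriptor holds nothing */
            clear_bit(d->tag, p->map);
            d->tag = EXAMPLE_TAG_FREED;
            d->buf = NULL;
    }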
+
/*Queue pair data structure */
struct qla_qpair {
spinlock_t qp_lock;
@@ -3778,6 +3798,7 @@ struct qla_qpair {
struct qla_tgt_counters tgt_counters;
uint16_t cpuid;
struct qla_fw_resources fwres ____cacheline_aligned;
+ struct qla_buf_pool buf_pool;
u32 cmd_cnt;
u32 cmd_completion_cnt;
u32 prev_completion_cnt;
@@ -3938,7 +3959,6 @@ struct qlt_hw_data {
__le32 __iomem *atio_q_out;
const struct qla_tgt_func_tmpl *tgt_ops;
- struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
__le16 saved_exchange_count;
@@ -4106,6 +4126,7 @@ struct qla_hw_data {
struct req_que **req_q_map;
struct rsp_que **rsp_q_map;
struct qla_qpair **queue_pair_map;
+ struct qla_qpair **qp_cpu_map;
unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
@@ -4762,6 +4783,7 @@ struct qla_hw_data {
spinlock_t sadb_lock; /* protects list */
struct els_reject elsrej;
u8 edif_post_stop_cnt_down;
+ struct qla_vp_map *vp_map;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
@@ -4857,6 +4879,7 @@ typedef struct scsi_qla_host {
#define LOOP_READY 5
#define LOOP_DEAD 6
+ unsigned long buf_expired;
unsigned long relogin_jif;
unsigned long dpc_flags;
#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
@@ -4872,7 +4895,6 @@ typedef struct scsi_qla_host {
#define ISP_ABORT_RETRY 10 /* ISP aborted. */
#define BEACON_BLINK_NEEDED 11
#define REGISTER_FDMI_NEEDED 12
-#define FCPORT_UPDATE_NEEDED 13
#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */
#define UNLOADING 15
#define NPIV_CONFIG_NEEDED 16
@@ -5022,7 +5044,6 @@ typedef struct scsi_qla_host {
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
__le16 dport_data[4];
- struct list_head gpnid_list;
struct fab_scan scan;
uint8_t scm_fabric_connection_flags;
@@ -5064,7 +5085,7 @@ struct qla27xx_image_status {
#define SET_AL_PA 2
#define RESET_VP_IDX 3
#define RESET_AL_PA 4
-struct qla_tgt_vp_map {
+struct qla_vp_map {
uint8_t idx;
scsi_qla_host_t *vha;
};
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 777808af5634..1925cc6897b6 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -235,7 +235,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
uint16_t mb[MAX_IOCB_MB_REG];
int rc;
struct qla_hw_data *ha = vha->hw;
- u16 iocbs_used, i;
+ u16 iocbs_used, i, exch_used;
rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
if (rc != QLA_SUCCESS) {
@@ -263,13 +263,19 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
if (ql2xenforce_iocb_limit) {
/* lock is not required. It's an estimate. */
iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ exch_used = ha->base_qpair->fwres.exch_used;
for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i])
+ if (ha->queue_pair_map[i]) {
iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ exch_used += ha->queue_pair_map[i]->fwres.exch_used;
+ }
}
seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+
+	seq_printf(s, "estimate exchange used [%d] high water limit [%d]\n",
+ exch_used, ha->base_qpair->fwres.exch_limit);
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index e4240aae5f9e..ec0e20255bd3 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -480,6 +480,49 @@ void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
}
/**
+ * qla_delete_n2n_sess_and_wait - search for the N2N session, tear it down,
+ * and wait for the teardown to complete. In N2N topology, only one active
+ * session tracks the remote device.
+ * @vha: host adapter pointer
+ * Return: 0 - found the session and completed the teardown.
+ *	-EIO - timeout occurred. Caller should use a link bounce to reset.
+ */
+static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha)
+{
+ struct fc_port *fcport;
+ int rc = -EIO;
+ ulong expire = jiffies + 23 * HZ;
+
+ if (!N2N_TOPO(vha->hw))
+ return 0;
+
+ fcport = NULL;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (!fcport->n2n_flag)
+ continue;
+
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2016,
+		"%s reset sess at app start\n", __func__);
+
+ qla_edif_sa_ctl_init(vha, fcport);
+ qlt_schedule_sess_for_deletion(fcport);
+
+ while (time_before_eq(jiffies, expire)) {
+ if (fcport->disc_state != DSC_DELETE_PEND) {
+ rc = 0;
+ break;
+ }
+ msleep(1);
+ }
+
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
+ return rc;
+}
+
+/**
* qla_edif_app_start - application has announced its presence
* @vha: host adapter pointer
* @bsg_job: user request
@@ -518,18 +561,17 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fcport->n2n_link_reset_cnt = 0;
if (vha->hw->flags.n2n_fw_acc_sec) {
- list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
- qla_edif_sa_ctl_init(vha, fcport);
-
+ bool link_bounce = false;
/*
* While authentication app was not running, remote device
* could still try to login with this local port. Let's
- * clear the state and try again.
+ * reset the session, reconnect and re-authenticate.
*/
- qla2x00_wait_for_sess_deletion(vha);
+ if (qla_delete_n2n_sess_and_wait(vha))
+ link_bounce = true;
- /* bounce the link to get the other guy to relogin */
- if (!vha->hw->flags.n2n_bigger) {
+ /* bounce the link to start login */
+ if (!vha->hw->flags.n2n_bigger || link_bounce) {
set_bit(N2N_LINK_RESET, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -925,7 +967,9 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (!(fcport->flags & FCF_FCSP_DEVICE))
continue;
- tdid = app_req.remote_pid;
+ tdid.b.domain = app_req.remote_pid.domain;
+ tdid.b.area = app_req.remote_pid.area;
+ tdid.b.al_pa = app_req.remote_pid.al_pa;
ql_dbg(ql_dbg_edif, vha, 0x2058,
"APP request entry - portid=%06x.\n", tdid.b24);
@@ -2989,9 +3033,10 @@ qla28xx_start_scsi_edif(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = req_cnt;
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -3006,26 +3051,16 @@ qla28xx_start_scsi_edif(srb_t *sp)
goto queuing_error;
}
- ctx = sp->u.scmd.ct6_ctx =
- mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!ctx) {
- ql_log(ql_log_fatal, vha, 0x3010,
- "Failed to allocate ctx for cmd=%p.\n", cmd);
- goto queuing_error;
- }
-
- memset(ctx, 0, sizeof(struct ct6_dsd));
- ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
- GFP_ATOMIC, &ctx->fcp_cmnd_dma);
- if (!ctx->fcp_cmnd) {
+ if (qla_get_buf(vha, sp->qpair, &sp->u.scmd.buf_dsc)) {
ql_log(ql_log_fatal, vha, 0x3011,
- "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ "Failed to allocate buf for fcp_cmnd for cmd=%p.\n", cmd);
goto queuing_error;
}
- /* Initialize the DSD list and dma handle */
- INIT_LIST_HEAD(&ctx->dsd_list);
- ctx->dsd_use_cnt = 0;
+ sp->flags |= SRB_GOT_BUF;
+ ctx = &sp->u.scmd.ct6_ctx;
+ ctx->fcp_cmnd = sp->u.scmd.buf_dsc.buf;
+ ctx->fcp_cmnd_dma = sp->u.scmd.buf_dsc.buf_dma;
if (cmd->cmd_len > 16) {
additional_cdb_len = cmd->cmd_len - 16;
@@ -3144,7 +3179,6 @@ no_dsds:
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
- sp->flags |= SRB_FCP_CMND_DMA_VALID;
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
@@ -3176,16 +3210,12 @@ no_dsds:
return QLA_SUCCESS;
queuing_error_fcp_cmnd:
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- if (sp->u.scmd.ct6_ctx) {
- mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
- sp->u.scmd.ct6_ctx = NULL;
- }
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(lock, flags);
return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index 7cdb89ccdc6e..aa566cdb77e5 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -145,4 +145,6 @@ struct enode {
(qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
_s->disc_state == DSC_DELETED))
+#define EDIF_CAP(_ha) (ql2xsecenable && IS_QLA28XX(_ha))
+
#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 0931f4e4e127..514c265ba86e 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -89,7 +89,20 @@ struct app_plogi_reply {
struct app_pinfo_req {
struct app_id app_info;
uint8_t num_ports;
- port_id_t remote_pid;
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } remote_pid;
uint8_t version;
uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
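
The point of spelling remote_pid out byte by byte is ABI stability: port_id_t is a kernel-internal union whose layout follows host byte order, so embedding it in a structure exchanged with userspace tied the interface to kernel headers. With an explicit layout both sides agree, and the driver repacks the three bytes field by field (see the qla_edif.c hunk above) instead of assuming identical layouts. One portable alternative, shown as a hedged userspace sketch, fixes a single byte order for every host:

    #include <stdint.h>

    /* Fixed per-byte layout: meaning is set by position, not host endianness. */
    struct example_port_id {
            uint8_t domain;
            uint8_t area;
            uint8_t al_pa;
            uint8_t rsvd;
    };

    /* Field-wise repack into a 24-bit value avoids layout coupling. */
    static uint32_t example_port_id_to_b24(const struct example_port_id *id)
    {
            return ((uint32_t)id->domain << 16) |
                   ((uint32_t)id->area  <<  8) |
                    (uint32_t)id->al_pa;
    }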
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e3256e721be1..9142df876c73 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -257,6 +257,7 @@ struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport,
/*
* Global Functions in qla_mid.c source file.
*/
+extern void qla_update_vp_map(struct scsi_qla_host *, int);
extern struct scsi_host_template qla2xxx_driver_template;
extern struct scsi_transport_template *qla2xxx_transport_vport_template;
extern void qla2x00_timer(struct timer_list *);
@@ -292,6 +293,7 @@ extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
uint16_t *);
extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
+void qla_adjust_buf(struct scsi_qla_host *);
/*
* Global Function Prototypes in qla_iocb.c source file.
@@ -721,10 +723,6 @@ extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
-extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
-void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
-
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
@@ -734,9 +732,6 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
-int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
-void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
-int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
@@ -962,7 +957,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
-void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qla_update_host_map(struct scsi_qla_host *, port_id_t);
void qla_remove_hostmap(struct qla_hw_data *ha);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
@@ -975,6 +970,8 @@ extern void qla_nvme_abort_set_option
(struct abort_entry_24xx *abt, srb_t *sp);
extern void qla_nvme_abort_process_comp_status
(struct abort_entry_24xx *abt, srb_t *sp);
+struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha,
+ uint16_t vp_idx);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
@@ -1019,5 +1016,8 @@ int qla2xxx_enable_port(struct Scsi_Host *shost);
uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha);
uint64_t qla2x00_count_set_bits(u32 num);
-
+int qla_create_buf_pool(struct scsi_qla_host *, struct qla_qpair *);
+void qla_free_buf_pool(struct qla_qpair *);
+int qla_get_buf(struct scsi_qla_host *, struct qla_qpair *, struct qla_buf_dsc *);
+void qla_put_buf(struct qla_qpair *, struct qla_buf_dsc *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 64ab070b8716..4738f8935f7f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2949,22 +2949,6 @@ done:
return rval;
}
-int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
-{
- struct qla_work_evt *e;
-
- if (test_bit(UNLOADING, &vha->dpc_flags) ||
- (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
- return 0;
-
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
- if (!e)
- return QLA_FUNCTION_FAILED;
-
- e->u.gpnid.id = *id;
- return qla2x00_post_work(vha, e);
-}
-
void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
struct srb_iocb *c = &sp->u.iocb_cmd;
@@ -2997,287 +2981,6 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
-void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport, *conflict, *t;
- u16 data[2];
-
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d port_id: %06x\n",
- __func__, __LINE__, ea->id.b24);
-
- if (ea->rc) {
- /* cable is disconnected */
- list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
- if (fcport->d_id.b24 == ea->id.b24)
- fcport->scan_state = QLA_FCPORT_SCAN;
-
- qlt_schedule_sess_for_deletion(fcport);
- }
- } else {
- /* cable is connected */
- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
- if (fcport) {
- list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
- list) {
- if ((conflict->d_id.b24 == ea->id.b24) &&
- (fcport != conflict))
- /*
- * 2 fcports with conflict Nport ID or
- * an existing fcport is having nport ID
- * conflict with new fcport.
- */
-
- conflict->scan_state = QLA_FCPORT_SCAN;
-
- qlt_schedule_sess_for_deletion(conflict);
- }
-
- fcport->scan_needed = 0;
- fcport->rscn_gen++;
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
- if (fcport->login_retry == 0) {
- fcport->login_retry =
- vha->hw->login_retry_count;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
- fcport->port_name, fcport->loop_id,
- fcport->login_retry);
- }
- switch (fcport->disc_state) {
- case DSC_LOGIN_COMPLETE:
- /* recheck session is still intact. */
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC revalidate session with ADISC\n",
- __func__, __LINE__, fcport->port_name);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(vha, fcport,
- data);
- break;
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- fcport->d_id = ea->id;
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- fcport->d_id = ea->id;
- break;
- default:
- fcport->d_id = ea->id;
- break;
- }
- } else {
- list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
- list) {
- if (conflict->d_id.b24 == ea->id.b24) {
- /* 2 fcports with conflict Nport ID or
- * an existing fcport is having nport ID
- * conflict with new fcport.
- */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- conflict->port_name,
- conflict->disc_state);
-
- conflict->scan_state = QLA_FCPORT_SCAN;
- qlt_schedule_sess_for_deletion(conflict);
- }
- }
-
- /* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0x2065,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
- qla24xx_post_newsess_work(vha, &ea->id,
- ea->port_name, NULL, NULL, 0);
- }
- }
-}
-
-static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
-{
- struct scsi_qla_host *vha = sp->vha;
- struct ct_sns_req *ct_req =
- (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
- struct ct_sns_rsp *ct_rsp =
- (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
- struct event_arg ea;
- struct qla_work_evt *e;
- unsigned long flags;
-
- if (res)
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
- sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
- else
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
- sp->name, sp->gen1, &ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
-
- memset(&ea, 0, sizeof(ea));
- memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
- ea.sp = sp;
- ea.id = be_to_port_id(ct_req->req.port_id.port_id);
- ea.rc = res;
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- list_del(&sp->elem);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-
- if (res) {
- if (res == QLA_FUNCTION_TIMEOUT) {
- qla24xx_post_gpnid_work(sp->vha, &ea.id);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- return;
- }
- } else if (sp->gen1) {
- /* There was another RSCN for this Nport ID */
- qla24xx_post_gpnid_work(sp->vha, &ea.id);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- return;
- }
-
- qla24xx_handle_gpnid_event(vha, &ea);
-
- e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
- if (!e) {
- /* please ignore kernel warning. otherwise, we have mem leak. */
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
-
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- return;
- }
-
- e->u.iosb.sp = sp;
- qla2x00_post_work(vha, e);
-}
-
-/* Get WWPN with Nport ID. */
-int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
-{
- int rval = QLA_FUNCTION_FAILED;
- struct ct_sns_req *ct_req;
- srb_t *sp, *tsp;
- struct ct_sns_pkt *ct_sns;
- unsigned long flags;
-
- if (!vha->flags.online)
- goto done;
-
- /* ref: INIT */
- sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
- if (!sp)
- goto done;
-
- sp->type = SRB_CT_PTHRU_CMD;
- sp->name = "gpnid";
- sp->u.iocb_cmd.u.ctarg.id = *id;
- sp->gen1 = 0;
- qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
- qla2x00_async_gpnid_sp_done);
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- list_for_each_entry(tsp, &vha->gpnid_list, elem) {
- if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
- tsp->gen1++;
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- goto done;
- }
- }
- list_add_tail(&sp->elem, &vha->gpnid_list);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-
- sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
- GFP_KERNEL);
- sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
- if (!sp->u.iocb_cmd.u.ctarg.req) {
- ql_log(ql_log_warn, vha, 0xd041,
- "Failed to allocate ct_sns request.\n");
- goto done_free_sp;
- }
-
- sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
- GFP_KERNEL);
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
- if (!sp->u.iocb_cmd.u.ctarg.rsp) {
- ql_log(ql_log_warn, vha, 0xd042,
- "Failed to allocate ct_sns request.\n");
- goto done_free_sp;
- }
-
- ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
- memset(ct_sns, 0, sizeof(*ct_sns));
-
- ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
- /* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
-
- /* GPN_ID req */
- ct_req->req.port_id.port_id = port_id_to_be_id(*id);
-
- sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
- ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, &ct_req->req.port_id.port_id);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- return rval;
-
-done_free_sp:
- spin_lock_irqsave(&vha->hw->vport_slock, flags);
- list_del(&sp->elem);
- spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
-
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
-done:
- return rval;
-}
-
-
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
@@ -4190,116 +3893,6 @@ void qla_scan_work_fn(struct work_struct *work)
spin_unlock_irqrestore(&vha->work_lock, flags);
}
-/* GNN_ID */
-void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- qla24xx_post_gnl_work(vha, ea->fcport);
-}
-
-static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
-{
- struct scsi_qla_host *vha = sp->vha;
- fc_port_t *fcport = sp->fcport;
- u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
- struct event_arg ea;
- u64 wwnn;
-
- fcport->flags &= ~FCF_ASYNC_SENT;
- wwnn = wwn_to_u64(node_name);
- if (wwnn)
- memcpy(fcport->node_name, node_name, WWN_SIZE);
-
- memset(&ea, 0, sizeof(ea));
- ea.fcport = fcport;
- ea.sp = sp;
- ea.rc = res;
-
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s res %x, WWPN %8phC %8phC\n",
- sp->name, res, fcport->port_name, fcport->node_name);
-
- qla24xx_handle_gnnid_event(vha, &ea);
-
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
-}
-
-int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
-{
- int rval = QLA_FUNCTION_FAILED;
- struct ct_sns_req *ct_req;
- srb_t *sp;
-
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
-
- qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
- /* ref: INIT */
- sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
- if (!sp)
- goto done;
-
- fcport->flags |= FCF_ASYNC_SENT;
- sp->type = SRB_CT_PTHRU_CMD;
- sp->name = "gnnid";
- sp->gen1 = fcport->rscn_gen;
- sp->gen2 = fcport->login_gen;
- qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
- qla2x00_async_gnnid_sp_done);
-
- /* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
- GNN_ID_RSP_SIZE);
-
- /* GNN_ID req */
- ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
-
-
- /* req & rsp use the same buffer */
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
- sp->name, fcport->port_name,
- sp->handle, fcport->loop_id, fcport->d_id.b24);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
- return rval;
-
-done_free_sp:
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- fcport->flags &= ~FCF_ASYNC_SENT;
-done:
- return rval;
-}
-
-int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
- struct qla_work_evt *e;
- int ls;
-
- ls = atomic_read(&vha->loop_state);
- if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
- test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
- if (!e)
- return QLA_FUNCTION_FAILED;
-
- e->u.fcport.fcport = fcport;
- return qla2x00_post_work(vha, e);
-}
-
/* GPFN_ID */
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8d9ecabb1aac..1dbc1496ebed 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -128,12 +128,14 @@ static void qla24xx_abort_iocb_timeout(void *data)
sp->cmd_sp)) {
qpair->req->outstanding_cmds[handle] = NULL;
cmdsp_found = 1;
+ qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
}
/* removing the abort */
if (qpair->req->outstanding_cmds[handle] == sp) {
qpair->req->outstanding_cmds[handle] = NULL;
sp_found = 1;
+ qla_put_fw_resources(qpair, &sp->iores);
break;
}
}
@@ -388,6 +390,12 @@ done_free_sp:
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
+
+ /*
+	 * Async login failed, possibly due to iocb/exchange resources being
+	 * low. Set the state to DELETED so the relogin process can start again.
+ */
+ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
return rval;
}
@@ -1646,7 +1654,6 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
u16 data[2];
- u64 wwn;
u16 sec;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
@@ -1686,7 +1693,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
switch (fcport->disc_state) {
case DSC_DELETED:
- wwn = wwn_to_u64(fcport->node_name);
switch (vha->hw->current_topology) {
case ISP_CFG_N:
if (fcport_is_smaller(fcport)) {
@@ -1710,12 +1716,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
}
break;
default:
- if (wwn == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post GNNID\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnnid_work(vha, fcport);
- } else if (fcport->loop_id == FC_NO_LOOP_ID) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
ql_dbg(ql_dbg_disc, vha, 0x20bd,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
@@ -2000,6 +2001,7 @@ qla2x00_tmf_iocb_timeout(void *data)
for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
if (sp->qpair->req->outstanding_cmds[h] == sp) {
sp->qpair->req->outstanding_cmds[h] = NULL;
+ qla_put_fw_resources(sp->qpair, &sp->iores);
break;
}
}
@@ -2073,7 +2075,6 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
done_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -2315,7 +2316,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->login_pause = 1;
ql_dbg(ql_dbg_disc, vha, 0x20ed,
- "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n",
__func__, __LINE__, ea->fcport->port_name,
ea->fcport->d_id.b24, lid);
} else {
@@ -3943,6 +3944,12 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
ha->base_qpair->fwres.iocbs_limit = limit;
ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
ha->base_qpair->fwres.iocbs_used = 0;
+
+ ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+ QLA_IOCB_PCT_LIMIT) / 100;
+ ha->base_qpair->fwres.exch_used = 0;
+
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
ha->queue_pair_map[i]->fwres.iocbs_total =
@@ -3951,6 +3958,10 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
limit / num_qps;
ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+ ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->queue_pair_map[i]->fwres.exch_limit =
+ (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
}
@@ -4809,9 +4820,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (vha->hw->flags.edif_enabled) {
if (topo != 2)
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
} else if (!(topo == 2 && ha->flags.n2n_bigger))
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
@@ -5206,27 +5217,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
-static void
-qla2x00_rport_del(void *data)
-{
- fc_port_t *fcport = data;
- struct fc_rport *rport;
- unsigned long flags;
-
- spin_lock_irqsave(fcport->vha->host->host_lock, flags);
- rport = fcport->drport ? fcport->drport : fcport->rport;
- fcport->drport = NULL;
- spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
- "%s %8phN. rport %p roles %x\n",
- __func__, fcport->port_name, rport,
- rport->roles);
-
- fc_remote_port_delete(rport);
- }
-}
-
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
int old_state;
@@ -6743,33 +6733,6 @@ int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
return rval;
}
-void
-qla2x00_update_fcports(scsi_qla_host_t *base_vha)
-{
- fc_port_t *fcport;
- struct scsi_qla_host *vha, *tvp;
- struct qla_hw_data *ha = base_vha->hw;
- unsigned long flags;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- /* Go with deferred removal of rport references. */
- list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
- atomic_inc(&vha->vref_count);
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->drport &&
- atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
- spin_unlock_irqrestore(&ha->vport_slock, flags);
- qla2x00_rport_del(fcport);
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- }
- }
- atomic_dec(&vha->vref_count);
- wake_up(&vha->vref_waitq);
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-}
-
/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
@@ -9461,8 +9424,6 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->req = ha->req_q_map[req_id];
qpair->rsp->req = qpair->req;
qpair->rsp->qpair = qpair;
- /* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(qpair, raw_smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
@@ -9477,6 +9438,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
goto fail_mempool;
}
+ if (qla_create_buf_pool(vha, qpair)) {
+ ql_log(ql_log_warn, vha, 0xd036,
+ "Failed to initialize buf pool for qpair %d\n",
+ qpair->id);
+ goto fail_bufpool;
+ }
+
/* Mark as online */
qpair->online = 1;
@@ -9492,7 +9460,10 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
}
return qpair;
+fail_bufpool:
+ mempool_destroy(qpair->srb_mempool);
fail_mempool:
+ qla25xx_delete_req_que(vha, qpair->req);
fail_req:
qla25xx_delete_rsp_que(vha, qpair->rsp);
fail_rsp:
@@ -9518,6 +9489,8 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
qpair->delete_in_progress = 1;
+ qla_free_buf_pool(qpair);
+
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
goto fail;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5185dc5daf80..cce6e425c121 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -380,24 +380,26 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
enum {
RESOURCE_NONE,
- RESOURCE_INI,
+ RESOURCE_IOCB = BIT_0,
+ RESOURCE_EXCH = BIT_1, /* exchange */
+ RESOURCE_FORCE = BIT_2,
};
static inline int
-qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
+ u16 exch_used;
struct qla_hw_data *ha = qp->vha->hw;
if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
return 0;
}
+ if (iores->res_type & RESOURCE_FORCE)
+ goto force;
- if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
- qp->fwres.iocbs_used += iores->iocb_cnt;
- return 0;
- } else {
+ if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
		/* no need to acquire qpair lock. It's just a rough calculation */
iocbs_used = ha->base_qpair->fwres.iocbs_used;
for (i = 0; i < ha->max_qpairs; i++) {
@@ -405,30 +407,49 @@ qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
}
- if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
- qp->fwres.iocbs_used += iores->iocb_cnt;
- return 0;
- } else {
+ if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ exch_used = ha->base_qpair->fwres.exch_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ exch_used += ha->queue_pair_map[i]->fwres.exch_used;
+ }
+
+ if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
iores->res_type = RESOURCE_NONE;
return -ENOSPC;
}
}
+force:
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ qp->fwres.exch_used += iores->exch_cnt;
+ return 0;
}
static inline void
-qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
- switch (iores->res_type) {
- case RESOURCE_NONE:
- break;
- default:
+ if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
} else {
- // should not happen
+ /* should not happen */
qp->fwres.iocbs_used = 0;
}
- break;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if (qp->fwres.exch_used >= iores->exch_cnt) {
+ qp->fwres.exch_used -= iores->exch_cnt;
+ } else {
+ /* should not happen */
+ qp->fwres.exch_used = 0;
+ }
}
iores->res_type = RESOURCE_NONE;
}
@@ -494,3 +515,58 @@ fcport_is_bigger(fc_port_t *fcport)
{
return !fcport_is_smaller(fcport);
}
+
+static inline struct qla_qpair *
+qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
+{
+ int cpuid = smp_processor_id();
+
+ if (qpair->cpuid != cpuid &&
+ ha->qp_cpu_map[cpuid]) {
+ qpair = ha->qp_cpu_map[cpuid];
+ }
+ return qpair;
+}
+
+static inline void
+qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
+ struct qla_msix_entry *msix,
+ struct qla_qpair *qpair)
+{
+ const struct cpumask *mask;
+ unsigned int cpu;
+
+ if (!ha->qp_cpu_map)
+ return;
+ mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
+ qpair->cpuid = cpumask_first(mask);
+ for_each_cpu(cpu, mask) {
+ ha->qp_cpu_map[cpu] = qpair;
+ }
+ msix->cpuid = qpair->cpuid;
+}
+
+static inline void
+qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha)
+{
+ if (ha->qp_cpu_map) {
+ kfree(ha->qp_cpu_map);
+ ha->qp_cpu_map = NULL;
+ }
+}
+
+static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ if (!ha->qp_cpu_map) {
+ ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *),
+ GFP_KERNEL);
+ if (!ha->qp_cpu_map) {
+ ql_log(ql_log_fatal, vha, 0x0180,
+ "Unable to allocate memory for qp_cpu_map ptrs.\n");
+ return -1;
+ }
+ }
+ return 0;
+}
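
For illustration, a minimal standalone C sketch of the flag-based accounting that qla_get_fw_resources()/qla_put_fw_resources() implement above. The single shared pool and the names here are simplified stand-ins; the real driver sums usage across all queue pairs and clears res_type itself when a check fails.

#include <stdio.h>

enum {
	RESOURCE_NONE  = 0,
	RESOURCE_IOCB  = 1 << 0,
	RESOURCE_EXCH  = 1 << 1,	/* exchange */
	RESOURCE_FORCE = 1 << 2,	/* bypass the limit checks */
};

struct fw_res { unsigned iocbs_used, iocbs_limit, exch_used, exch_limit; };
struct io_req { unsigned res_type, iocb_cnt, exch_cnt; };

static int get_fw_resources(struct fw_res *fw, struct io_req *r)
{
	if (r->res_type & RESOURCE_FORCE)
		goto force;		/* must-succeed commands skip the checks */
	if ((r->res_type & RESOURCE_IOCB) &&
	    fw->iocbs_used + r->iocb_cnt >= fw->iocbs_limit)
		return -1;
	if ((r->res_type & RESOURCE_EXCH) &&
	    fw->exch_used + r->exch_cnt >= fw->exch_limit)
		return -1;
force:
	fw->iocbs_used += r->iocb_cnt;
	fw->exch_used += r->exch_cnt;
	return 0;
}

static void put_fw_resources(struct fw_res *fw, struct io_req *r)
{
	if (r->res_type & RESOURCE_IOCB)	/* clamp at 0, as the driver does */
		fw->iocbs_used -= fw->iocbs_used >= r->iocb_cnt ?
				  r->iocb_cnt : fw->iocbs_used;
	if (r->res_type & RESOURCE_EXCH)
		fw->exch_used -= fw->exch_used >= r->exch_cnt ?
				 r->exch_cnt : fw->exch_used;
	r->res_type = RESOURCE_NONE;
}

int main(void)
{
	struct fw_res fw = { .iocbs_limit = 4, .exch_limit = 2 };
	struct io_req io = { RESOURCE_IOCB | RESOURCE_EXCH, 2, 1 };

	printf("first get:  %d\n", get_fw_resources(&fw, &io));	/* 0: fits */
	printf("second get: %d\n", get_fw_resources(&fw, &io));	/* -1: at limit */
	put_fw_resources(&fw, &io);	/* releases the first acquisition */
	return 0;
}
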
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ce4e1fe744..b9b3e6f80ea9 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -623,7 +623,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
}
cur_seg = scsi_sglist(cmd);
- ctx = sp->u.scmd.ct6_ctx;
+ ctx = &sp->u.scmd.ct6_ctx;
while (tot_dsds) {
avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1589,9 +1589,10 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = req_cnt;
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -1678,7 +1679,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1793,9 +1794,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_prot_dsds = nseg;
tot_dsds += nseg;
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -1883,7 +1885,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1952,9 +1954,10 @@ qla2xxx_start_scsi_mq(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = req_cnt;
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -2041,7 +2044,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2171,9 +2174,10 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
tot_prot_dsds = nseg;
tot_dsds += nseg;
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -2260,7 +2264,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2916,7 +2920,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
conflict_fcport->conflict = fcport;
fcport->login_pause = 1;
ql_dbg(ql_dbg_disc, vha, 0x20ed,
- "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+ "%s %d %8phC pid %06x inuse with lid %#x.\n",
__func__, __LINE__,
fcport->port_name,
fcport->d_id.b24, lid);
@@ -3455,13 +3459,7 @@ sufficient_dsds:
goto queuing_error;
}
- ctx = sp->u.scmd.ct6_ctx =
- mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!ctx) {
- ql_log(ql_log_fatal, vha, 0x3010,
- "Failed to allocate ctx for cmd=%p.\n", cmd);
- goto queuing_error;
- }
+ ctx = &sp->u.scmd.ct6_ctx;
memset(ctx, 0, sizeof(struct ct6_dsd));
ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
@@ -3813,6 +3811,65 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->vp_index = sp->fcport->vha->vp_idx;
}
+static int qla_get_iocbs_resource(struct srb *sp)
+{
+ bool get_exch;
+ bool push_it_through = false;
+
+ if (!ql2xenforce_iocb_limit) {
+ sp->iores.res_type = RESOURCE_NONE;
+ return 0;
+ }
+ sp->iores.res_type = RESOURCE_NONE;
+
+ switch (sp->type) {
+ case SRB_TM_CMD:
+ case SRB_PRLI_CMD:
+ case SRB_ADISC_CMD:
+ push_it_through = true;
+ fallthrough;
+ case SRB_LOGIN_CMD:
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ case SRB_ELS_CMD_HST_NOLOGIN:
+ case SRB_CT_CMD:
+ case SRB_NVME_LS:
+ case SRB_ELS_DCMD:
+ get_exch = true;
+ break;
+
+ case SRB_FXIOCB_DCMD:
+ case SRB_FXIOCB_BCMD:
+ sp->iores.res_type = RESOURCE_NONE;
+ return 0;
+
+ case SRB_SA_UPDATE:
+ case SRB_SA_REPLACE:
+ case SRB_MB_IOCB:
+ case SRB_ABT_CMD:
+ case SRB_NACK_PLOGI:
+ case SRB_NACK_PRLI:
+ case SRB_NACK_LOGO:
+ case SRB_LOGOUT_CMD:
+ case SRB_CTRL_VP:
+ push_it_through = true;
+ fallthrough;
+ default:
+ get_exch = false;
+ }
+
+ sp->iores.res_type |= RESOURCE_IOCB;
+ sp->iores.iocb_cnt = 1;
+ if (get_exch) {
+ sp->iores.res_type |= RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ }
+ if (push_it_through)
+ sp->iores.res_type |= RESOURCE_FORCE;
+
+ return qla_get_fw_resources(sp->qpair, &sp->iores);
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3827,6 +3884,12 @@ qla2x00_start_sp(srb_t *sp)
return -EIO;
spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ rval = qla_get_iocbs_resource(sp);
+ if (rval) {
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ return -EAGAIN;
+ }
+
pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
rval = EAGAIN;
@@ -3927,6 +3990,8 @@ qla2x00_start_sp(srb_t *sp)
wmb();
qla2x00_start_iocbs(vha, qp->req);
done:
+ if (rval)
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
return rval;
}
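
qla_get_iocbs_resource() above centralizes, per SRB type, which firmware resources a command consumes and whether it may bypass the limits. A condensed standalone sketch of that classification; the enum values and the reduced set of command types are stand-ins for illustration:

#include <stdbool.h>
#include <stdio.h>

enum srb_type { SRB_TM_CMD, SRB_LOGIN_CMD, SRB_MB_IOCB, SRB_SCSI_CMD };

static unsigned classify(enum srb_type type)
{
	bool get_exch = false, force = false;
	unsigned res = 1;		/* RESOURCE_IOCB: every command costs one */

	switch (type) {
	case SRB_TM_CMD:		/* must go out even when near the limit */
		force = true;
		/* fall through */
	case SRB_LOGIN_CMD:		/* consumes a firmware exchange */
		get_exch = true;
		break;
	case SRB_MB_IOCB:		/* no exchange, but must succeed */
		force = true;
		/* fall through */
	default:
		get_exch = false;
	}

	if (get_exch)
		res |= 2;		/* RESOURCE_EXCH */
	if (force)
		res |= 4;		/* RESOURCE_FORCE */
	return res;
}

int main(void)
{
	printf("TM:    %#x\n", classify(SRB_TM_CMD));		/* IOCB|EXCH|FORCE */
	printf("LOGIN: %#x\n", classify(SRB_LOGIN_CMD));	/* IOCB|EXCH */
	printf("MBX:   %#x\n", classify(SRB_MB_IOCB));		/* IOCB|FORCE */
	printf("SCSI:  %#x\n", classify(SRB_SCSI_CMD));		/* IOCB */
	return 0;
}
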
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e19fde304e5c..46e8b38603f0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3112,6 +3112,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
bsg_reply->reply_payload_rcv_len = 0;
+ qla_put_fw_resources(sp->qpair, &sp->iores);
done:
/* Return the vendor specific reply to API */
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
@@ -3197,7 +3198,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
return;
}
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
@@ -3362,8 +3363,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- vha->interface_err_cnt++;
-
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
@@ -3618,7 +3617,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- qla_put_iocbs(sp->qpair, &sp->iores);
sp->done(sp, res);
return 0;
}
@@ -3771,7 +3769,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
rsp->qpair->rcv_intr = 1;
- qla_cpu_update(rsp->qpair, smp_processor_id());
}
#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
@@ -4379,6 +4376,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
qentry->vector = pci_irq_vector(ha->pdev, i);
+ qentry->vector_base0 = i;
qentry->entry = i;
qentry->have_irq = 0;
qentry->in_use = 0;
@@ -4606,5 +4604,6 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
}
msix->have_irq = 1;
msix->handle = qpair;
+ qla_mapq_init_qp_cpu_map(ha, msix, qpair);
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 359595a64664..254fd4c64262 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4010,7 +4010,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
ha->current_topology = ISP_CFG_NL;
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
/* fabric */
@@ -4126,7 +4126,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
WWN_SIZE);
}
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
}
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -4153,7 +4153,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (!found)
return;
- qlt_update_host_map(vp, id);
+ qla_update_host_map(vp, id);
/*
* Cannot configure here as we are still sitting on the
@@ -4184,7 +4184,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
ha->flags.n2n_ae = 1;
spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
+ qla_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 16a9f22bb860..78661b658dcd 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -52,7 +52,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_update_vp_map(vha, SET_VP_IDX);
+ qla_update_vp_map(vha, SET_VP_IDX);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&ha->vport_lock);
@@ -80,7 +80,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
if (atomic_read(&vha->vref_count) == 0) {
list_del(&vha->list);
- qlt_update_vp_map(vha, RESET_VP_IDX);
+ qla_update_vp_map(vha, RESET_VP_IDX);
bailout = 1;
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -95,7 +95,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
"vha->vref_count=%u timeout\n", vha->vref_count.counter);
spin_lock_irqsave(&ha->vport_slock, flags);
list_del(&vha->list);
- qlt_update_vp_map(vha, RESET_VP_IDX);
+ qla_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
@@ -187,7 +187,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- qlt_update_vp_map(vha, RESET_AL_PA);
+ qla_update_vp_map(vha, RESET_AL_PA);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
qla2x00_mark_vp_devices_dead(vha);
@@ -384,15 +384,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
- if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
- ql_dbg(ql_dbg_dpc, vha, 0x4016,
- "FCPort update scheduled.\n");
- qla2x00_update_fcports(vha);
- clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
- ql_dbg(ql_dbg_dpc, vha, 0x4017,
- "FCPort update end.\n");
- }
-
if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -1014,3 +1005,288 @@ done:
kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
+
+struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == vp_idx)
+ return vha;
+
+ BUG_ON(ha->vp_map == NULL);
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+/* vport_slock to be held by the caller */
+void
+qla_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+ void *slot;
+ u32 key;
+ int rc;
+
+ if (!vha->hw->vp_map)
+ return;
+
+ key = vha->d_id.b24;
+
+ switch (cmd) {
+ case SET_VP_IDX:
+ vha->hw->vp_map[vha->vp_idx].vha = vha;
+ break;
+ case SET_AL_PA:
+ slot = btree_lookup32(&vha->hw->host_map, key);
+ if (!slot) {
+ ql_dbg(ql_dbg_disc, vha, 0xf018,
+ "Save vha in host_map %p %06x\n", vha, key);
+ rc = btree_insert32(&vha->hw->host_map,
+ key, vha, GFP_ATOMIC);
+ if (rc)
+ ql_log(ql_log_info, vha, 0xd03e,
+ "Unable to insert s_id into host_map: %06x\n",
+ key);
+ return;
+ }
+ ql_dbg(ql_dbg_disc, vha, 0xf019,
+ "replace existing vha in host_map %p %06x\n", vha, key);
+ btree_update32(&vha->hw->host_map, key, vha);
+ break;
+ case RESET_VP_IDX:
+ vha->hw->vp_map[vha->vp_idx].vha = NULL;
+ break;
+ case RESET_AL_PA:
+ ql_dbg(ql_dbg_disc, vha, 0xf01a,
+ "clear vha in host_map %p %06x\n", vha, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
+ if (slot)
+ btree_remove32(&vha->hw->host_map, key);
+ vha->d_id.b24 = 0;
+ break;
+ }
+}
+
+void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+ if (!vha->d_id.b24) {
+ vha->d_id = id;
+ qla_update_vp_map(vha, SET_AL_PA);
+ } else if (vha->d_id.b24 != id.b24) {
+ qla_update_vp_map(vha, RESET_AL_PA);
+ vha->d_id = id;
+ qla_update_vp_map(vha, SET_AL_PA);
+ }
+}
+
+int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp)
+{
+ int sz;
+
+ qp->buf_pool.num_bufs = qp->req->length;
+
+ sz = BITS_TO_LONGS(qp->req->length);
+ qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL);
+ if (!qp->buf_pool.buf_map) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to allocate buf_map(%zd).\n", sz * sizeof(unsigned long));
+ return -ENOMEM;
+ }
+ sz = qp->req->length * sizeof(void *);
+ qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL);
+ if (!qp->buf_pool.buf_array) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to allocate buf_array(%d).\n", sz);
+ kfree(qp->buf_pool.buf_map);
+ return -ENOMEM;
+ }
+ sz = qp->req->length * sizeof(dma_addr_t);
+ qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!qp->buf_pool.dma_array) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to allocate dma_array(%d).\n", sz);
+ kfree(qp->buf_pool.buf_map);
+ kfree(qp->buf_pool.buf_array);
+ return -ENOMEM;
+ }
+ set_bit(0, qp->buf_pool.buf_map);
+ return 0;
+}
+
+void qla_free_buf_pool(struct qla_qpair *qp)
+{
+ int i;
+ struct qla_hw_data *ha = qp->vha->hw;
+
+ for (i = 0; i < qp->buf_pool.num_bufs; i++) {
+ if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i])
+ dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i],
+ qp->buf_pool.dma_array[i]);
+ qp->buf_pool.buf_array[i] = NULL;
+ qp->buf_pool.dma_array[i] = 0;
+ }
+
+ kfree(qp->buf_pool.dma_array);
+ kfree(qp->buf_pool.buf_array);
+ kfree(qp->buf_pool.buf_map);
+}
+
+/* it is assumed qp->qp_lock is held at this point */
+int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc)
+{
+ u16 tag, i = 0;
+ void *buf;
+ dma_addr_t buf_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ dsc->tag = TAG_FREED;
+again:
+ tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs);
+ if (tag >= qp->buf_pool.num_bufs) {
+ ql_dbg(ql_dbg_io, vha, 0x00e2,
+		    "qp(%d) ran out of buf resources.\n", qp->id);
+ return -EIO;
+ }
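+	/* tag 0 is reserved at pool creation; if it shows as free, re-reserve it and retry */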
+ if (tag == 0) {
+ set_bit(0, qp->buf_pool.buf_map);
+ i++;
+ if (i == 5) {
+ ql_dbg(ql_dbg_io, vha, 0x00e3,
+ "qp(%d) unable to get tag.\n", qp->id);
+ return -EIO;
+ }
+ goto again;
+ }
+
+ if (!qp->buf_pool.buf_array[tag]) {
+ buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma);
+ if (!buf) {
+ ql_log(ql_log_fatal, vha, 0x13b1,
+ "Failed to allocate buf.\n");
+ return -ENOMEM;
+ }
+
+ dsc->buf = qp->buf_pool.buf_array[tag] = buf;
+ dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma;
+ qp->buf_pool.num_alloc++;
+ } else {
+ dsc->buf = qp->buf_pool.buf_array[tag];
+ dsc->buf_dma = qp->buf_pool.dma_array[tag];
+ memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE);
+ }
+
+ qp->buf_pool.num_active++;
+ if (qp->buf_pool.num_active > qp->buf_pool.max_used)
+ qp->buf_pool.max_used = qp->buf_pool.num_active;
+
+ dsc->tag = tag;
+ set_bit(tag, qp->buf_pool.buf_map);
+ return 0;
+}
+
+static void qla_trim_buf(struct qla_qpair *qp, u16 trim)
+{
+ int i, j;
+ struct qla_hw_data *ha = qp->vha->hw;
+
+ if (!trim)
+ return;
+
+ for (i = 0; i < trim; i++) {
+ j = qp->buf_pool.num_alloc - 1;
+ if (test_bit(j, qp->buf_pool.buf_map)) {
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b,
+			    "QP id(%d): buf[%d] still active, stop trimming. %d bufs remain\n",
+ qp->id, j, qp->buf_pool.num_alloc);
+ return;
+ }
+
+ if (qp->buf_pool.buf_array[j]) {
+ dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j],
+ qp->buf_pool.dma_array[j]);
+ qp->buf_pool.buf_array[j] = NULL;
+ qp->buf_pool.dma_array[j] = 0;
+ }
+ qp->buf_pool.num_alloc--;
+ if (!qp->buf_pool.num_alloc)
+ break;
+ }
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010,
+	    "QP id(%d): trimmed %d bufs, %d bufs remain\n",
+ qp->id, trim, qp->buf_pool.num_alloc);
+}
+
+static void __qla_adjust_buf(struct qla_qpair *qp)
+{
+ u32 trim;
+
+ qp->buf_pool.take_snapshot = 0;
+ qp->buf_pool.prev_max = qp->buf_pool.max_used;
+ qp->buf_pool.max_used = qp->buf_pool.num_active;
+
+ if (qp->buf_pool.prev_max > qp->buf_pool.max_used &&
+ qp->buf_pool.num_alloc > qp->buf_pool.max_used) {
+ /* down trend */
+ trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used;
+ trim = (trim * 10) / 100;
+ trim = trim ? trim : 1;
+ qla_trim_buf(qp, trim);
+ } else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) {
+		/* two consecutive periods with no I/O */
+ qla_trim_buf(qp, qp->buf_pool.num_alloc);
+ }
+}
+
+/* it is assumed qp->qp_lock is held at this point */
+void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc)
+{
+ if (dsc->tag == TAG_FREED)
+ return;
+ lockdep_assert_held(qp->qp_lock_ptr);
+
+ clear_bit(dsc->tag, qp->buf_pool.buf_map);
+ qp->buf_pool.num_active--;
+ dsc->tag = TAG_FREED;
+
+ if (qp->buf_pool.take_snapshot)
+ __qla_adjust_buf(qp);
+}
+
+#define EXPIRE (60 * HZ)
+void qla_adjust_buf(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+ int i;
+ struct qla_qpair *qp;
+
+ if (vha->vp_idx)
+ return;
+
+ if (!vha->buf_expired) {
+ vha->buf_expired = jiffies + EXPIRE;
+ return;
+ }
+ if (time_before(jiffies, vha->buf_expired))
+ return;
+
+ vha->buf_expired = jiffies + EXPIRE;
+
+ for (i = 0; i < vha->hw->num_qpairs; i++) {
+ qp = vha->hw->queue_pair_map[i];
+ if (!qp)
+ continue;
+ if (!qp->buf_pool.num_alloc)
+ continue;
+
+ if (qp->buf_pool.take_snapshot) {
+			/* no I/O has gone through in the last EXPIRE period */
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ __qla_adjust_buf(qp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ } else {
+ qp->buf_pool.take_snapshot = 1;
+ }
+ }
+}
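
The qla_get_buf()/qla_put_buf()/qla_trim_buf() additions above implement a per-qpair buffer cache: a bitmap hands out tags, DMA buffers are allocated lazily per tag and kept for reuse, and a periodic pass shrinks the cached tail when usage trends down. A standalone userspace sketch of the same idea, with malloc standing in for dma_pool_zalloc and the driver's tag-0 reservation and locking omitted:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_BUFS 64
#define BUF_SZ   512

static unsigned long buf_map;		/* tag bitmap */
static void *buf_array[NUM_BUFS];	/* lazily allocated, then cached */
static int num_alloc, num_active, max_used, prev_max;

static int get_buf(void **out)
{
	int tag;

	for (tag = 0; tag < NUM_BUFS; tag++)	/* find_first_zero_bit() analog */
		if (!(buf_map & (1UL << tag)))
			break;
	if (tag >= NUM_BUFS)
		return -1;			/* pool exhausted */

	if (!buf_array[tag]) {			/* first use of this tag */
		buf_array[tag] = calloc(1, BUF_SZ);
		if (!buf_array[tag])
			return -1;
		num_alloc++;
	} else {
		memset(buf_array[tag], 0, BUF_SZ);
	}
	buf_map |= 1UL << tag;
	if (++num_active > max_used)
		max_used = num_active;
	*out = buf_array[tag];
	return tag;
}

static void put_buf(int tag)
{
	buf_map &= ~(1UL << tag);
	num_active--;
}

static void trim_idle(int trim)
{
	while (trim-- && num_alloc) {
		int j = num_alloc - 1;		/* free from the cached tail */

		if (buf_map & (1UL << j))
			return;			/* tail tag is in flight; stop */
		free(buf_array[j]);
		buf_array[j] = NULL;
		num_alloc--;
	}
}

static void adjust(void)	/* run once per period (~60s in the driver) */
{
	prev_max = max_used;
	max_used = num_active;
	if (prev_max > max_used && num_alloc > max_used) {
		int trim = (num_alloc - max_used) * 10 / 100;

		trim_idle(trim ? trim : 1);	/* ~10% of the idle excess */
	} else if (!prev_max && !max_used) {
		trim_idle(num_alloc);		/* two idle periods: drop all */
	}
}

int main(void)
{
	void *b;
	int t1 = get_buf(&b), t2 = get_buf(&b);

	put_buf(t2);
	put_buf(t1);
	adjust();	/* snapshot: max_used was 2 */
	adjust();	/* downtrend: trims cached buffers */
	printf("cached after trim: %d\n", num_alloc);
	return 0;
}
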
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 02fdeb0d31ec..648e8f798606 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -170,18 +170,6 @@ out:
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
-static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
-{
- if (sp->flags & SRB_DMA_VALID) {
- struct srb_iocb *nvme = &sp->u.iocb_cmd;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
-
- dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- fd->rqstlen, DMA_TO_DEVICE);
- sp->flags &= ~SRB_DMA_VALID;
- }
-}
-
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
@@ -199,7 +187,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
fd = priv->fd;
- qla_nvme_ls_unmap(sp, fd);
fd->done(fd, priv->comp_status);
out:
qla2x00_rel_sp(sp);
@@ -365,13 +352,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.rsp_len = fd->rsplen;
nvme->u.nvme.rsp_dma = fd->rspdma;
nvme->u.nvme.timeout_sec = fd->timeout;
- nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
- fd->rqstlen, DMA_TO_DEVICE);
+ nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
- sp->flags |= SRB_DMA_VALID;
-
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
@@ -379,7 +363,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
- qla_nvme_ls_unmap(sp, fd);
qla2x00_rel_sp(sp);
return rval;
}
@@ -445,13 +428,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
goto queuing_error;
}
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
+ rval = -EBUSY;
+ goto queuing_error;
+ }
+
if (req->cnt < (req_cnt + 2)) {
if (IS_SHADOW_REG_CAPABLE(ha)) {
cnt = *req->out_ptr;
} else {
cnt = rd_reg_dword_relaxed(req->req_q_out);
- if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
+ rval = -EBUSY;
goto queuing_error;
+ }
}
if (req->ring_index < cnt)
@@ -600,6 +594,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
qla24xx_process_response_queue(vha, rsp);
queuing_error:
+ if (rval)
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return rval;
@@ -613,6 +609,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fc_port_t *fcport;
struct srb_iocb *nvme;
struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
int rval;
srb_t *sp;
struct qla_qpair *qpair = hw_queue_handle;
@@ -633,6 +630,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return -ENODEV;
vha = fcport->vha;
+ ha = vha->hw;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
return -EBUSY;
@@ -647,6 +645,8 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (fcport->nvme_flag & NVME_FLAG_RESETTING)
return -EBUSY;
+ qpair = qla_mapq_nvme_select_qpair(ha, qpair);
+
/* Alloc SRB structure */
sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
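
qla_mapq_init_qp_cpu_map() (in the qla_inline.h hunk above) records, at MSI-X setup, which queue pair owns each CPU in the vector's affinity mask, and qla_mapq_nvme_select_qpair() (called from qla_nvme.c above) reroutes a submission to the qpair owning the current CPU so that submission and completion run on the same CPU. A simplified standalone sketch, with a bitmask and plain arrays standing in for cpumasks and the driver's structures:

#include <stdio.h>

#define NR_CPUS 8

struct qpair { int id; int cpuid; };
static struct qpair *qp_cpu_map[NR_CPUS];

/* qla_mapq_init_qp_cpu_map() analog: mask is a bitmap of this vector's CPUs */
static void map_qpair(struct qpair *qp, unsigned mask)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu)) {
			if (qp->cpuid < 0)
				qp->cpuid = cpu;	/* cpumask_first() */
			qp_cpu_map[cpu] = qp;
		}
}

/* qla_mapq_nvme_select_qpair() analog */
static struct qpair *select_qpair(struct qpair *qp, int this_cpu)
{
	if (qp->cpuid != this_cpu && qp_cpu_map[this_cpu])
		return qp_cpu_map[this_cpu];
	return qp;
}

int main(void)
{
	struct qpair q0 = { 0, -1 }, q1 = { 1, -1 };

	map_qpair(&q0, 0x0F);	/* CPUs 0-3 */
	map_qpair(&q1, 0xF0);	/* CPUs 4-7 */
	printf("cpu 5 -> qpair %d\n", select_qpair(&q0, 5)->id);	/* 1 */
	return 0;
}
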
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7fb28c207ee5..545167627e48 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -472,6 +472,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for queue pair ptrs.\n");
goto fail_qpair_map;
}
+ if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
+ kfree(ha->queue_pair_map);
+ ha->queue_pair_map = NULL;
+ goto fail_qpair_map;
+ }
}
/*
@@ -546,6 +551,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
ha->base_qpair = NULL;
}
+ qla_mapq_free_qp_cpu_map(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
@@ -733,15 +739,17 @@ void qla2x00_sp_free_dma(srb_t *sp)
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+ struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
- mempool_free(ctx1, ha->ctx_mempool);
}
+
+ if (sp->flags & SRB_GOT_BUF)
+ qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}
void qla2x00_sp_compl(srb_t *sp, int res)
@@ -817,14 +825,13 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+ struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
- mempool_free(ctx1, ha->ctx_mempool);
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}
@@ -834,6 +841,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
+
+ if (sp->flags & SRB_GOT_BUF)
+ qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}
void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
@@ -4118,10 +4128,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
char name[16];
int rc;
+ if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) {
+ ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL);
+ if (!ha->vp_map)
+ goto fail;
+ }
+
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
&ha->init_cb_dma, GFP_KERNEL);
if (!ha->init_cb)
- goto fail;
+ goto fail_free_vp_map;
rc = btree_init32(&ha->host_map);
if (rc)
@@ -4540,6 +4556,8 @@ fail_free_init_cb:
ha->init_cb_dma);
ha->init_cb = NULL;
ha->init_cb_dma = 0;
+fail_free_vp_map:
+ kfree(ha->vp_map);
fail:
ql_log(ql_log_fatal, NULL, 0x0030,
"Memory allocation failure.\n");
@@ -4981,6 +4999,9 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->sf_init_cb = NULL;
ha->sf_init_cb_dma = 0;
ha->loop_id_map = NULL;
+
+ kfree(ha->vp_map);
+ ha->vp_map = NULL;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -5016,7 +5037,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
- INIT_LIST_HEAD(&vha->gpnid_list);
INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
INIT_LIST_HEAD(&vha->purex_list.head);
@@ -5461,9 +5481,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
- case QLA_EVT_GPNID:
- qla24xx_async_gpnid(vha, &e->u.gpnid.id);
- break;
case QLA_EVT_UNMAP:
qla24xx_sp_unmap(vha, e->u.iosb.sp);
break;
@@ -5506,9 +5523,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GNNFT_DONE:
qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
break;
- case QLA_EVT_GNNID:
- qla24xx_async_gnnid(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GFPNID:
qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
break;
@@ -7025,11 +7039,6 @@ qla2x00_do_dpc(void *data)
}
}
- if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
- &base_vha->dpc_flags)) {
- qla2x00_update_fcports(base_vha);
- }
-
if (IS_QLAFX00(ha))
goto loop_resync_check;
@@ -7094,9 +7103,12 @@ qla2x00_do_dpc(void *data)
}
}
loop_resync_check:
- if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+ if (!qla2x00_reset_active(base_vha) &&
+ test_and_clear_bit(LOOP_RESYNC_NEEDED,
&base_vha->dpc_flags)) {
-
+ /*
+ * Allow abort_isp to complete before moving on to scanning.
+ */
ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
"Loop resync scheduled.\n");
@@ -7447,7 +7459,7 @@ qla2x00_timer(struct timer_list *t)
/* if the loop has been down for 4 minutes, reinit adapter */
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
- if (!(vha->device_flags & DFLG_NO_CABLE)) {
+ if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
ql_log(ql_log_warn, vha, 0x6009,
"Loop down - aborting ISP.\n");
@@ -7516,13 +7528,13 @@ qla2x00_timer(struct timer_list *t)
set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
+ qla_adjust_buf(vha);
/* borrowing w to signify dpc will run */
w = 0;
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
- test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
start_dpc ||
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
@@ -7533,13 +7545,10 @@ qla2x00_timer(struct timer_list *t)
test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
- "fcport_update_needed=%d start_dpc=%d "
- "reset_marker_needed=%d",
+ "start_dpc=%d reset_marker_needed=%d",
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
- test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
- start_dpc,
- test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
+ start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 548f22705ddc..aa0cf5ca6c1c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -198,22 +198,6 @@ struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
return host;
}
-static inline
-struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
- uint16_t vp_idx)
-{
- struct qla_hw_data *ha = vha->hw;
-
- if (vha->vp_idx == vp_idx)
- return vha;
-
- BUG_ON(ha->tgt.tgt_vp_map == NULL);
- if (likely(test_bit(vp_idx, ha->vp_idx_map)))
- return ha->tgt.tgt_vp_map[vp_idx].vha;
-
- return NULL;
-}
-
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
unsigned long flags;
@@ -371,7 +355,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
if ((entry->u.isp24.vp_index != 0xFF) &&
(entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
- host = qlt_find_host_by_vp_idx(vha,
+ host = qla_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe03f,
@@ -395,7 +379,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
{
struct abts_recv_from_24xx *entry =
(struct abts_recv_from_24xx *)atio;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
unsigned long flags;
@@ -438,7 +422,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe041,
@@ -457,7 +441,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)pkt;
- host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+ host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe042,
"qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
@@ -475,7 +459,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
if (0xFF != entry->u.isp24.vp_index) {
- host = qlt_find_host_by_vp_idx(vha,
+ host = qla_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe043,
@@ -495,7 +479,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
{
struct abts_recv_from_24xx *entry =
(struct abts_recv_from_24xx *)pkt;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe044,
@@ -512,7 +496,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
{
struct abts_resp_to_24xx *entry =
(struct abts_resp_to_24xx *)pkt;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe045,
@@ -1028,8 +1012,7 @@ void qlt_free_session_done(struct work_struct *work)
}
if (ha->flags.edif_enabled &&
- (!own || (own &&
- own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
sess->edif.authok = 0;
if (!ha->flags.host_shutting_down) {
ql_dbg(ql_dbg_edif, vha, 0x911e,
@@ -7145,7 +7128,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_clear_mode(base_vha);
- qlt_update_vp_map(base_vha, SET_VP_IDX);
+ qla_update_vp_map(base_vha, SET_VP_IDX);
}
irqreturn_t
@@ -7224,17 +7207,10 @@ qlt_mem_alloc(struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return 0;
- ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
- sizeof(struct qla_tgt_vp_map),
- GFP_KERNEL);
- if (!ha->tgt.tgt_vp_map)
- return -ENOMEM;
-
ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
(ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
&ha->tgt.atio_dma, GFP_KERNEL);
if (!ha->tgt.atio_ring) {
- kfree(ha->tgt.tgt_vp_map);
return -ENOMEM;
}
return 0;
@@ -7253,70 +7229,6 @@ qlt_mem_free(struct qla_hw_data *ha)
}
ha->tgt.atio_ring = NULL;
ha->tgt.atio_dma = 0;
- kfree(ha->tgt.tgt_vp_map);
- ha->tgt.tgt_vp_map = NULL;
-}
-
-/* vport_slock to be held by the caller */
-void
-qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
-{
- void *slot;
- u32 key;
- int rc;
-
- key = vha->d_id.b24;
-
- switch (cmd) {
- case SET_VP_IDX:
- if (!QLA_TGT_MODE_ENABLED())
- return;
- vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
- break;
- case SET_AL_PA:
- slot = btree_lookup32(&vha->hw->host_map, key);
- if (!slot) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
- "Save vha in host_map %p %06x\n", vha, key);
- rc = btree_insert32(&vha->hw->host_map,
- key, vha, GFP_ATOMIC);
- if (rc)
- ql_log(ql_log_info, vha, 0xd03e,
- "Unable to insert s_id into host_map: %06x\n",
- key);
- return;
- }
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
- "replace existing vha in host_map %p %06x\n", vha, key);
- btree_update32(&vha->hw->host_map, key, vha);
- break;
- case RESET_VP_IDX:
- if (!QLA_TGT_MODE_ENABLED())
- return;
- vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
- break;
- case RESET_AL_PA:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
- "clear vha in host_map %p %06x\n", vha, key);
- slot = btree_lookup32(&vha->hw->host_map, key);
- if (slot)
- btree_remove32(&vha->hw->host_map, key);
- vha->d_id.b24 = 0;
- break;
- }
-}
-
-void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
-{
-
- if (!vha->d_id.b24) {
- vha->d_id = id;
- qlt_update_vp_map(vha, SET_AL_PA);
- } else if (vha->d_id.b24 != id.b24) {
- qlt_update_vp_map(vha, RESET_AL_PA);
- vha->d_id = id;
- qlt_update_vp_map(vha, SET_AL_PA);
- }
}
static int __init qlt_parse_ini_mode(void)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 7df86578214f..354fca2e7feb 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1017,7 +1017,6 @@ extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
extern void qlt_exit(void);
-extern void qlt_update_vp_map(struct scsi_qla_host *, int);
extern void qlt_free_session_done(struct work_struct *);
/*
* This macro is used during early initializations when host->active_mode
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 03f3e2cd62b5..42d69d89834f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.900-k"
+#define QLA2XXX_VERSION "10.02.08.200-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 900
+#define QLA_DRIVER_PATCH_VER 8
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8fa0056b56dd..8024322c9c5a 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1552,6 +1552,7 @@ static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
{
int rc;
+ size_t map_sz;
rc = btree_init32(&lport->lport_fcport_map);
if (rc) {
@@ -1559,17 +1560,15 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return rc;
}
- lport->lport_loopid_map =
- vzalloc(array_size(65536,
- sizeof(struct tcm_qla2xxx_fc_loopid)));
+ map_sz = array_size(65536, sizeof(struct tcm_qla2xxx_fc_loopid));
+
+ lport->lport_loopid_map = vzalloc(map_sz);
if (!lport->lport_loopid_map) {
- pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
- sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", map_sz);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
- pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
- sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", map_sz);
return 0;
}
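
The tcm_qla2xxx hunk above also computes the allocation size once via array_size() so the error and debug messages report the exact byte count requested; array_size() (from <linux/overflow.h>) saturates to SIZE_MAX on multiply overflow, so vzalloc() fails cleanly rather than allocating a truncated buffer. A userspace analog of that saturation, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Saturating multiply, analogous to the kernel's array_size() helper. */
static size_t array_size_sat(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return SIZE_MAX;	/* overflow: saturate so the allocator fails */
	return n * size;
}

int main(void)
{
	printf("%zu\n", array_size_sat(65536, 8));	/* 524288 */
	printf("%zu\n", array_size_sat(SIZE_MAX, 2));	/* SIZE_MAX */
	return 0;
}
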
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 9feb0323bc44..7d2210a006f0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -309,8 +309,8 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
* I'm not convinced we need to try quite this hard to get VPD, but
* all the existing users tried this hard.
*/
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
- len, NULL, 30 * HZ, 3, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
+ 30 * HZ, 3, NULL);
if (result)
return -EIO;
@@ -510,6 +510,9 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
int result, request_len;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
return -EINVAL;
@@ -531,9 +534,8 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
put_unaligned_be32(request_len, &cmd[6]);
memset(buffer, 0, len);
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
- request_len, &sshdr, 30 * HZ, 3, NULL);
-
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer,
+ request_len, 30 * HZ, 3, &exec_args);
if (result < 0)
return result;
if (result && scsi_sense_valid(&sshdr) &&
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 1126a265d5ee..e3b31d32b6a9 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -69,12 +69,15 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
{
int result;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
SCSI_LOG_IOCTL(1, sdev_printk(KERN_INFO, sdev,
"Trying ioctl with scsi command %d\n", *cmd));
- result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
- &sshdr, timeout, retries, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, timeout,
+ retries, &exec_args);
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ed1ebcb7443..abe93ec8b7d0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -185,39 +185,37 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
-
/**
- * __scsi_execute - insert request and wait for the result
- * @sdev: scsi device
+ * scsi_execute_cmd - insert request and wait for the result
+ * @sdev: scsi_device
* @cmd: scsi command
- * @data_direction: data direction
+ * @opf: block layer request cmd_flags
* @buffer: data buffer
* @bufflen: len of buffer
- * @sense: optional sense buffer
- * @sshdr: optional decoded sense header
* @timeout: request timeout in HZ
* @retries: number of times to retry request
- * @flags: flags for ->cmd_flags
- * @rq_flags: flags for ->rq_flags
- * @resid: optional residual length
+ * @args: Optional args. See struct definition for field descriptions
*
* Returns the scsi_cmnd result field if a command was executed, or a negative
* Linux error code if we didn't get that far.
*/
-int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
- int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, blk_opf_t flags,
- req_flags_t rq_flags, int *resid)
+int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+ blk_opf_t opf, void *buffer, unsigned int bufflen,
+ int timeout, int retries,
+ const struct scsi_exec_args *args)
{
+ static const struct scsi_exec_args default_args;
struct request *req;
struct scsi_cmnd *scmd;
int ret;
- req = scsi_alloc_request(sdev->request_queue,
- data_direction == DMA_TO_DEVICE ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
- rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
+ if (!args)
+ args = &default_args;
+ else if (WARN_ON_ONCE(args->sense &&
+ args->sense_len != SCSI_SENSE_BUFFERSIZE))
+ return -EINVAL;
+
+ req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -232,8 +230,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
scmd->allowed = retries;
req->timeout = timeout;
- req->cmd_flags |= flags;
- req->rq_flags |= rq_flags | RQF_QUIET;
+ req->rq_flags |= RQF_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
@@ -249,20 +246,21 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
- if (resid)
- *resid = scmd->resid_len;
- if (sense && scmd->sense_len)
- memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
- if (sshdr)
+ if (args->resid)
+ *args->resid = scmd->resid_len;
+ if (args->sense)
+ memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+ if (args->sshdr)
scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
- sshdr);
+ args->sshdr);
+
ret = scmd->result;
out:
blk_mq_free_request(req);
return ret;
}
-EXPORT_SYMBOL(__scsi_execute);
+EXPORT_SYMBOL(scsi_execute_cmd);
/*
* Wake up the error handler if necessary. Avoid as follows that the error
@@ -2086,6 +2084,9 @@ int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
{
unsigned char cmd[10];
unsigned char *real_buffer;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
int ret;
memset(cmd, 0, sizeof(cmd));
@@ -2135,8 +2136,8 @@ int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
cmd[4] = len;
}
- ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
- sshdr, timeout, retries, NULL);
+ ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len,
+ timeout, retries, &exec_args);
kfree(real_buffer);
return ret;
}
@@ -2167,6 +2168,10 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
int header_length;
int result, retry_count = retries;
struct scsi_sense_hdr my_sshdr;
+ const struct scsi_exec_args exec_args = {
+ /* caller might not be interested in sense, but we need it */
+ .sshdr = sshdr ? : &my_sshdr,
+ };
memset(data, 0, sizeof(*data));
memset(&cmd[0], 0, 12);
@@ -2175,9 +2180,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
cmd[2] = modepage;
- /* caller might not be interested in sense, but we need it */
- if (!sshdr)
- sshdr = &my_sshdr;
+ sshdr = exec_args.sshdr;
retry:
use_10_for_ms = sdev->use_10_for_ms || len > 255;
@@ -2200,8 +2203,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
memset(buffer, 0, len);
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
- sshdr, timeout, retries, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
+ timeout, retries, &exec_args);
if (result < 0)
return result;
@@ -2281,12 +2284,15 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
char cmd[] = {
TEST_UNIT_READY, 0, 0, 0, 0, 0,
};
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
int result;
/* try to eat the UNIT_ATTENTION if there are enough retries */
do {
- result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
- timeout, 1, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
+ timeout, 1, &exec_args);
if (sdev->removable && scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION)
sdev->changed = 1;
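
The hunks above show the conversion pattern applied throughout this series: the DMA direction argument becomes a REQ_OP_* opcode, and the optional outputs (sense buffer, decoded sense header, residual length) move into a struct scsi_exec_args so the signature stops growing. Condensed, a caller changes roughly as follows (kernel context; sdev, cmd, buffer, sshdr and resid are assumed to be declared by the caller):

/* Before: direction plus positional sense/resid arguments. */
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
			  &sshdr, 30 * HZ, 3, &resid);

/* After: REQ_OP_DRV_IN/REQ_OP_DRV_OUT encodes the direction and the
 * optional outputs are packed into one args structure. */
const struct scsi_exec_args exec_args = {
	.sshdr = &sshdr,	/* optional decoded sense header */
	.resid = &resid,	/* optional residual length */
};

result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
			  30 * HZ, 3, &exec_args);

Callers that need none of the optional outputs simply pass NULL for args, as the scsi_scan.c hunk below does.
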
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index f9b18fdc7b3c..4e842d79de31 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -210,7 +210,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
scsi_cmd[3] = 0;
scsi_cmd[4] = 0x2a; /* size */
scsi_cmd[5] = 0;
- scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
+ scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
SCSI_TIMEOUT, 3, NULL);
}
@@ -646,8 +646,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
int response_len = 0;
- int pass, count, result;
+ int pass, count, result, resid;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .resid = &resid,
+ };
*bflags = 0;
@@ -665,18 +669,16 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* Each pass gets up to three chances to ignore Unit Attention */
for (count = 0; count < 3; ++count) {
- int resid;
-
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = (unsigned char) try_inquiry_len;
memset(inq_result, 0, try_inquiry_len);
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- inq_result, try_inquiry_len, &sshdr,
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ inq_result, try_inquiry_len,
HZ / 2 + HZ * scsi_inq_timeout, 3,
- &resid);
+ &exec_args);
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: INQUIRY %s with code 0x%x\n",
@@ -1401,6 +1403,9 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
struct scsi_sense_hdr sshdr;
struct scsi_device *sdev;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int ret = 0;
/*
@@ -1475,9 +1480,10 @@ retry:
"scsi scan: Sending REPORT LUNS to (try %d)\n",
retries));
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- lun_data, length, &sshdr,
- SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ lun_data, length,
+ SCSI_REPORT_LUNS_TIMEOUT, 3,
+ &exec_args);
SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
"scsi scan: REPORT LUNS"
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f569cf0095c2..2442d4d2e3f3 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -105,28 +105,27 @@ static int sprint_frac(char *dest, int value, int denom)
}
static int spi_execute(struct scsi_device *sdev, const void *cmd,
- enum dma_data_direction dir,
- void *buffer, unsigned bufflen,
+ enum req_op op, void *buffer, unsigned int bufflen,
struct scsi_sense_hdr *sshdr)
{
int i, result;
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
struct scsi_sense_hdr sshdr_tmp;
+ blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ .sshdr = sshdr ? : &sshdr_tmp,
+ };
- if (!sshdr)
- sshdr = &sshdr_tmp;
+ sshdr = exec_args.sshdr;
for(i = 0; i < DV_RETRIES; i++) {
/*
* The purpose of the RQF_PM flag below is to bypass the
* SDEV_QUIESCE state.
*/
- result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
- sshdr, DV_TIMEOUT, /* retries */ 1,
- REQ_FAILFAST_DEV |
- REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER,
- RQF_PM, NULL);
+ result = scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen,
+ DV_TIMEOUT, 1, &exec_args);
if (result < 0 || !scsi_sense_valid(sshdr) ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -675,7 +674,7 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
}
for (r = 0; r < retries; r++) {
- result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
+ result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT,
buffer, len, &sshdr);
if(result || !scsi_device_online(sdev)) {
@@ -697,7 +696,7 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
}
memset(ptr, 0, len);
- spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE,
+ spi_execute(sdev, spi_read_buffer, REQ_OP_DRV_IN,
ptr, len, NULL);
scsi_device_set_state(sdev, SDEV_QUIESCE);
@@ -722,7 +721,7 @@ spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
for (r = 0; r < retries; r++) {
memset(ptr, 0, len);
- result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE,
+ result = spi_execute(sdev, spi_inquiry, REQ_OP_DRV_IN,
ptr, len, NULL);
if(result || !scsi_device_online(sdev)) {
@@ -828,7 +827,7 @@ spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
* (reservation conflict, device not ready, etc) just
* skip the write tests */
for (l = 0; ; l++) {
- result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE,
+ result = spi_execute(sdev, spi_test_unit_ready, REQ_OP_DRV_IN,
NULL, 0, NULL);
if(result) {
@@ -841,7 +840,7 @@ spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
}
result = spi_execute(sdev, spi_read_buffer_descriptor,
- DMA_FROM_DEVICE, buffer, 4, NULL);
+ REQ_OP_DRV_IN, buffer, 4, NULL);
if (result)
/* Device has no echo buffer */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 277960decc10..a38c71511bc9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -664,6 +664,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
struct scsi_disk *sdkp = data;
struct scsi_device *sdev = sdkp->device;
u8 cdb[12] = { 0, };
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ };
int ret;
cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
@@ -671,9 +674,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
put_unaligned_be16(spsp, &cdb[2]);
put_unaligned_be32(len, &cdb[6]);
- ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
- buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
- RQF_PM, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ buffer, len, SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */
@@ -1581,13 +1584,16 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
const int timeout = sdp->request_queue->rq_timeout
* SD_FLUSH_TIMEOUT_MULTIPLIER;
struct scsi_sense_hdr my_sshdr;
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ /* caller might not be interested in sense, but we need it */
+ .sshdr = sshdr ? : &my_sshdr,
+ };
if (!scsi_device_online(sdp))
return -ENODEV;
- /* caller might not be interested in sense, but we need it */
- if (!sshdr)
- sshdr = &my_sshdr;
+ sshdr = exec_args.sshdr;
for (retries = 3; retries > 0; --retries) {
unsigned char cmd[16] = { 0 };
@@ -1600,8 +1606,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
* Leave the rest of the command zero to indicate
* flush everything.
*/
- res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
- timeout, sdkp->max_retries, 0, RQF_PM, NULL);
+ res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
+ timeout, sdkp->max_retries, &exec_args);
if (res == 0)
break;
}
@@ -1743,6 +1749,9 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int result;
u8 cmd[16] = { 0, };
u8 data[24] = { 0, };
@@ -1756,8 +1765,9 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
put_unaligned_be64(sa_key, &data[8]);
data[20] = flags;
- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
- &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data,
+ sizeof(data), SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
@@ -2086,6 +2096,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
int retries, spintime;
unsigned int the_result;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int sense_valid = 0;
spintime = 0;
@@ -2101,10 +2114,11 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[0] = TEST_UNIT_READY;
memset((void *) &cmd[1], 0, 9);
- the_result = scsi_execute_req(sdkp->device, cmd,
- DMA_NONE, NULL, 0,
- &sshdr, SD_TIMEOUT,
- sdkp->max_retries, NULL);
+ the_result = scsi_execute_cmd(sdkp->device, cmd,
+ REQ_OP_DRV_IN, NULL, 0,
+ SD_TIMEOUT,
+ sdkp->max_retries,
+ &exec_args);
/*
* If the drive has indicated to us that it
@@ -2161,10 +2175,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[4] = 1; /* Start spin cycle */
if (sdkp->device->start_stop_pwr_cond)
cmd[4] |= 1 << 4;
- scsi_execute_req(sdkp->device, cmd, DMA_NONE,
- NULL, 0, &sshdr,
+ scsi_execute_cmd(sdkp->device, cmd,
+ REQ_OP_DRV_IN, NULL, 0,
SD_TIMEOUT, sdkp->max_retries,
- NULL);
+ &exec_args);
spintime_expire = jiffies + 100 * HZ;
spintime = 1;
}
@@ -2294,6 +2308,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int sense_valid = 0;
int the_result;
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
@@ -2311,9 +2328,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
cmd[13] = RC16_LEN;
memset(buffer, 0, RC16_LEN);
- the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buffer, RC16_LEN, &sshdr,
- SD_TIMEOUT, sdkp->max_retries, NULL);
+ the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
+ buffer, RC16_LEN, SD_TIMEOUT,
+ sdkp->max_retries, &exec_args);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -2385,6 +2402,9 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int sense_valid = 0;
int the_result;
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
@@ -2396,9 +2416,9 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
memset(&cmd[1], 0, 9);
memset(buffer, 0, 8);
- the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buffer, 8, &sshdr,
- SD_TIMEOUT, sdkp->max_retries, NULL);
+ the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
+ 8, SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -3635,6 +3655,10 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .req_flags = BLK_MQ_REQ_PM,
+ };
struct scsi_device *sdp = sdkp->device;
int res;
@@ -3647,8 +3671,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
- res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
+ res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
+ sdkp->max_retries, &exec_args);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (res > 0 && scsi_sense_valid(&sshdr)) {
@@ -3788,10 +3812,13 @@ static int sd_resume_runtime(struct device *dev)
if (sdp->ignore_media_change) {
/* clear the device's sense data */
static const u8 cmd[10] = { REQUEST_SENSE };
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ };
- if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
- NULL, sdp->request_queue->rq_timeout, 1, 0,
- RQF_PM, NULL))
+ if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
+ sdp->request_queue->rq_timeout, 1,
+ &exec_args))
sd_printk(KERN_NOTICE, sdkp,
"Failed to clear sense data\n");
}
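
The same mechanical conversion repeats in the hunks above and below: the DMA direction argument becomes a request op (REQ_OP_DRV_IN/REQ_OP_DRV_OUT), and the optional sense header, residual and request flags move into a struct scsi_exec_args passed by pointer. A minimal sketch of the pattern; the caller below is hypothetical, only scsi_execute_cmd() and struct scsi_exec_args come from this series:

#include <linux/types.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

static int example_tur(struct scsi_device *sdev)
{
	static const u8 cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,	/* previously a separate argument */
	};

	/*
	 * Old form: scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
	 *                            10 * HZ, 3, NULL);
	 */
	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				10 * HZ, 3, &exec_args);
}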
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 62abebbaf2e7..6b3a02d4406c 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -148,6 +148,9 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
unsigned char cmd[16];
unsigned int rep_len;
int result;
@@ -160,9 +163,8 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
if (partial)
cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
- result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buf, buflen, &sshdr,
- timeout, SD_MAX_RETRIES, NULL);
+ result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buf, buflen,
+ timeout, SD_MAX_RETRIES, &exec_args);
if (result) {
sd_printk(KERN_ERR, sdkp,
"REPORT ZONES start lba %llu failed\n", lba);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0a1734f34587..869ca9c7f23f 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -89,10 +89,13 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
unsigned char recv_page_code;
unsigned int retries = SES_RETRIES;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
do {
- ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
- &sshdr, SES_TIMEOUT, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
+ SES_TIMEOUT, 1, &exec_args);
} while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
(sshdr.sense_key == NOT_READY ||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
@@ -130,10 +133,13 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
};
struct scsi_sense_hdr sshdr;
unsigned int retries = SES_RETRIES;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
do {
- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
- &sshdr, SES_TIMEOUT, 1, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf,
+ bufflen, SES_TIMEOUT, 1, &exec_args);
} while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
(sshdr.sense_key == NOT_READY ||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index af27bb0f3133..228838eb3686 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -954,7 +954,7 @@ struct report_log_lun {
struct report_log_lun_list {
struct report_lun_header header;
- struct report_log_lun lun_entries[1];
+ struct report_log_lun lun_entries[];
};
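
Replacing the one-element array with a C99 flexible array member lets callers size the allocation for n trailing entries overflow-safely, e.g. with struct_size() from <linux/overflow.h>; a hypothetical caller:

	/* room for the header plus n LUN entries */
	struct report_log_lun_list *list =
		kzalloc(struct_size(list, lun_entries, n), GFP_KERNEL);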
struct report_phys_lun_8byte_wwid {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index d0446d4d4465..49a8f91810b6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1259,7 +1259,8 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
"report logical LUNs failed\n");
/*
- * Tack the controller itself onto the end of the logical device list.
+ * Tack the controller itself onto the end of the logical device list
+ * by adding a list entry that is all zeros.
*/
logdev_data = *logdev_list;
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 57bdc3ba49d9..9dd975b36b5b 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -437,6 +437,6 @@ void snic_trc_debugfs_init(void)
void
snic_trc_debugfs_term(void)
{
- debugfs_remove(debugfs_lookup(TRC_FILE, snic_glob->trc_root));
- debugfs_remove(debugfs_lookup(TRC_ENABLE_FILE, snic_glob->trc_root));
+ debugfs_lookup_and_remove(TRC_FILE, snic_glob->trc_root);
+ debugfs_lookup_and_remove(TRC_ENABLE_FILE, snic_glob->trc_root);
}
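
debugfs_lookup() returns the dentry with an extra reference that debugfs_remove() alone never drops, so the old pairing above leaked one reference per call. The new helper bundles lookup, removal and the final dput(); an equivalent open-coded form, shown only for illustration:

	struct dentry *dentry = debugfs_lookup(TRC_FILE, snic_glob->trc_root);

	if (dentry) {
		debugfs_remove(dentry);
		dput(dentry);	/* drop the reference debugfs_lookup() took */
	}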
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index a278b739d0c5..9e51dcd30bfd 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -170,10 +170,13 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
struct event_header *eh = (void *)buf;
struct media_event_desc *med = (void *)(buf + 4);
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int result;
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, sizeof(buf),
- &sshdr, SR_TIMEOUT, MAX_RETRIES, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, sizeof(buf),
+ SR_TIMEOUT, MAX_RETRIES, &exec_args);
if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
return DISK_EVENT_MEDIA_CHANGE;
@@ -730,8 +733,8 @@ static void get_sectorsize(struct scsi_cd *cd)
memset(buffer, 0, sizeof(buffer));
/* Do the command and wait.. */
- the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
- buffer, sizeof(buffer), NULL,
+ the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN,
+ buffer, sizeof(buffer),
SR_TIMEOUT, MAX_RETRIES, NULL);
retries--;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index fbdb5124d7f7..5b0b35e60e61 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -188,13 +188,15 @@ static int sr_play_trkind(struct cdrom_device_info *cdi,
int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
{
struct scsi_device *SDev;
- struct scsi_sense_hdr local_sshdr, *sshdr = &local_sshdr;
+ struct scsi_sense_hdr local_sshdr, *sshdr;
int result, err = 0, retries = 0;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = cgc->sshdr ? : &local_sshdr,
+ };
SDev = cd->device;
- if (cgc->sshdr)
- sshdr = cgc->sshdr;
+ sshdr = exec_args.sshdr;
retry:
if (!scsi_block_when_processing_errors(SDev)) {
@@ -202,10 +204,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
goto out;
}
- result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
- cgc->buffer, cgc->buflen, NULL, sshdr,
- cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
-
+ result = scsi_execute_cmd(SDev, cgc->cmd,
+ cgc->data_direction == DMA_TO_DEVICE ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, cgc->buffer,
+ cgc->buflen, cgc->timeout, IOCTL_RETRIES,
+ &exec_args);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (result < 0) {
err = result;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index d07d24c06b54..b221c3c99320 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -347,8 +347,8 @@ static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
memset(inq_result, 0, inq_result_len);
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- inq_result, inquiry_len, NULL,
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ inq_result, inquiry_len,
SD_TIMEOUT, SD_MAX_RETRIES, NULL);
if (result == 0 && inq_result[0] >> 5) {
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 69a4c9581e80..e7425549e39c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -144,8 +144,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
cdb[0] = MODE_SENSE;
cdb[4] = 0x0c; /* 12 bytes */
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
- HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, 12, HZ, 1, NULL);
if (ret)
goto out_free;
@@ -195,8 +194,8 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
cdb[2] = 0x80; /* Unit Serial Number */
put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
- INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
+ INQUIRY_VPD_SERIAL_LEN, HZ, 1, NULL);
if (ret)
goto out_free;
@@ -230,9 +229,8 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
cdb[2] = 0x83; /* Device Identifier */
put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
- INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
- NULL, HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
+ INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, HZ, 1, NULL);
if (ret)
goto out;
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 62f38c5bf857..4d02e0f2de10 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
new file mode 100644
index 000000000000..31df052fbc41
--- /dev/null
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
+ *
+ * Authors:
+ * Asutosh Das <quic_asutoshd@quicinc.com>
+ * Can Guo <quic_cang@quicinc.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "ufshcd-priv.h"
+
+#define MAX_QUEUE_SUP GENMASK(7, 0)
+#define UFS_MCQ_MIN_RW_QUEUES 2
+#define UFS_MCQ_MIN_READ_QUEUES 0
+#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
+#define UFS_MCQ_MIN_POLL_QUEUES 0
+#define QUEUE_EN_OFFSET 31
+#define QUEUE_ID_OFFSET 16
+
+#define MAX_DEV_CMD_ENTRIES 2
+#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
+#define MCQ_QCFG_SIZE 0x40
+#define MCQ_ENTRY_SIZE_IN_DWORD 8
+#define CQE_UCD_BA GENMASK_ULL(63, 7)
+
+static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops rw_queue_count_ops = {
+ .set = rw_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int rw_queues;
+module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
+MODULE_PARM_DESC(rw_queues,
+ "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
+
+static int read_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops read_queue_count_ops = {
+ .set = read_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int read_queues;
+module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
+MODULE_PARM_DESC(read_queues,
+ "Number of interrupt driven read queues used for read. Default value is 0");
+
+static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops poll_queue_count_ops = {
+ .set = poll_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int poll_queues = 1;
+module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
+MODULE_PARM_DESC(poll_queues,
+ "Number of poll queues used for r/w. Default value is 1");
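
All three queue counts are writable module parameters clamped by the *_count_set() helpers above, so they can be tuned at load time, e.g. modprobe ufshcd-core rw_queues=8 read_queues=2 poll_queues=1 (the module name here is an assumption based on the Makefile change above).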
+
+/**
+ * ufshcd_mcq_config_mac - Set the Max Active Commands (MAC).
+ * @hba: per adapter instance
+ * @max_active_cmds: maximum # of active commands to the device at any time.
+ *
+ * The controller won't send more than the max_active_cmds to the device at
+ * any time.
+ */
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
+{
+ u32 val;
+
+ val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
+ val &= ~MCQ_CFG_MAC_MASK;
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
+}
+
+/**
+ * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
+ * request would be issued.
+ * @hba: per adapter instance
+ * @req: pointer to the request to be issued
+ *
+ * Returns the hardware queue instance on which the request would
+ * be queued.
+ */
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req)
+{
+ u32 utag = blk_mq_unique_tag(req);
+ u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+
+ /* uhq[0] is used to serve device commands */
+ return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+}
+
+/**
+ * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * @hba: per adapter instance
+ *
+ * Returns queue depth on success, a negative error code on failure
+ *
+ * MAC - Max. Active Commands of the Host Controller (HC)
+ * The HC won't send more than this many commands to the device.
+ * It is mandatory to implement get_hba_mac() to enable MCQ mode.
+ * Calculates and adjusts the queue depth based on the depth
+ * supported by the HC and the UFS device.
+ */
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+{
+ int mac;
+
+ /* Mandatory to implement get_hba_mac() */
+ mac = ufshcd_mcq_vops_get_hba_mac(hba);
+ if (mac < 0) {
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
+ }
+
+ WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
+ /*
+	 * The max. value of bQueueDepth is 256; MAC is host dependent.
+	 * It is mandatory for a UFS device to define bQueueDepth if the
+	 * shared queueing architecture is enabled.
+ */
+ return min_t(int, mac, hba->dev_info.bqueuedepth);
+}
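+
Worked example with assumed values: a controller MAC of 64 and a device bQueueDepth of 32 give a shared queue depth of min(64, 32) = 32; since bQueueDepth is at most 256, the min_t(int, ...) never truncates.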
+
+static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
+{
+ int i;
+ u32 hba_maxq, rem, tot_queues;
+ struct Scsi_Host *host = hba->host;
+
+ hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
+
+ tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
+ rw_queues;
+
+ if (hba_maxq < tot_queues) {
+ dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
+ tot_queues, hba_maxq);
+ return -EOPNOTSUPP;
+ }
+
+ rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+
+ if (rw_queues) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ rw_queues = num_possible_cpus();
+ }
+
+ if (poll_queues) {
+ hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_POLL];
+ }
+
+ if (read_queues) {
+ hba->nr_queues[HCTX_TYPE_READ] = read_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_READ];
+ }
+
+ if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
+ num_possible_cpus());
+
+ for (i = 0; i < HCTX_MAX_TYPES; i++)
+ host->nr_hw_queues += hba->nr_queues[i];
+
+ hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ return 0;
+}
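+
Worked example with assumed values: given hba_maxq = 8, rw_queues = 4, read_queues = 2 and poll_queues = 1, one queue is reserved for device commands, rem starts at 7, and the maps come out as DEFAULT = 4, READ = 2 and POLL = 1, so host->nr_hw_queues = 7 and hba->nr_hw_queues = 8.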
+
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ size_t utrdl_size, cqe_size;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) *
+ hwq->max_entries;
+ hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
+ &hwq->sqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->sqe_dma_addr) {
+ dev_err(hba->dev, "SQE allocation failed\n");
+ return -ENOMEM;
+ }
+
+ cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
+ hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
+ &hwq->cqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->cqe_dma_addr) {
+ dev_err(hba->dev, "CQE allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+
+/* Operation and runtime registers configuration */
+#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
+#define MCQ_OPR_OFFSET_n(p, i) \
+ (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
+
+static void __iomem *mcq_opr_base(struct ufs_hba *hba,
+ enum ufshcd_mcq_opr n, int i)
+{
+ struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];
+
+ return opr->base + opr->stride * i;
+}
+
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
+{
+ return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
+{
+ writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
+
+/*
+ * Current MCQ specification doesn't provide a Task Tag or its equivalent in
+ * the Completion Queue Entry. Find the Task Tag using an indirect method.
+ */
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq,
+ struct cq_entry *cqe)
+{
+ u64 addr;
+
+ /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
+ BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
+
+ /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
+ addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
+ hba->ucdl_dma_addr;
+
+ return div_u64(addr, sizeof(struct utp_transfer_cmd_desc));
+}
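+
Worked example with assumed values: if hba->ucdl_dma_addr is 0x80000000, sizeof(struct utp_transfer_cmd_desc) is 1024 and the masked UCD base address from the CQE is 0x80002000, the recovered tag is 0x2000 / 1024 = 8.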
+
+static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
+ int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+
+ ufshcd_compl_one_cqe(hba, tag, cqe);
+}
+
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs = 0;
+
+ ufshcd_mcq_update_cq_tail_slot(hwq);
+ while (!ufshcd_mcq_is_cq_empty(hwq)) {
+ ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_inc_cq_head_slot(hwq);
+ completed_reqs++;
+ }
+
+ if (completed_reqs)
+ ufshcd_mcq_update_cq_head(hwq);
+
+ return completed_reqs;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
+
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs;
+
+ spin_lock(&hwq->cq_lock);
+ completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ spin_unlock(&hwq->cq_lock);
+
+ return completed_reqs;
+}
+
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ u16 qsize;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->id = i;
+ qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;
+
+ /* Submission Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQLBA, i));
+ /* Submission Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQUBA, i));
+ /* Submission Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
+ MCQ_CFG_n(REG_SQDAO, i));
+ /* Submission Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
+ MCQ_CFG_n(REG_SQISAO, i));
+
+ /* Completion Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQLBA, i));
+ /* Completion Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQUBA, i));
+ /* Completion Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
+ MCQ_CFG_n(REG_CQDAO, i));
+ /* Completion Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
+ MCQ_CFG_n(REG_CQISAO, i));
+
+ /* Save the base addresses for quicker access */
+ hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
+ hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
+ hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
+ hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;
+
+ /* Reinitializing is needed upon HC reset */
+ hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;
+
+ /* Enable Tail Entry Push Status interrupt only for non-poll queues */
+ if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
+ writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);
+
+ /* Completion Queue Enable|Size to Completion Queue Attribute */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
+ MCQ_CFG_n(REG_CQATTR, i));
+
+ /*
+	 * Submission Queue Enable|Size|Completion Queue ID to
+ * Submission Queue Attribute
+ */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
+ (i << QUEUE_ID_OFFSET),
+ MCQ_CFG_n(REG_SQATTR, i));
+ }
+}
+
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
+ REG_UFS_MEM_CFG);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
+
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
+{
+ ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
+ ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);
+
+int ufshcd_mcq_init(struct ufs_hba *hba)
+{
+ struct Scsi_Host *host = hba->host;
+ struct ufs_hw_queue *hwq;
+ int ret, i;
+
+ ret = ufshcd_mcq_config_nr_queues(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_vops_mcq_config_resource(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_mcq_vops_op_runtime_config(hba);
+ if (ret) {
+ dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+ hba->uhq = devm_kzalloc(hba->dev,
+ hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
+ GFP_KERNEL);
+ if (!hba->uhq) {
+ dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->max_entries = hba->nutrs;
+ spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
+ }
+
+ /* The very first HW queue serves device commands */
+ hba->dev_cmd_queue = &hba->uhq[0];
+ /* Give dev_cmd_queue the minimal number of entries */
+ hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
+
+ host->host_tagset = 1;
+ return 0;
+}
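
A sketch of the bring-up order these entry points imply, following the data dependencies visible in this file (the real call sites land in ufshcd.c; error handling is elided here):

	hba->nutrs = ufshcd_mcq_decide_queue_depth(hba); /* min(MAC, bQueueDepth) */
	ufshcd_mcq_init(hba);			/* count queues, allocate hba->uhq */
	ufshcd_mcq_memory_alloc(hba);		/* per-queue SQE/CQE rings */
	ufshcd_mcq_make_queues_operational(hba); /* program per-queue registers */
	ufshcd_mcq_config_mac(hba, hba->nutrs);	/* cap outstanding commands */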
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index b99e3f3dc4ef..0d38e7fa34cc 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -6,6 +6,7 @@
*/
#include <linux/bsg-lib.h>
+#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
@@ -16,31 +17,11 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
struct utp_upiu_query *qr)
{
int desc_size = be16_to_cpu(qr->length);
- int desc_id = qr->idn;
if (desc_size <= 0)
return -EINVAL;
- ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
- if (!*desc_len)
- return -EINVAL;
-
- *desc_len = min_t(int, *desc_len, desc_size);
-
- return 0;
-}
-
-static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
- unsigned int request_len,
- unsigned int reply_len)
-{
- int min_req_len = sizeof(struct ufs_bsg_request);
- int min_rsp_len = sizeof(struct ufs_bsg_reply);
-
- if (min_req_len > request_len || min_rsp_len > reply_len) {
- dev_err(hba->dev, "not enough space assigned\n");
- return -EINVAL;
- }
+ *desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);
return 0;
}
@@ -83,23 +64,84 @@ out:
return 0;
}
+static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
+{
+ struct ufs_rpmb_request *rpmb_request = job->request;
+ struct ufs_rpmb_reply *rpmb_reply = job->reply;
+ struct bsg_buffer *payload = NULL;
+ enum dma_data_direction dir;
+ struct scatterlist *sg_list = NULL;
+ int rpmb_req_type;
+ int sg_cnt = 0;
+ int ret;
+ int data_len;
+
+ if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
+ !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
+ return -EINVAL;
+
+ if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
+ return -EINVAL;
+
+ rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);
+
+ switch (rpmb_req_type) {
+ case UFS_RPMB_WRITE_KEY:
+ case UFS_RPMB_READ_CNT:
+ case UFS_RPMB_PURGE_ENABLE:
+ dir = DMA_NONE;
+ break;
+ case UFS_RPMB_WRITE:
+ case UFS_RPMB_SEC_CONF_WRITE:
+ dir = DMA_TO_DEVICE;
+ break;
+ case UFS_RPMB_READ:
+ case UFS_RPMB_SEC_CONF_READ:
+ case UFS_RPMB_PURGE_STATUS_READ:
+ dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dir != DMA_NONE) {
+ payload = &job->request_payload;
+ if (!payload || !payload->payload_len || !payload->sg_cnt)
+ return -EINVAL;
+
+ sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+ if (unlikely(!sg_cnt))
+ return -ENOMEM;
+ sg_list = payload->sg_list;
+ data_len = payload->payload_len;
+ }
+
+ ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
+ &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
+ &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);
+
+ if (dir != DMA_NONE) {
+ dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+
+ if (!ret)
+ rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
+ }
+
+ return ret;
+}
+
static int ufs_bsg_request(struct bsg_job *job)
{
struct ufs_bsg_request *bsg_request = job->request;
struct ufs_bsg_reply *bsg_reply = job->reply;
struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
- unsigned int req_len = job->request_len;
- unsigned int reply_len = job->reply_len;
struct uic_command uc = {};
int msgcode;
- uint8_t *desc_buff = NULL;
+ uint8_t *buff = NULL;
int desc_len = 0;
enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
int ret;
-
- ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
- if (ret)
- goto out;
+ bool rpmb = false;
bsg_reply->reply_payload_rcv_len = 0;
@@ -109,34 +151,39 @@ static int ufs_bsg_request(struct bsg_job *job)
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
desc_op = bsg_request->upiu_req.qr.opcode;
- ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
- &desc_len, desc_op);
- if (ret) {
- ufshcd_rpm_put_sync(hba);
+ ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
+ if (ret)
goto out;
- }
-
fallthrough;
case UPIU_TRANSACTION_NOP_OUT:
case UPIU_TRANSACTION_TASK_REQ:
ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
&bsg_reply->upiu_rsp, msgcode,
- desc_buff, &desc_len, desc_op);
+ buff, &desc_len, desc_op);
if (ret)
- dev_err(hba->dev,
- "exe raw upiu: error code %d\n", ret);
-
+ dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
+ else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ buff, desc_len);
+ }
break;
case UPIU_TRANSACTION_UIC_CMD:
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
ret = ufshcd_send_uic_cmd(hba, &uc);
if (ret)
- dev_err(hba->dev,
- "send uic cmd: error code %d\n", ret);
+ dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
break;
+ case UPIU_TRANSACTION_ARPMB_CMD:
+ rpmb = true;
+ ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
+ if (ret)
+ dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret);
+ break;
default:
ret = -ENOTSUPP;
dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
@@ -144,22 +191,11 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
- ufshcd_rpm_put_sync(hba);
-
- if (!desc_buff)
- goto out;
-
- if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
- bsg_reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- desc_buff, desc_len);
-
- kfree(desc_buff);
-
out:
+ ufshcd_rpm_put_sync(hba);
+ kfree(buff);
bsg_reply->result = ret;
- job->reply_len = sizeof(struct ufs_bsg_reply);
+ job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
if (ret == 0)
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index a9e8e1f5afe7..529f8507a5e4 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -61,7 +61,24 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe);
+int ufshcd_mcq_init(struct ufs_hba *hba);
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+
+#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -70,9 +87,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
@@ -226,6 +240,53 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
+static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->reinit_notify)
+ hba->vops->reinit_notify(hba);
+}
+
+static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->mcq_config_resource)
+ return hba->vops->mcq_config_resource(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->get_hba_mac)
+ return hba->vops->get_hba_mac(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->op_runtime_config)
+ return hba->vops->op_runtime_config(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ if (hba->vops && hba->vops->get_outstanding_cqs)
+ return hba->vops->get_outstanding_cqs(hba, ocqs);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->config_esi)
+ return hba->vops->config_esi(hba);
+
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
@@ -302,4 +363,44 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
+static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
+{
+ u32 mask = q->max_entries - 1;
+ u32 val;
+
+ q->sq_tail_slot = (q->sq_tail_slot + 1) & mask;
+ val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
+ writel(val, q->mcq_sq_tail);
+}
+
+static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
+{
+ u32 val = readl(q->mcq_cq_tail);
+
+ q->cq_tail_slot = val / sizeof(struct cq_entry);
+}
+
+static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
+{
+ return q->cq_head_slot == q->cq_tail_slot;
+}
+
+static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
+{
+ q->cq_head_slot++;
+ if (q->cq_head_slot == q->max_entries)
+ q->cq_head_slot = 0;
+}
+
+static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
+{
+ writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
+}
+
+static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
+{
+ struct cq_entry *cqe = q->cqe_base_addr;
+
+ return cqe + q->cq_head_slot;
+}
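+
These helpers treat each queue as a ring of fixed-size slots whose doorbell registers hold byte offsets rather than slot indices. Worked example with assumed values, max_entries = 32 (SQ mask 31) and 32-byte entries: advancing the SQ tail from slot 31 wraps it to slot 0 and writes 0 to the tail doorbell, while acknowledging completions up to CQ slot 5 writes 5 * 32 = 160 to the CQ head doorbell.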
#endif /* _UFSHCD_PRIV_H_ */
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 3a1c4d31e010..276a82b2e5ee 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -43,6 +43,12 @@
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
+
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK |\
+ MCQ_CQ_EVENT_STATUS)
+
+
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
@@ -56,6 +62,9 @@
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+/* Advanced RPMB request timeout */
+#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
+
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -89,6 +98,33 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+/* UFSHC 4.0 compliant HCs support this mode; refer to param_set_mcq_mode() */
+static bool use_mcq_mode = true;
+
+static bool is_mcq_supported(struct ufs_hba *hba)
+{
+ return hba->mcq_sup && use_mcq_mode;
+}
+
+static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct kernel_param_ops mcq_mode_ops = {
+ .set = param_set_mcq_mode,
+ .get = param_get_bool,
+};
+
+module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -528,7 +564,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- prdt_length /= sizeof(struct ufshcd_sg_entry);
+ prdt_length /= ufshcd_sg_entry_size(hba);
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
@@ -537,7 +573,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
- sizeof(struct ufshcd_sg_entry) * prdt_length);
+ ufshcd_sg_entry_size(hba) * prdt_length);
}
}
@@ -740,12 +776,17 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
/**
* ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
* @lrbp: pointer to local command reference block
+ * @cqe: pointer to the completion queue entry
*
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
*/
-static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
+ if (cqe)
+ return le32_to_cpu(cqe->status) & MASK_OCS;
+
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
@@ -1121,6 +1162,12 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
return pending;
}
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
@@ -1154,7 +1201,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- schedule();
+ io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
timeout = true;
@@ -1225,9 +1272,14 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
return ret;
}
-static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
- #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
int ret = 0;
/*
* make sure that there are no outstanding requests when
@@ -1238,7 +1290,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
@@ -1280,7 +1332,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- ret = ufshcd_clock_scaling_prepare(hba);
+ ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
@@ -2136,9 +2188,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
* @task_tag: Task tag of the command
+ * @hwq: pointer to hardware queue instance
*/
static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
+ struct ufs_hw_queue *hwq)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
@@ -2152,12 +2206,24 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ if (is_mcq_enabled(hba)) {
+ int utrd_size = sizeof(struct utp_transfer_req_desc);
+
+ spin_lock(&hwq->sq_lock);
+ memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
+ lrbp->utr_descriptor_ptr, utrd_size);
+ ufshcd_inc_sq_tail(hwq);
+ spin_unlock(&hwq->sq_lock);
+ } else {
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ if (hba->vops && hba->vops->setup_xfer_req)
+ hba->vops->setup_xfer_req(hba, lrbp->task_tag,
+ !!lrbp->cmd);
+ __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << lrbp->task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ }
}
/**
@@ -2245,6 +2311,14 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "crypto setup failed\n");
+ hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
+ if (!hba->mcq_sup)
+ return err;
+
+ hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
+ hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
+ hba->mcq_capabilities);
+
return err;
}
@@ -2397,38 +2471,30 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
}
/**
- * ufshcd_map_sg - Map scatter-gather list to prdt
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- *
- * Returns 0 in case of success, non-zero value in case of failure
+ * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
+ * @hba: per-adapter instance
+ * @lrbp: pointer to local reference block
+ * @sg_entries: The number of sg entries actually used
+ * @sg_list: Pointer to SG list
*/
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
+ struct scatterlist *sg_list)
{
- struct ufshcd_sg_entry *prd_table;
+ struct ufshcd_sg_entry *prd;
struct scatterlist *sg;
- struct scsi_cmnd *cmd;
- int sg_segments;
int i;
- cmd = lrbp->cmd;
- sg_segments = scsi_dma_map(cmd);
- if (sg_segments < 0)
- return sg_segments;
-
- if (sg_segments) {
+ if (sg_entries) {
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((sg_segments *
- sizeof(struct ufshcd_sg_entry)));
+ cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16(sg_segments);
+ lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
- prd_table = lrbp->ucd_prdt_ptr;
+ prd = lrbp->ucd_prdt_ptr;
- scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ for_each_sg(sg_list, sg, sg_entries, i) {
const unsigned int len = sg_dma_len(sg);
/*
@@ -2440,13 +2506,32 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* indicates 4 bytes, '7' indicates 8 bytes, etc."
*/
WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
- prd_table[i].size = cpu_to_le32(len - 1);
- prd_table[i].addr = cpu_to_le64(sg->dma_address);
- prd_table[i].reserved = 0;
+ prd->size = cpu_to_le32(len - 1);
+ prd->addr = cpu_to_le64(sg->dma_address);
+ prd->reserved = 0;
+ prd = (void *)prd + ufshcd_sg_entry_size(hba);
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
+}
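+
Worked example of the spec encoding quoted above: the PRD size field stores the byte count minus one, so a 4096-byte segment is written as 0xFFF, and the 256 KiB ceiling enforced by the WARN_ONCE() encodes as 0x3FFFF.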
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ int sg_segments = scsi_dma_map(cmd);
+
+ if (sg_segments < 0)
+ return sg_segments;
+
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
return 0;
}
@@ -2494,14 +2579,15 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
}
/**
- * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
+ * header according to the request
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: requests data direction
+ * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u8 *upiu_flags, enum dma_data_direction cmd_dir)
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
+ enum dma_data_direction cmd_dir, int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u32 data_direction;
@@ -2520,8 +2606,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
- dword_0 = data_direction | (lrbp->command_type
- << UPIU_COMMAND_TYPE_OFFSET);
+ dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
+ ehs_length << 8;
if (lrbp->intr_cmd)
dword_0 |= UTP_REQ_DESC_INT_CMD;
@@ -2576,8 +2662,7 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
}
/**
- * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
- * for query requsts
+ * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
* @lrbp: local reference block pointer
* @upiu_flags: flags
@@ -2648,7 +2733,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@ -2676,8 +2761,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
@@ -2709,33 +2793,38 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
*/
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
- int i;
+ struct ufs_hba *hba = shost_priv(shost);
+ int i, queue_offset = 0;
+
+ if (!is_mcq_supported(hba)) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
+ hba->nr_queues[HCTX_TYPE_READ] = 0;
+ hba->nr_queues[HCTX_TYPE_POLL] = 1;
+ hba->nr_hw_queues = 1;
+ }
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
- switch (i) {
- case HCTX_TYPE_DEFAULT:
- case HCTX_TYPE_POLL:
- map->nr_queues = 1;
- break;
- case HCTX_TYPE_READ:
- map->nr_queues = 0;
+ map->nr_queues = hba->nr_queues[i];
+ if (!map->nr_queues)
continue;
- default:
- WARN_ON_ONCE(true);
- }
- map->queue_offset = 0;
+ map->queue_offset = queue_offset;
+ if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
+ map->queue_offset = 0;
+
blk_mq_map_queues(map);
+ queue_offset += map->nr_queues;
}
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
- struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+ struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
+ i * sizeof_utp_transfer_cmd_desc(hba);
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * sizeof(struct utp_transfer_cmd_desc);
+ i * sizeof_utp_transfer_cmd_desc(hba);
u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
response_upiu);
u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -2743,11 +2832,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr +
i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
@@ -2764,6 +2853,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
int err = 0;
+ struct ufs_hw_queue *hwq = NULL;
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
@@ -2848,7 +2938,10 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
- ufshcd_send_command(hba, tag);
+ if (is_mcq_enabled(hba))
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+ ufshcd_send_command(hba, tag, hwq);
out:
rcu_read_unlock();
@@ -2943,6 +3036,12 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
__func__);
break;
+ case UPIU_TRANSACTION_RESPONSE:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
+ }
+ break;
default:
err = -EINVAL;
dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
@@ -2972,7 +3071,7 @@ retry:
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
- err = ufshcd_get_tr_ocs(lrbp);
+ err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
@@ -3008,6 +3107,22 @@ retry:
} else {
dev_err(hba->dev, "%s: failed to clear tag %d\n",
__func__, lrbp->task_tag);
+
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ pending = test_bit(lrbp->task_tag,
+ &hba->outstanding_reqs);
+ if (pending)
+ hba->dev_cmd.complete = NULL;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (!pending) {
+ /*
+ * The completion handler ran while we tried to
+ * clear the command.
+ */
+ time_left = 1;
+ goto retry;
+ }
}
}
@@ -3043,10 +3158,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out;
hba->dev_cmd.complete = &wait;
+ hba->dev_cmd.cqe = NULL;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
@@ -3367,37 +3483,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
}
/**
- * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_len: mapped desc length (out)
- */
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_len)
-{
- if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
- desc_id == QUERY_DESC_IDN_RFU_1)
- *desc_len = 0;
- else
- *desc_len = hba->desc_size[desc_id];
-}
-EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
-
-static void ufshcd_update_desc_length(struct ufs_hba *hba,
- enum desc_idn desc_id, int desc_index,
- unsigned char desc_len)
-{
- if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
- desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
- /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
- * than the RPMB unit, however, both descriptors share the same
- * desc_idn, to cover both unit descriptors with one length, we
- * choose the normal unit descriptor length by desc_index.
- */
- hba->desc_size[desc_id] = desc_len;
-}
-
-/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -3417,26 +3502,13 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
{
int ret;
u8 *desc_buf;
- int buff_len;
+ int buff_len = QUERY_DESC_MAX_SIZE;
bool is_kmalloc = true;
/* Safety check */
if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- /* Get the length of descriptor */
- ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!buff_len) {
- dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
- return -EINVAL;
- }
-
- if (param_offset >= buff_len) {
- dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
- __func__, param_offset, desc_id, buff_len);
- return -EINVAL;
- }
-
/* Check whether we need temp memory */
if (param_offset != 0 || param_size < buff_len) {
desc_buf = kzalloc(buff_len, GFP_KERNEL);
@@ -3449,15 +3521,24 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
/* Request for full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0,
- desc_buf, &buff_len);
-
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
__func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Update descriptor length */
+ buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
+
+ if (param_offset >= buff_len) {
+ dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
+ __func__, param_offset, desc_id, buff_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
/* Sanity check */
if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
@@ -3466,10 +3547,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
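
With the static desc_size[] table gone, the length check moves after the read: the driver requests QUERY_DESC_MAX_SIZE, then trusts the device-reported bLength for offset validation and copy clamping. A small sketch of that order of operations (hypothetical helper; only the pattern matches the driver):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define QUERY_DESC_MAX_SIZE		255
#define QUERY_DESC_LENGTH_OFFSET	0

/* Copy param bytes out of a descriptor whose true length is self-reported. */
static int read_desc_param(const uint8_t *desc, size_t param_offset,
			   uint8_t *out, size_t param_size)
{
	size_t buff_len = desc[QUERY_DESC_LENGTH_OFFSET]; /* device-reported */

	if (param_offset >= buff_len)
		return -1;				  /* offset past end */
	if (param_offset + param_size > buff_len)
		param_size = buff_len - param_offset;	  /* clamp the copy */
	memcpy(out, desc + param_offset, param_size);
	return 0;
}

int main(void)
{
	uint8_t desc[QUERY_DESC_MAX_SIZE] = { 16, 0x09 }; /* bLength = 16 */
	uint8_t val;

	if (!read_desc_param(desc, 1, &val, 1))
		printf("param = 0x%02x\n", val);
	return 0;
}
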
- /* Update descriptor length */
- buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
- ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
-
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
if (param_offset >= buff_len)
@@ -3656,7 +3733,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */
- ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
@@ -3664,12 +3741,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/*
* UFSHCI requires UTP command descriptor to be 128 byte aligned.
- * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
- * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
- * be aligned to 128 bytes as well
*/
if (!hba->ucdl_base_addr ||
- WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
@@ -3685,13 +3759,21 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
/*
+ * Skip utmrdl allocation; it may have been allocated during the
+ * first pass and not released during MCQ memory allocation.
+ * See ufshcd_release_sdb_queue() and ufshcd_config_mcq().
+ */
+ if (hba->utmrdl_base_addr)
+ goto skip_utmrdl;
+ /*
* Allocate memory for UTP Task Management descriptors
* UFSHCI requires 1024 byte alignment of UTMRD
*/
@@ -3701,12 +3783,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
+skip_utmrdl:
/* Allocate memory for local reference block */
hba->lrb = devm_kcalloc(hba->dev,
hba->nutrs, sizeof(struct ufshcd_lrb),
@@ -3750,7 +3833,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table);
- cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) {
@@ -4907,7 +4990,7 @@ static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
*/
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
- int len = hba->desc_size[QUERY_DESC_IDN_UNIT];
+ int len = QUERY_DESC_MAX_SIZE;
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
u8 lun_qdepth = hba->nutrs;
u8 *desc_buf;
@@ -4942,6 +5025,12 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
+ /* In case of RPMB LU, check if advanced RPMB mode is enabled */
+ if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
+ desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
+ hba->dev_info.b_advanced_rpmb_en = true;
+
kfree(desc_buf);
set_qdepth:
/*
@@ -5030,8 +5119,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
ufshcd_hpb_configure(hba, sdev);
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
- blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
+ if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
+ blk_queue_update_dma_alignment(q, 4096 - 1);
/*
* Block runtime-pm until all consumers are added.
* Refer ufshcd_setup_links().
@@ -5130,18 +5219,20 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
* @lrbp: pointer to local reference block of completed command
+ * @cqe: pointer to the completion queue entry
*
* Returns result of the command to notify SCSI midlayer
*/
static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
int result = 0;
int scsi_status;
enum utp_ocs ocs;
/* overall command status of utrd */
- ocs = ufshcd_get_tr_ocs(lrbp);
+ ocs = ufshcd_get_tr_ocs(lrbp, cqe);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
@@ -5306,42 +5397,53 @@ static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
}
/**
- * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_compl_one_cqe - handle a completion queue entry
* @hba: per adapter instance
- * @completed_reqs: bitmask that indicates which requests to complete
+ * @task_tag: the task tag of the request to be completed
+ * @cqe: pointer to the completion queue entry
*/
-static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- int index;
-
- for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- lrbp = &hba->lrb[index];
- lrbp->compl_time_stamp = ktime_get();
- lrbp->compl_time_stamp_local_clock = local_clock();
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
- ufshcd_release_scsi_cmd(hba, lrbp);
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- ufshcd_add_command_trace(hba, index,
- UFS_DEV_COMP);
- complete(hba->dev_cmd.complete);
- ufshcd_clk_scaling_update_busy(hba);
- }
+
+ lrbp = &hba->lrb[task_tag];
+ lrbp->compl_time_stamp = ktime_get();
+ cmd = lrbp->cmd;
+ if (cmd) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
+ ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ /* Do not touch lrbp after scsi done */
+ scsi_done(cmd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+ hba->dev_cmd.cqe = cqe;
+ ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
+ complete(hba->dev_cmd.complete);
+ ufshcd_clk_scaling_update_busy(hba);
}
}
}
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: bitmask that indicates which requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
+{
+ int tag;
+
+ for_each_set_bit(tag, &completed_reqs, hba->nutrs)
+ ufshcd_compl_one_cqe(hba, tag, NULL);
+}
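
In legacy (SDB) mode the doorbell diff is a bitmask, so the new __ufshcd_transfer_req_compl() is just a bit-scan loop that funnels each tag into ufshcd_compl_one_cqe() with a NULL CQE. The same iteration can be modeled in plain C with __builtin_ctzl (illustrative only):

#include <stdio.h>

/* Visit each set bit of a completion mask, lowest tag first. */
static void for_each_completed(unsigned long mask, void (*compl_fn)(int tag))
{
	while (mask) {
		int tag = __builtin_ctzl(mask);	/* index of lowest set bit */
		compl_fn(tag);
		mask &= mask - 1;		/* clear that bit */
	}
}

static void compl_one(int tag)
{
	printf("complete tag %d\n", tag);
}

int main(void)
{
	for_each_completed(0x29UL, compl_one);	/* tags 0, 3, 5 */
	return 0;
}
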
+
/* Any value that is not an existing queue number is fine for this constant. */
enum {
UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
@@ -5371,6 +5473,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
u32 tr_doorbell;
+ struct ufs_hw_queue *hwq;
+
+ if (is_mcq_enabled(hba)) {
+ hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+ return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -6604,6 +6713,40 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
}
/**
+ * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
+ * @hba: per adapter instance
+ *
+ * Returns IRQ_HANDLED if interrupt is handled
+ */
+static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ unsigned long outstanding_cqs;
+ unsigned int nr_queues;
+ int i, ret;
+ u32 events;
+
+ ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
+ if (ret)
+ outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
+
+ /* Exclude the poll queues */
+ nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ for_each_set_bit(i, &outstanding_cqs, nr_queues) {
+ hwq = &hba->uhq[i];
+
+ events = ufshcd_mcq_read_cqis(hba, i);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, i);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
@@ -6628,6 +6771,9 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (intr_status & UTP_TRANSFER_REQ_COMPL)
retval |= ufshcd_transfer_req_compl(hba);
+ if (intr_status & MCQ_CQ_EVENT_STATUS)
+ retval |= ufshcd_handle_mcq_cq_events(hba);
+
return retval;
}
@@ -6876,7 +7022,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
/* update the task tag in the request upiu */
req_upiu->header.dword_0 |= cpu_to_be32(tag);
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
@@ -6895,7 +7041,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -7000,6 +7146,100 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
}
/**
+ * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
+ * @hba: per adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply
+ * @req_ehs: EHS field which contains Advanced RPMB Request Message
+ * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
+ * @sg_cnt: The number of sg lists actually used
+ * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
+ * @dir: DMA direction
+ *
+ * Returns zero on success, non-zero on failure
+ */
+int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
+ struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ const u32 tag = hba->reserved_slot;
+ struct ufshcd_lrb *lrbp;
+ int err = 0;
+ int result;
+ u8 upiu_flags;
+ u8 *ehs_data;
+ u16 ehs_len;
+
+ /* Protects use of hba->reserved_slot. */
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = UFS_UPIU_RPMB_WLUN;
+
+ lrbp->intr_cmd = true;
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
+ hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
+
+ /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+
+ /* update the task tag and LUN in the request upiu */
+ req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
+
+ /* copy the UPIU (which contains the CDB) request as it is */
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+ /* Copy the EHS, starting at byte 32, immediately after the CDB package */
+ memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
+
+ if (dir != DMA_NONE && sg_list)
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
+
+ if (!err) {
+ /* Just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+ /* Get the response UPIU result */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
+ /*
+ * Since the bLength in the EHS indicates the total size of the EHS header and
+ * EHS data in 32-byte units, the bLength of the Request/Response for an
+ * Advanced RPMB message is 02h.
+ */
+ if (ehs_len == 2 && rsp_ehs) {
+ /*
+ * ucd_rsp_ptr points to a buffer with a length of 512 bytes
+ * (ALIGNED_UPIU_SIZE = 512), and the EHS data starts at byte 32
+ */
+ ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
+ memcpy(rsp_ehs, ehs_data, ehs_len * 32);
+ }
+ }
+
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err ? : result;
+}
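
The EHS bookkeeping above relies on bLength being expressed in 32-byte units: a value of 2 means 64 bytes of EHS header plus data, which is exactly what the memcpy to rsp_ehs transfers from byte 32 of the 512-byte response buffer. The arithmetic, spelled out (the constants are assumed to mirror the driver's definitions):

#include <stdio.h>

#define EHS_OFFSET_IN_RESPONSE	32	/* EHS follows the 32-byte UPIU header */
#define EHS_UNIT_BYTES		32	/* bLength counts 32-byte units */

int main(void)
{
	unsigned int ehs_len = 2;	/* bLength for Advanced RPMB messages */
	unsigned int bytes = ehs_len * EHS_UNIT_BYTES;

	printf("EHS spans bytes %d..%d of the response (%u bytes)\n",
	       EHS_OFFSET_IN_RESPONSE,
	       EHS_OFFSET_IN_RESPONSE + bytes - 1, bytes);
	return 0;
}
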
+
+/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
@@ -7441,12 +7681,11 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
* In case regulators are not initialized we'll return 0
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
- * @len: length of desc_buff
*
* Returns calculated ICC level
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- const u8 *desc_buf, int len)
+ const u8 *desc_buf)
{
u32 icc_level = 0;
@@ -7488,25 +7727,23 @@ out:
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
- int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
u8 *desc_buf;
u32 icc_level;
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf)
return;
ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (ret) {
dev_err(hba->dev,
- "%s: Failed reading power descriptor.len = %d ret = %d",
- __func__, buff_len, ret);
+ "%s: Failed reading power descriptor ret = %d",
+ __func__, ret);
goto out;
}
- icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
- buff_len);
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -7616,10 +7853,6 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
(hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
goto wb_disabled;
- if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
- goto wb_disabled;
-
ext_ufs_feature = get_unaligned_be32(desc_buf +
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
@@ -7690,6 +7923,31 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 ext_ufs_feature;
+ u32 ext_iid_en = 0;
+ int err;
+
+ /* Only UFS-4.0 and above may support EXT_IID */
+ if (dev_info->wspecversion < 0x400)
+ goto out;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
+ goto out;
+
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
+ if (err)
+ dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
+
+out:
+ dev_info->b_ext_iid_en = ext_iid_en;
+}
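
ufshcd_ext_iid_probe() follows the same probe shape as the WB and temperature-notification probes: gate on the spec version, test a feature bit in the big-endian extended-features word, then read the enabling attribute. A sketch of the feature-word decode (a userspace stand-in for get_unaligned_be32; the bit position is illustrative, not taken from the spec):

#include <stdint.h>
#include <stdio.h>

#define UFS_DEV_EXT_IID_SUP	(1u << 16)	/* illustrative bit position */

/* Read a big-endian 32-bit feature word from a descriptor buffer. */
static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint8_t desc[4] = { 0x00, 0x01, 0x00, 0x00 };	/* bit 16 set */
	uint16_t wspecversion = 0x0400;

	if (wspecversion >= 0x400 && (be32_at(desc) & UFS_DEV_EXT_IID_SUP))
		printf("EXT_IID supported; query bEXTIIDEn next\n");
	return 0;
}
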
+
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups)
{
@@ -7727,14 +7985,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
- desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
- hba->desc_size[QUERY_DESC_IDN_DEVICE]);
+ QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -7751,6 +8009,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -7788,6 +8047,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (hba->ext_iid_sup)
+ ufshcd_ext_iid_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -7981,18 +8243,16 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
int err;
- size_t buff_len;
u8 *desc_buf;
- buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
__func__, err);
@@ -8004,7 +8264,7 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
- if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+ if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
ufshpb_get_geo_info(hba, desc_buf);
@@ -8089,11 +8349,7 @@ out:
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
bool flag;
- int ret, i;
-
- /* Init device descriptor sizes */
- for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
- hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
+ int ret;
/* Init UFS geometry descriptor related parameters */
ret = ufshcd_device_geo_params_init(hba);
@@ -8161,27 +8417,96 @@ out:
return ret;
}
-/**
- * ufshcd_probe_hba - probe hba to detect device and initialize it
- * @hba: per-adapter instance
- * @init_dev_params: whether or not to call ufshcd_device_params_init().
- *
- * Execute link-startup and verify device initialization
- */
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+/* SDB - Single Doorbell */
+static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
+{
+ size_t ucdl_size, utrdl_size;
+
+ ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+ dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
+ hba->ucdl_dma_addr);
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
+ dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
+ hba->utrdl_dma_addr);
+
+ devm_kfree(hba->dev, hba->lrb);
+}
+
+static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
int ret;
- unsigned long flags;
- ktime_t start = ktime_get();
+ int old_nutrs = hba->nutrs;
+
+ ret = ufshcd_mcq_decide_queue_depth(hba);
+ if (ret < 0)
+ return ret;
+
+ hba->nutrs = ret;
+ ret = ufshcd_mcq_init(hba);
+ if (ret)
+ goto err;
+
+ /*
+ * Previously allocated memory for nutrs may not be enough in MCQ mode.
+ * The number of supported tags in MCQ mode may be larger than in SDB mode.
+ */
+ if (hba->nutrs != old_nutrs) {
+ ufshcd_release_sdb_queue(hba, old_nutrs);
+ ret = ufshcd_memory_alloc(hba);
+ if (ret)
+ goto err;
+ ufshcd_host_memory_configure(hba);
+ }
+
+ ret = ufshcd_mcq_memory_alloc(hba);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ hba->nutrs = old_nutrs;
+ return ret;
+}
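
ufshcd_alloc_mcq() captures the resize dance: the MCQ queue depth may exceed the nutrs the first allocation was sized for, so the SDB arrays are released and reallocated at the new depth, with old_nutrs restored on any failure so the controller can fall back to SDB mode. A compact grow-or-rollback model (illustrative, not the driver's allocator):

#include <stdlib.h>
#include <stdio.h>

struct ctx { int nutrs; int *slots; };

static int resize_slots(struct ctx *c, int new_depth)
{
	int old = c->nutrs;
	int *bigger;

	if (new_depth == old)
		return 0;		/* nothing to do */
	bigger = calloc(new_depth, sizeof(*bigger));
	if (!bigger)
		return -1;		/* rollback: old state untouched */
	free(c->slots);
	c->slots = bigger;
	c->nutrs = new_depth;
	printf("resized %d -> %d\n", old, new_depth);
	return 0;
}

int main(void)
{
	struct ctx c = { 32, calloc(32, sizeof(int)) };

	if (resize_slots(&c, 64) < 0)
		fprintf(stderr, "fall back to depth %d\n", c.nutrs);
	free(c.slots);
	return 0;
}
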
+
+static void ufshcd_config_mcq(struct ufs_hba *hba)
+{
+ int ret;
+
+ ret = ufshcd_mcq_vops_config_esi(hba);
+ dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
+
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+
+ hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
+ /* Select MCQ mode */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
+ REG_UFS_MEM_CFG);
+ hba->mcq_enabled = true;
+
+ dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
+ hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
+ hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
+ hba->nutrs);
+}
+
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
+{
+ int ret;
+ struct Scsi_Host *host = hba->host;
hba->ufshcd_state = UFSHCD_STATE_RESET;
ret = ufshcd_link_startup(hba);
if (ret)
- goto out;
+ return ret;
if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
- goto out;
+ return ret;
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
@@ -8189,15 +8514,19 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Reconfigure MCQ upon reset */
+ if (is_mcq_enabled(hba) && !init_dev_params)
+ ufshcd_config_mcq(hba);
+
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/* Initiate UFS initialization, and waiting until completion */
ret = ufshcd_complete_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/*
* Initialize UFS device parameters used by driver, these
@@ -8206,7 +8535,25 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
- goto out;
+ return ret;
+ if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+ ret = ufshcd_alloc_mcq(hba);
+ if (ret) {
+ /* Continue with SDB mode */
+ use_mcq_mode = false;
+ dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+ ret);
+ }
+ ret = scsi_add_host(host, hba->dev);
+ if (ret) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ return ret;
+ }
+ hba->scsi_host_added = true;
+ }
+ /* MCQ may be disabled if ufshcd_alloc_mcq() fails */
+ if (is_mcq_supported(hba) && use_mcq_mode)
+ ufshcd_config_mcq(hba);
}
ufshcd_tune_unipro_params(hba);
@@ -8227,11 +8574,51 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
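
The split of ufshcd_probe_hba() into ufshcd_device_init() plus a thin wrapper exists so the wrapper below can run the whole link-startup/device-init sequence twice when UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set: once at the boot gear, then again after a full device and controller reset at the negotiated maximum gear. A sketch of that two-pass control flow (function names are placeholders):

#include <stdbool.h>
#include <stdio.h>

static int device_init(bool first_pass)
{
	printf("link startup + device init (%s pass)\n",
	       first_pass ? "first" : "second");
	return 0;
}

/* Probe: init once, and re-init after a reset if the quirk demands it. */
static int probe(bool reinit_after_gear_switch)
{
	int ret = device_init(true);

	if (ret)
		return ret;
	if (reinit_after_gear_switch) {
		printf("reset device + controller, notify variant driver\n");
		ret = device_init(false);
	}
	return ret;
}

int main(void)
{
	return probe(true);
}
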
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
+ * @hba: per-adapter instance
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+{
+ ktime_t start = ktime_get();
+ unsigned long flags;
+ int ret;
+
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
+
+ if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
+ /* Reset the device and controller before doing reinit */
+ ufshcd_device_reset(hba);
+ ufshcd_hba_stop(hba);
+ ufshcd_vops_reinit_notify(hba);
+ ret = ufshcd_hba_enable(hba);
+ if (ret) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_evt_hist(hba);
+ ufshcd_print_host_state(hba);
goto out;
}
- ufshcd_print_pwr_info(hba);
+
+ /* Reinit the device */
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
}
+ ufshcd_print_pwr_info(hba);
+
/*
* bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
* and for removable UFS card as well, hence always set the parameter.
@@ -8359,7 +8746,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
- .dma_boundary = PAGE_SIZE - 1,
.rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
@@ -9453,6 +9839,7 @@ static int ufshcd_resume(struct ufs_hba *hba)
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
+
goto out;
disable_vreg:
@@ -9616,6 +10003,56 @@ void ufshcd_remove(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
+#ifdef CONFIG_PM_SLEEP
+int ufshcd_system_freeze(struct device *dev)
+{
+ return ufshcd_system_suspend(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
+
+int ufshcd_system_restore(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_resume(dev);
+ if (ret)
+ return ret;
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+ /*
+ * Make sure that UTRL and UTMRL base address registers
+ * are updated with the latest queue addresses. Only after
+ * these addresses are updated can new commands be queued.
+ */
+ mb();
+
+ /* Resuming from hibernate, assume that link was OFF */
+ ufshcd_set_link_off(hba);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_restore);
+
+int ufshcd_system_thaw(struct device *dev)
+{
+ return ufshcd_system_resume(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+#endif /* CONFIG_PM_SLEEP */
+
/**
* ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
* @hba: pointer to Host Bus Adapter (HBA)
@@ -9674,6 +10111,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
hba->nop_out_timeout = NOP_OUT_TIMEOUT;
+ ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock);
@@ -9823,10 +10261,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->is_irq_enabled = true;
}
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
+ if (!is_mcq_supported(hba)) {
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto out_disable;
+ }
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
@@ -10054,11 +10494,6 @@ static int __init ufshcd_core_init(void)
{
int ret;
- /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
- static_assert(sizeof(struct utp_transfer_cmd_desc) ==
- 2 * ALIGNED_UPIU_SIZE +
- SG_ALL * sizeof(struct ufshcd_sg_entry));
-
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index 994f4ac9df5a..a46a7666c891 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -2382,12 +2382,10 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
{
u16 max_active_rgns;
u8 lu_enable;
- int size;
+ int size = QUERY_DESC_MAX_SIZE;
int ret;
char desc_buf[QUERY_DESC_MAX_SIZE];
- ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
-
ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
QUERY_DESC_IDN_UNIT, lun, 0,
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 4cc2dbd79ed0..663881437921 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -57,8 +57,9 @@ config SCSI_UFS_DWC_TC_PLATFORM
config SCSI_UFS_QCOM
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
+ depends on GENERIC_MSI_IRQ
+ depends on RESET_CONTROLLER
select QCOM_SCM if SCSI_UFS_CRYPTO
- select RESET_CONTROLLER
help
This selects the QCOM specific additions to UFSHCD platform driver.
UFS host on QCOM needs some vendor specific configuration before
@@ -124,3 +125,19 @@ config SCSI_UFS_EXYNOS
Select this if you have UFS host controller on Samsung Exynos SoC.
If unsure, say N.
+
+config SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+ bool
+ default y if SCSI_UFS_EXYNOS && SCSI_UFS_CRYPTO
+
+config SCSI_UFS_SPRD
+ tristate "Unisoc specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_SPRD || COMPILE_TEST)
+ help
+ This selects the Unisoc specific additions to UFSHCD platform driver.
+ UFS host on Unisoc needs some vendor specific configuration before
+ accessing the hardware which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have UFS controller on Unisoc chipset.
+ If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 7717ca93e7d5..d7c5bf7fa512 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -12,4 +12,5 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
+obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index c3628a8645a5..7c985fc38db1 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1300,6 +1300,14 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
+ /*
+ * The maximum segment size must be set after scsi_host_alloc()
+ * has been called and before LUN scanning starts
+ * (ufshcd_async_scan()). Note: this callback may also be called
+ * from functions other than ufshcd_init().
+ */
+ hba->host->max_segment_size = 4096;
+
if (ufs->drv_data->pre_hce_enable) {
ret = ufs->drv_data->pre_hce_enable(ufs);
if (ret)
@@ -1673,7 +1681,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
+ UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8ad1415e10b6..34fc453f3eb1 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -22,8 +22,11 @@
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>
-#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+#define MCQ_QCFGPTR_MASK GENMASK(7, 0)
+#define MCQ_QCFGPTR_UNIT 0x200
+#define MCQ_SQATTR_OFFSET(c) \
+ ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
+#define MCQ_QCFG_SIZE 0x40
enum {
TSTBUS_UAWM,
@@ -52,12 +55,6 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
return container_of(rcd, struct ufs_qcom_host, rcdev);
}
-static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- const char *prefix, void *priv)
-{
- ufshcd_dump_regs(hba, offset, len * 4, prefix);
-}
-
static int ufs_qcom_host_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool optional)
{
@@ -110,7 +107,7 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
- int err = 0;
+ int err;
struct device *dev = host->hba->dev;
if (host->is_lane_clks_enabled)
@@ -119,7 +116,7 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
host->rx_l0_sync_clk);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
host->tx_l0_sync_clk);
@@ -137,7 +134,8 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
goto disable_rx_l1;
host->is_lane_clks_enabled = true;
- goto out;
+
+ return 0;
disable_rx_l1:
clk_disable_unprepare(host->rx_l1_sync_clk);
@@ -145,7 +143,7 @@ disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
+
return err;
}
@@ -160,25 +158,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
&host->rx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
&host->tx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk, true);
}
-out:
- return err;
+
+ return 0;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -226,6 +224,10 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
ufshcd_rmwl(host->hba, QUNIPRO_SEL,
ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
REG_UFS_CFG1);
+
+ if (host->hw_ver.major == 0x05)
+ ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
+
/* make sure above configuration is applied before we return */
mb();
}
@@ -241,7 +243,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (!host->core_reset) {
dev_warn(hba->dev, "%s: reset control not set\n", __func__);
- goto out;
+ return 0;
}
reenable_intr = hba->is_irq_enabled;
@@ -252,7 +254,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
/*
@@ -274,16 +276,35 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
hba->is_irq_enabled = true;
}
-out:
- return ret;
+ return 0;
+}
+
+static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major == 0x1) {
+ /*
+ * HS-G3 operations may not reliably work on legacy QCOM
+ * UFS host controller hardware even though capability
+ * exchange during link startup phase may end up
+ * negotiating maximum supported gear as G3.
+ * Hence downgrade the maximum supported gear to HS-G2.
+ */
+ return UFS_HS_G2;
+ } else if (host->hw_ver.major >= 0x4) {
+ return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
+ }
+
+ /* Default is HS-G3 */
+ return UFS_HS_G3;
}
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
- bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
+ int ret;
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba);
@@ -291,17 +312,16 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
dev_warn(hba->dev, "%s: host reset returned %d\n",
__func__, ret);
- if (is_rate_B)
- phy_set_mode(phy, PHY_MODE_UFS_HS_B);
-
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
+ phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
+
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
if (ret) {
@@ -316,7 +336,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
out_disable_phy:
phy_exit(phy);
-out:
+
return ret;
}
@@ -374,7 +394,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
{
- int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
u32 core_clk_period_in_ns;
@@ -409,11 +428,11 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
* Aggregation logic.
*/
if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
+ return 0;
if (gear == 0) {
dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
+ return -EINVAL;
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
@@ -436,7 +455,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
if (ufs_qcom_cap_qunipro(host))
- goto out;
+ return 0;
core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
@@ -451,7 +470,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
} else if (rate == PA_HS_MODE_B) {
@@ -460,13 +479,13 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
} else {
dev_err(hba->dev, "%s: invalid rate = %d\n",
__func__, rate);
- goto out_error;
+ return -EINVAL;
}
break;
case SLOWAUTO_MODE:
@@ -476,14 +495,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(pwm_fr_table));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
break;
case UNCHANGED:
default:
dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
+ return -EINVAL;
}
if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
@@ -498,21 +517,17 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
mb();
}
- if (update_link_startup_timer) {
+ if (update_link_startup_timer && host->hw_ver.major != 0x5) {
ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_PA_LINK_STARTUP_TIMER);
+ REG_UFS_CFG0);
/*
* make sure that this configuration is applied before
* we return
*/
mb();
}
- goto out;
-out_error:
- ret = -EINVAL;
-out:
- return ret;
+ return 0;
}
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
@@ -527,8 +542,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
0, true)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (ufs_qcom_cap_qunipro(host))
@@ -554,7 +568,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
break;
}
-out:
return err;
}
@@ -691,8 +704,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
switch (status) {
@@ -700,29 +712,21 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware even though capability
- * exchange during link startup phase may end up
- * negotiating maximum supported gear as G3.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
- if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
- }
+ /* This driver only supports symmetric gear setting, i.e., hs_tx_gear == hs_rx_gear */
+ ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
dev_max_params,
dev_req_params);
if (ret) {
- pr_err("%s: failed to determine capabilities\n",
+ dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
- goto out;
+ return ret;
}
+ /* Use the agreed gear */
+ host->hs_gear = dev_req_params->gear_tx;
+
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
@@ -761,7 +765,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ret = -EINVAL;
break;
}
-out:
+
return ret;
}
@@ -773,14 +777,11 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
&pa_vs_config_reg1);
if (err)
- goto out;
+ return err;
/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
- err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
(pa_vs_config_reg1 | (1 << 12)));
-
-out:
- return err;
}
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
@@ -839,6 +840,9 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
}
+
+ if (host->hw_ver.major > 0x3)
+ hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
@@ -906,8 +910,6 @@ ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_assert_reset(host->hba);
/* provide 1ms delay to let the reset pulse propagate. */
usleep_range(1000, 1100);
@@ -919,8 +921,6 @@ ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_deassert_reset(host->hba);
/*
@@ -957,9 +957,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
- err = -ENOMEM;
dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
+ return -ENOMEM;
}
/* Make a two way bind between the qcom host and the hba */
@@ -980,10 +979,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->rcdev.owner = dev->driver->owner;
host->rcdev.nr_resets = 1;
err = devm_reset_controller_register(dev, &host->rcdev);
- if (err) {
+ if (err)
dev_warn(dev, "Failed to register reset controller\n");
- err = 0;
- }
if (!has_acpi_companion(dev)) {
host->generic_phy = devm_phy_get(dev, "ufsphy");
@@ -1046,20 +1043,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
- host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
- if (err) {
+ if (err)
+ /* Failure is non-fatal */
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
- err = 0;
- }
- goto out;
+ /*
+ * Power up the PHY using the minimum supported gear (UFS_HS_G2).
+ * Switching to max gear will be performed during reinit if supported.
+ */
+ host->hs_gear = UFS_HS_G2;
+
+ return 0;
out_variant_clear:
ufshcd_set_variant(hba, NULL);
-out:
+
return err;
}
@@ -1085,7 +1086,7 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
if (err)
- goto out;
+ return err;
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
core_clk_ctrl_reg |= clk_cycles;
@@ -1093,11 +1094,9 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
+ return ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
-out:
- return err;
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
@@ -1180,7 +1179,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err || !dev_req_params) {
ufshcd_uic_hibern8_exit(hba);
- goto out;
+ return err;
}
ufs_qcom_cfg_timers(hba,
@@ -1191,81 +1190,14 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
ufshcd_uic_hibern8_exit(hba);
}
-out:
- return err;
-}
-
-static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
- void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, const char *str, void *priv))
-{
- u32 reg;
- struct ufs_qcom_host *host;
-
- if (unlikely(!hba)) {
- pr_err("%s: hba is NULL\n", __func__);
- return;
- }
- if (unlikely(!print_fn)) {
- dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
- return;
- }
-
- host = ufshcd_get_variant(hba);
- if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
- return;
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
- print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
-
- reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UTP_DBG_RAMS_EN;
- ufshcd_writel(hba, reg, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
- print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
- print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
- print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
-
- /* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
- print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
- print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
- print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
- print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
- print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+ return 0;
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
- UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- } else {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
- }
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
@@ -1374,10 +1306,53 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
+ u32 reg;
+ struct ufs_qcom_host *host;
+
+ host = ufshcd_get_variant(hba);
+
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
- ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+ ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");
+
+ reg = ufshcd_readl(hba, REG_UFS_CFG1);
+ reg |= UTP_DBG_RAMS_EN;
+ ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+ ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+ ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");
+
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+ ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+ ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+ ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+ ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+ ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+ ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+ ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
}
/**
@@ -1424,6 +1399,236 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
}
#endif
+static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ phy_power_off(host->generic_phy);
+}
+
+/* Resources */
+static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
+ {.name = "ufs_mem",},
+ {.name = "mcq",},
+ /* Submission Queue DAO */
+ {.name = "mcq_sqd",},
+ /* Submission Queue Interrupt Status */
+ {.name = "mcq_sqis",},
+ /* Completion Queue DAO */
+ {.name = "mcq_cqd",},
+ /* Completion Queue Interrupt Status */
+ {.name = "mcq_cqis",},
+ /* MCQ vendor specific */
+ {.name = "mcq_vs",},
+};
+
+static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
+{
+ struct platform_device *pdev = to_platform_device(hba->dev);
+ struct ufshcd_res_info *res;
+ struct resource *res_mem, *res_mcq;
+ int i, ret = 0;
+
+ memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
+
+ for (i = 0; i < RES_MAX; i++) {
+ res = &hba->res[i];
+ res->resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ res->name);
+ if (!res->resource) {
+ dev_info(hba->dev, "Resource %s not provided\n", res->name);
+ if (i == RES_UFS)
+ return -ENOMEM;
+ continue;
+ } else if (i == RES_UFS) {
+ res_mem = res->resource;
+ res->base = hba->mmio_base;
+ continue;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res->resource);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "Failed to map res %s, err=%d\n",
+ res->name, (int)PTR_ERR(res->base));
+ ret = PTR_ERR(res->base);
+ res->base = NULL;
+ return ret;
+ }
+ }
+
+ /* MCQ resource provided in DT */
+ res = &hba->res[RES_MCQ];
+ /* Bail if MCQ resource is provided */
+ if (res->base)
+ goto out;
+
+ /* Explicitly allocate MCQ resource from ufs_mem */
+ res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
+ if (!res_mcq)
+ return -ENOMEM;
+
+ res_mcq->start = res_mem->start +
+ MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
+ res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
+ res_mcq->flags = res_mem->flags;
+ res_mcq->name = "mcq";
+
+ ret = insert_resource(&iomem_resource, res_mcq);
+ if (ret) {
+ dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
+ ret);
+ goto insert_res_err;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res_mcq);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
+ (int)PTR_ERR(res->base));
+ ret = PTR_ERR(res->base);
+ goto ioremap_err;
+ }
+
+out:
+ hba->mcq_base = res->base;
+ return 0;
+ioremap_err:
+ res->base = NULL;
+ remove_resource(res_mcq);
+insert_res_err:
+ devm_kfree(hba->dev, res_mcq);
+ return ret;
+}
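
When the DT omits an "mcq" region, the code above carves one out of ufs_mem: MCQ_SQATTR_OFFSET() pulls the QCFGPTR byte out of the capability register and scales it by 0x200 (MCQ_QCFGPTR_UNIT), and the region spans nr_hw_queues blocks of 0x40 bytes. A worked computation of the carve-out (the capability value is made up):

#include <stdint.h>
#include <stdio.h>

#define MCQ_QCFGPTR_MASK	0xffu
#define MCQ_QCFGPTR_UNIT	0x200u
#define MCQ_QCFG_SIZE		0x40u

int main(void)
{
	uint32_t mcq_cap = 0x00240000;	/* hypothetical: QCFGPTR = 0x24 */
	uint32_t nr_hw_queues = 8;
	uint32_t offset = ((mcq_cap >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT;
	uint32_t size = nr_hw_queues * MCQ_QCFG_SIZE;

	printf("MCQ region: ufs_mem + 0x%x, length 0x%x\n", offset, size);
	return 0;
}
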
+
+static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
+{
+ struct ufshcd_res_info *mem_res, *sqdao_res;
+ struct ufshcd_mcq_opr_info_t *opr;
+ int i;
+
+ mem_res = &hba->res[RES_UFS];
+ sqdao_res = &hba->res[RES_MCQ_SQD];
+
+ if (!mem_res->base || !sqdao_res->base)
+ return -EINVAL;
+
+ for (i = 0; i < OPR_MAX; i++) {
+ opr = &hba->mcq_opr[i];
+ opr->offset = sqdao_res->resource->start -
+ mem_res->resource->start + 0x40 * i;
+ opr->stride = 0x100;
+ opr->base = sqdao_res->base + 0x40 * i;
+ }
+
+ return 0;
+}
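
ufs_qcom_op_runtime_config() lays the MCQ operation register groups out at fixed strides: group i starts 0x40 * i past the SQD block, and each queue within a group is 0x100 apart. The per-queue register address therefore resolves as below (a sketch; OPR_MAX and the strides mirror the values above, the base address is invented):

#include <stdint.h>
#include <stdio.h>

#define OPR_MAX 4

int main(void)
{
	uintptr_t sqd_base = 0x9000;	/* hypothetical mapped SQD block */
	unsigned int queue = 2;

	for (int i = 0; i < OPR_MAX; i++) {
		uintptr_t base = sqd_base + 0x40u * i;	/* group base */
		uintptr_t reg  = base + 0x100u * queue;	/* per-queue  */
		printf("opr %d, queue %u -> 0x%lx\n", i, queue,
		       (unsigned long)reg);
	}
	return 0;
}
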
+
+static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
+{
+ /* Qualcomm HC supports up to 64 */
+ return MAX_SUPP_MAC;
+}
+
+static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
+
+ if (!mcq_vs_res->base)
+ return -EINVAL;
+
+ *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+
+ return 0;
+}
+
+static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufshcd_mcq_config_esi(hba, msg);
+}
+
+static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 id = irq - host->esi_base;
+ struct ufs_hw_queue *hwq = &hba->uhq[id];
+
+ ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+
+ return IRQ_HANDLED;
+}
+
+static int ufs_qcom_config_esi(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+ int nr_irqs, ret;
+
+ if (host->esi_enabled)
+ return 0;
+ else if (host->esi_base < 0)
+ return -EINVAL;
+
+ /*
+ * 1. We only handle CQs as of now.
+ * 2. Poll queues do not need ESI.
+ */
+ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
+ if (ret)
+ goto out;
+
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ if (!desc->msi_index)
+ host->esi_base = desc->irq;
+
+ ret = devm_request_irq(hba->dev, desc->irq,
+ ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", hba);
+ if (ret) {
+ dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
+ __func__, desc->irq, ret);
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* Rewind */
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(hba->dev, desc->irq, hba);
+ }
+ platform_msi_domain_free_irqs(hba->dev);
+ } else {
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
+ REG_UFS_CFG3);
+ }
+ ufshcd_mcq_enable_esi(hba);
+ }
+
+out:
+ if (ret) {
+ host->esi_base = -1;
+ dev_warn(hba->dev, "Failed to request Platform MSI %d\n", ret);
+ } else {
+ host->esi_enabled = true;
+ }
+
+ return ret;
+}
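
The ESI setup above is a standard partial-failure rollback: request one IRQ per completion queue, and if any request fails, walk the descriptor list again up to the failing entry and free what was taken before releasing the MSI domain allocation. The shape of that rewind (illustrative stubs, not the MSI API):

#include <stdio.h>

#define NR_IRQS 4

static int request_irq_stub(int i) { return i == 2 ? -1 : 0; }
static void free_irq_stub(int i)   { printf("freed irq %d\n", i); }

int main(void)
{
	int failed = -1;

	for (int i = 0; i < NR_IRQS; i++) {
		if (request_irq_stub(i)) {
			failed = i;	/* remember where we stopped */
			break;
		}
	}
	if (failed >= 0) {
		/* Rewind: release only the IRQs acquired before the failure. */
		for (int i = 0; i < failed; i++)
			free_irq_stub(i);
		return 1;
	}
	return 0;
}
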
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1447,6 +1652,12 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
+ .reinit_notify = ufs_qcom_reinit_notify,
+ .mcq_config_resource = ufs_qcom_mcq_config_resource,
+ .get_hba_mac = ufs_qcom_get_hba_mac,
+ .op_runtime_config = ufs_qcom_op_runtime_config,
+ .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
+ .config_esi = ufs_qcom_config_esi,
};
/**
@@ -1463,9 +1674,9 @@ static int ufs_qcom_probe(struct platform_device *pdev)
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+ return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
- return err;
+ return 0;
}
/**
@@ -1480,6 +1691,7 @@ static int ufs_qcom_remove(struct platform_device *pdev)
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
+ platform_msi_domain_free_irqs(hba->dev);
return 0;
}
@@ -1498,10 +1710,16 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
static const struct dev_pm_ops ufs_qcom_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_system_suspend,
+ .resume = ufshcd_system_resume,
+ .freeze = ufshcd_system_freeze,
+ .restore = ufshcd_system_restore,
+ .thaw = ufshcd_system_thaw,
+#endif
};
static struct platform_driver ufs_qcom_pltform = {
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 44466a395bb5..39e774254fb2 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -16,13 +16,11 @@
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
#define BUS_VECTOR_NAME_LEN 32
+#define MAX_SUPP_MAC 64
-#define UFS_HW_VER_MAJOR_SHFT (28)
-#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
-#define UFS_HW_VER_MINOR_SHFT (16)
-#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
-#define UFS_HW_VER_STEP_SHFT (0)
-#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
+#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
+#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
/* vendor specific pre-defined parameters */
#define SLOW 1
@@ -36,8 +34,10 @@ enum {
REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
REG_UFS_PA_ERR_CODE = 0xCC,
- REG_UFS_RETRY_TIMER_REG = 0xD0,
- REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
+ /* On older UFS revisions, this register is called "REG_UFS_RETRY_TIMER_REG" */
+ REG_UFS_PARAM0 = 0xD0,
+ /* On older UFS revisions, this register is called "REG_UFS_PA_LINK_STARTUP_TIMER" */
+ REG_UFS_CFG0 = 0xD8,
REG_UFS_CFG1 = 0xDC,
REG_UFS_CFG2 = 0xE0,
REG_UFS_HW_VERSION = 0xE4,
@@ -53,6 +53,8 @@ enum {
* added in HW Version 3.0.0
*/
UFS_AH8_CFG = 0xFC,
+
+ REG_UFS_CFG3 = 0x271C,
};
/* QCOM UFS host controller vendor specific debug registers */
@@ -72,28 +74,43 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+enum {
+ UFS_MEM_CQIS_VS = 0x8,
+};
+
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
+/* bit definitions for REG_UFS_CFG0 register */
+#define QUNIPRO_G4_SEL BIT(5)
+
/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL 0x1
-#define UTP_DBG_RAMS_EN 0x20000
+#define QUNIPRO_SEL BIT(0)
+#define UFS_PHY_SOFT_RESET BIT(1)
+#define UTP_DBG_RAMS_EN BIT(17)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
+#define UFS_PHY_RESET_ENABLE 1
+#define UFS_PHY_RESET_DISABLE 0
+
/* bit definitions for REG_UFS_CFG2 register */
-#define UAWM_HW_CGC_EN (1 << 0)
-#define UARM_HW_CGC_EN (1 << 1)
-#define TXUC_HW_CGC_EN (1 << 2)
-#define RXUC_HW_CGC_EN (1 << 3)
-#define DFC_HW_CGC_EN (1 << 4)
-#define TRLUT_HW_CGC_EN (1 << 5)
-#define TMRLUT_HW_CGC_EN (1 << 6)
-#define OCSC_HW_CGC_EN (1 << 7)
+#define UAWM_HW_CGC_EN BIT(0)
+#define UARM_HW_CGC_EN BIT(1)
+#define TXUC_HW_CGC_EN BIT(2)
+#define RXUC_HW_CGC_EN BIT(3)
+#define DFC_HW_CGC_EN BIT(4)
+#define TRLUT_HW_CGC_EN BIT(5)
+#define TMRLUT_HW_CGC_EN BIT(6)
+#define OCSC_HW_CGC_EN BIT(7)
+
+/* bit definitions for REG_UFS_PARAM0 */
+#define MAX_HS_GEAR_MASK GENMASK(6, 4)
+#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
-#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */
+#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
@@ -101,26 +118,11 @@ enum {
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* bit offset */
-enum {
- OFFSET_UFS_PHY_SOFT_RESET = 1,
- OFFSET_CLK_NS_REG = 10,
-};
+#define OFFSET_CLK_NS_REG 0xa
/* bit masks */
-enum {
- MASK_UFS_PHY_SOFT_RESET = 0x2,
- MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
- MASK_CLK_NS_REG = 0xFFFC00,
-};
-
-/* QCOM UFS debug print bit mask */
-#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
-#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
-#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2)
-
-#define UFS_QCOM_DBG_PRINT_ALL \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
- UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
+#define MASK_CLK_NS_REG GENMASK(23, 10)
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
@@ -135,15 +137,15 @@ ufs_qcom_get_controller_revision(struct ufs_hba *hba,
{
u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
- *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
- *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
- *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+ *major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, ver);
+ *minor = FIELD_GET(UFS_HW_VER_MINOR_MASK, ver);
+ *step = FIELD_GET(UFS_HW_VER_STEP_MASK, ver);
};
static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_ENABLE),
+ REG_UFS_CFG1);
/*
* Make sure assertion of ufs phy reset is written to
@@ -154,8 +156,8 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_DISABLE),
+ REG_UFS_CFG1);
/*
* Make sure de-assertion of ufs phy reset is written to
@@ -212,8 +214,6 @@ struct ufs_qcom_host {
u32 dev_ref_clk_en_mask;
- /* Bitmask for enabling debug prints */
- u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
/* Reset control of HCI */
@@ -221,6 +221,11 @@ struct ufs_qcom_host {
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
+
+ u32 hs_gear;
+
+ int esi_base;
+ bool esi_enabled;
};
static inline u32
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
new file mode 100644
index 000000000000..051f3f40d92c
--- /dev/null
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UNISOC UFS Host Controller driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Zhe Wang <zhe.wang1@unisoc.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-sprd.h"
+
+static const struct of_device_id ufs_sprd_of_match[];
+
+static struct ufs_sprd_priv *ufs_sprd_get_priv_data(struct ufs_hba *hba)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ WARN_ON(!host->priv);
+ return host->priv;
+}
+
+static void ufs_sprd_regmap_update(struct ufs_sprd_priv *priv, unsigned int index,
+ unsigned int reg, unsigned int bits, unsigned int val)
+{
+ regmap_update_bits(priv->sysci[index].regmap, reg, bits, val);
+}
+
+static void ufs_sprd_regmap_read(struct ufs_sprd_priv *priv, unsigned int index,
+ unsigned int reg, unsigned int *val)
+{
+ regmap_read(priv->sysci[index].regmap, reg, val);
+}
+
+static void ufs_sprd_get_unipro_ver(struct ufs_hba *hba)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &host->unipro_ver))
+ host->unipro_ver = 0;
+}
+
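+/* Mask or unmask the UIC command completion interrupt */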
+static void ufs_sprd_ctrl_uic_compl(struct ufs_hba *hba, bool enable)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (enable)
+ set |= UIC_COMMAND_COMPL;
+ else
+ set &= ~UIC_COMMAND_COMPL;
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+static int ufs_sprd_get_reset_ctrl(struct device *dev, struct ufs_sprd_rst *rci)
+{
+ rci->rc = devm_reset_control_get(dev, rci->name);
+ if (IS_ERR(rci->rc)) {
+ dev_err(dev, "failed to get reset ctrl:%s\n", rci->name);
+ return PTR_ERR(rci->rc);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_get_syscon_reg(struct device *dev, struct ufs_sprd_syscon *sysci)
+{
+ sysci->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, sysci->name);
+ if (IS_ERR(sysci->regmap)) {
+ dev_err(dev, "failed to get ufs syscon:%s\n", sysci->name);
+ return PTR_ERR(sysci->regmap);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_get_vreg(struct device *dev, struct ufs_sprd_vreg *vregi)
+{
+ vregi->vreg = devm_regulator_get(dev, vregi->name);
+ if (IS_ERR(vregi->vreg)) {
+ dev_err(dev, "failed to get vreg:%s\n", vregi->name);
+ return PTR_ERR(vregi->vreg);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_parse_dt(struct device *dev, struct ufs_hba *hba, struct ufs_sprd_host *host)
+{
+ u32 i;
+ struct ufs_sprd_priv *priv = host->priv;
+ int ret = 0;
+
+ /* Parse UFS reset ctrl info */
+ for (i = 0; i < SPRD_UFS_RST_MAX; i++) {
+ if (!priv->rci[i].name)
+ continue;
+ ret = ufs_sprd_get_reset_ctrl(dev, &priv->rci[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* Parse UFS syscon reg info */
+ for (i = 0; i < SPRD_UFS_SYSCON_MAX; i++) {
+ if (!priv->sysci[i].name)
+ continue;
+ ret = ufs_sprd_get_syscon_reg(dev, &priv->sysci[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* Parse UFS vreg info */
+ for (i = 0; i < SPRD_UFS_VREG_MAX; i++) {
+ if (!priv->vregi[i].name)
+ continue;
+ ret = ufs_sprd_get_vreg(dev, &priv->vregi[i]);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ufs_sprd_common_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct ufs_sprd_host *host;
+ struct platform_device __maybe_unused *pdev = to_platform_device(dev);
+ const struct of_device_id *of_id;
+ int ret = 0;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ of_id = of_match_node(ufs_sprd_of_match, pdev->dev.of_node);
+ if (of_id->data != NULL)
+ host->priv = container_of(of_id->data, struct ufs_sprd_priv,
+ ufs_hba_sprd_vops);
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ hba->caps |= UFSHCD_CAP_CLK_GATING |
+ UFSHCD_CAP_CRYPTO |
+ UFSHCD_CAP_WB_EN;
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+
+ ret = ufs_sprd_parse_dt(dev, hba, host);
+
+ return ret;
+}
+
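+/* Accept the device's maximum power-mode capabilities; program initial adapt on UniPro 1.8+ */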
+static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE) {
+ memcpy(dev_req_params, dev_max_params,
+ sizeof(struct ufs_pa_layer_attr));
+ if (host->unipro_ver >= UFS_UNIPRO_VER_1_8)
+ ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+
+ return 0;
+}
+
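+/* Disable the auto-hibern8 idle timer before the suspend sequence runs */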
+static int ufs_sprd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
+{
+ unsigned long flags;
+
+ if (status == PRE_CHANGE) {
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+static void ufs_sprd_n6_host_reset(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ dev_info(hba->dev, "ufs host reset!\n");
+
+ reset_control_assert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
+}
+
+static int ufs_sprd_n6_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ dev_info(hba->dev, "ufs device reset!\n");
+
+ reset_control_assert(priv->rci[SPRD_UFS_DEV_RST].rc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rci[SPRD_UFS_DEV_RST].rc);
+
+ return 0;
+}
+
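+/* Enable access to the inline crypto key registers via a secure-world SMC call */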
+static void ufs_sprd_n6_key_acc_enable(struct ufs_hba *hba)
+{
+ u32 val;
+ u32 retry = 10;
+ struct arm_smccc_res res;
+
+check_hce:
+ /* Key register access can only be enabled while HCE is set; retry a few times before giving up */
+ val = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
+ if (!(val & CONTROLLER_ENABLE)) {
+ ufs_sprd_n6_host_reset(hba);
+ val |= CONTROLLER_ENABLE;
+ ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
+ usleep_range(1000, 1100);
+ if (retry) {
+ retry--;
+ goto check_hce;
+ }
+ goto disable_crypto;
+ }
+
+ arm_smccc_smc(SPRD_SIP_SVC_STORAGE_UFS_CRYPTO_ENABLE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (!res.a0)
+ return;
+
+disable_crypto:
+ dev_err(hba->dev, "key reg access enable fail, disable crypto\n");
+ hba->caps &= ~UFSHCD_CAP_CRYPTO;
+}
+
+static int ufs_sprd_n6_init(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv;
+ int ret = 0;
+
+ ret = ufs_sprd_common_init(hba);
+ if (ret != 0)
+ return ret;
+
+ priv = ufs_sprd_get_priv_data(hba);
+
+ ret = regulator_enable(priv->vregi[SPRD_UFS_VDD_MPHY].vreg);
+ if (ret)
+ return -ENODEV;
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ ufs_sprd_n6_key_acc_enable(hba);
+
+ return 0;
+}
+
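+/* M-PHY bring-up: program vendor attributes, wait for SRAM init, then calibrate both lanes */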
+static int ufs_sprd_n6_phy_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ uint32_t val = 0;
+ uint32_t retry = 10;
+ uint32_t offset;
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBREFCLKCTRL2), 0x90);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCRCTRL), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(1)), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), 0x01);
+
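+ /* Poll phy_sram_init_done (up to ~10ms), then run AFE calibration on lanes 0 and 1 */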
+ do {
+ /* phy_sram_init_done */
+ ufs_sprd_regmap_read(priv, SPRD_UFS_ANLG, 0xc, &val);
+ if ((val & 0x1) == 0x1) {
+ for (offset = 0x40; offset < 0x42; offset++) {
+ /* Lane afe calibration */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRLSB), 0x1c);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRMSB), offset);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRLSB), 0x04);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRMSB), 0x00);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGRDWRSEL), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ }
+
+ goto update_phy;
+ }
+ udelay(1000);
+ retry--;
+ } while (retry > 0);
+
+ ret = -ETIMEDOUT;
+ goto out;
+
+update_phy:
+ /* phy_sram_ext_ld_done */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0x0);
+out:
+ return ret;
+}
+
+static int sprd_ufs_n6_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ if (status == PRE_CHANGE) {
+ /* phy_sram_ext_ld_done */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0x2);
+ /* phy_sram_bypass */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x4, 0x4);
+
+ ufs_sprd_n6_host_reset(hba);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ ufs_sprd_n6_key_acc_enable(hba);
+ }
+
+ if (status == POST_CHANGE) {
+ err = ufs_sprd_n6_phy_init(hba);
+ if (err) {
+ dev_err(hba->dev, "Phy setup failed (%d)\n", err);
+ goto out;
+ }
+
+ ufs_sprd_get_unipro_ver(hba);
+ }
+out:
+ return err;
+}
+
+static void sprd_ufs_n6_h8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ if (status == PRE_CHANGE) {
+ if (cmd == UIC_CMD_DME_HIBER_ENTER)
+ /*
+ * Disable UIC COMPL INTR to prevent access to UFSHCI after
+ * checking HCS.UPMCRS
+ */
+ ufs_sprd_ctrl_uic_compl(hba, false);
+
+ if (cmd == UIC_CMD_DME_HIBER_EXIT) {
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
+ APB_UFSDEV_REFCLK_EN, APB_UFSDEV_REFCLK_EN);
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
+ APB_USB31PLLV_REF2MPHY, APB_USB31PLLV_REF2MPHY);
+ }
+ }
+
+ if (status == POST_CHANGE) {
+ if (cmd == UIC_CMD_DME_HIBER_EXIT)
+ ufs_sprd_ctrl_uic_compl(hba, true);
+
+ if (cmd == UIC_CMD_DME_HIBER_ENTER) {
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
+ APB_UFSDEV_REFCLK_EN, 0);
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
+ APB_USB31PLLV_REF2MPHY, 0);
+ }
+ }
+}
+
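+/* Resources and variant ops for the UMS9620 ("N6") UFS host */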
+static struct ufs_sprd_priv n6_ufs = {
+ .rci[SPRD_UFSHCI_SOFT_RST] = { .name = "controller", },
+ .rci[SPRD_UFS_DEV_RST] = { .name = "device", },
+
+ .sysci[SPRD_UFS_ANLG] = { .name = "sprd,ufs-anlg-syscon", },
+ .sysci[SPRD_UFS_AON_APB] = { .name = "sprd,aon-apb-syscon", },
+
+ .vregi[SPRD_UFS_VDD_MPHY] = { .name = "vdd-mphy", },
+
+ .ufs_hba_sprd_vops = {
+ .name = "sprd,ums9620-ufs",
+ .init = ufs_sprd_n6_init,
+ .hce_enable_notify = sprd_ufs_n6_hce_enable_notify,
+ .pwr_change_notify = sprd_ufs_pwr_change_notify,
+ .hibern8_notify = sprd_ufs_n6_h8_notify,
+ .device_reset = ufs_sprd_n6_device_reset,
+ .suspend = ufs_sprd_suspend,
+ },
+};
+
+static const struct of_device_id __maybe_unused ufs_sprd_of_match[] = {
+ { .compatible = "sprd,ums9620-ufs", .data = &n6_ufs.ufs_hba_sprd_vops},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_sprd_of_match);
+
+static int ufs_sprd_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(ufs_sprd_of_match, dev->of_node);
+ err = ufshcd_pltfrm_init(pdev, of_id->data);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+static int ufs_sprd_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct dev_pm_ops ufs_sprd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_sprd_pltform = {
+ .probe = ufs_sprd_probe,
+ .remove = ufs_sprd_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-sprd",
+ .pm = &ufs_sprd_pm_ops,
+ .of_match_table = of_match_ptr(ufs_sprd_of_match),
+ },
+};
+module_platform_driver(ufs_sprd_pltform);
+
+MODULE_AUTHOR("Zhe Wang <zhe.wang1@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc UFS Host Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ufs/host/ufs-sprd.h b/drivers/ufs/host/ufs-sprd.h
new file mode 100644
index 000000000000..26ad5c3af4c1
--- /dev/null
+++ b/drivers/ufs/host/ufs-sprd.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UNISOC UFS Host Controller driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Zhe Wang <zhe.wang1@unisoc.com>
+ */
+
+#ifndef _UFS_SPRD_H_
+#define _UFS_SPRD_H_
+
+/* Vendor specific attributes */
+#define RXSQCONTROL 0x8009
+#define CBRATESEL 0x8114
+#define CBCREGADDRLSB 0x8116
+#define CBCREGADDRMSB 0x8117
+#define CBCREGWRLSB 0x8118
+#define CBCREGWRMSB 0x8119
+#define CBCREGRDWRSEL 0x811C
+#define CBCRCTRL 0x811F
+#define CBREFCLKCTRL2 0x8132
+#define VS_MPHYDISABLE 0xD0C1
+
+#define APB_UFSDEV_REG 0xCE8
+#define APB_UFSDEV_REFCLK_EN 0x2
+#define APB_USB31PLL_CTRL 0xCFC
+#define APB_USB31PLLV_REF2MPHY 0x1
+
+#define SPRD_SIP_SVC_STORAGE_UFS_CRYPTO_ENABLE \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_SIP, \
+ 0x0301)
+
+enum SPRD_UFS_RST_INDEX {
+ SPRD_UFSHCI_SOFT_RST,
+ SPRD_UFS_DEV_RST,
+
+ SPRD_UFS_RST_MAX
+};
+
+enum SPRD_UFS_SYSCON_INDEX {
+ SPRD_UFS_ANLG,
+ SPRD_UFS_AON_APB,
+
+ SPRD_UFS_SYSCON_MAX
+};
+
+enum SPRD_UFS_VREG_INDEX {
+ SPRD_UFS_VDD_MPHY,
+
+ SPRD_UFS_VREG_MAX
+};
+
+struct ufs_sprd_rst {
+ const char *name;
+ struct reset_control *rc;
+};
+
+struct ufs_sprd_syscon {
+ const char *name;
+ struct regmap *regmap;
+};
+
+struct ufs_sprd_vreg {
+ const char *name;
+ struct regulator *vreg;
+};
+
+struct ufs_sprd_priv {
+ struct ufs_sprd_rst rci[SPRD_UFS_RST_MAX];
+ struct ufs_sprd_syscon sysci[SPRD_UFS_SYSCON_MAX];
+ struct ufs_sprd_vreg vregi[SPRD_UFS_VREG_MAX];
+ const struct ufs_hba_variant_ops ufs_hba_sprd_vops;
+};
+
+struct ufs_sprd_host {
+ struct ufs_hba *hba;
+ struct ufs_sprd_priv *priv;
+ void __iomem *ufs_dbg_mmio;
+
+ enum ufs_unipro_ver unipro_ver;
+};
+
+#endif /* _UFS_SPRD_H_ */