From c831a4a08636d5462a0f9eb479771e2f65ad0378 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:35 -0700 Subject: scsi: aacraid: Remove __GFP_DMA for raw srb memory The raw srb commands do not require memory in the ZONE_DMA memory space. For 32bit srb commands, use GFP_DMA32 to limit the memory to the 32bit memory range (4GB). Signed-off-by: Raghava Aditya Renukunta Reviewed-by: Dave Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/commctrl.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index d2f8d5954840..106b9332f718 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -668,7 +668,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); + p = kmalloc(sg_count[i], GFP_KERNEL); if (!p) { rcode = -ENOMEM; goto cleanup; @@ -732,8 +732,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - /* Does this really need to be GFP_DMA? */ - p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); + + p = kmalloc(sg_count[i], GFP_KERNEL); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, upsg->count)); @@ -788,8 +788,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - /* Does this really need to be GFP_DMA? */ - p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); + + p = kmalloc(sg_count[i], GFP_KERNEL); if(!p) { dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, usg->count)); @@ -845,8 +845,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - /* Does this really need to be GFP_DMA? */ - p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); + p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, usg->count)); @@ -887,7 +886,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - p = kmalloc(sg_count[i], GFP_KERNEL); + p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, upsg->count)); -- cgit From 8105d39d0e7600ebbcce5827c11f15bf77c73af5 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:36 -0700 Subject: scsi: aacraid: Fix DMAR issues with iommu=pt The driver changed the DMA consistent map after consistent memory was allocated, which invalidated the IOMMU identity mapping. The fix is to set the DMA consistent mask once, depending on the controller card. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: Dave Carroll Signed-off-by: Martin K.
Petersen --- drivers/scsi/aacraid/aachba.c | 17 ++++++----------- drivers/scsi/aacraid/commsup.c | 29 ++++++++++++++++++----------- drivers/scsi/aacraid/linit.c | 32 +++++++++++++++++++------------- 3 files changed, 43 insertions(+), 35 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 43d88389e899..707ee2f5954d 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2071,20 +2071,15 @@ int aac_get_adapter_info(struct aac_dev* dev) expose_physicals = 0; } - if(dev->dac_support != 0) { - if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) { + if (dev->dac_support) { + if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) { if (!dev->in_reset) - printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", - dev->name, dev->id); - } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) && - !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) { - printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", - dev->name, dev->id); + dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n"); + } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) { + dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n"); dev->dac_support = 0; } else { - printk(KERN_WARNING"%s%d: No suitable DMA available.\n", - dev->name, dev->id); + dev_info(&dev->pdev->dev, "No suitable DMA available\n"); rcode = -ENOMEM; } } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 7a1b8a2ce658..45de9b5cc42d 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1513,6 +1513,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) struct scsi_cmnd *command_list; int jafo = 0; int bled; + u64 dmamask; /* * Assumptions: @@ -1580,21 +1581,27 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) aac_free_irq(aac); kfree(aac->fsa_dev); aac->fsa_dev = NULL; + + dmamask = DMA_BIT_MASK(32); quirks = aac_get_driver_ident(index)->quirks; - if (quirks & AAC_QUIRK_31BIT) { - if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) || - ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31))))) - goto out; - } else { - if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) || - ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32))))) - goto out; + if (quirks & AAC_QUIRK_31BIT) + retval = pci_set_dma_mask(aac->pdev, dmamask); + else if (!(quirks & AAC_QUIRK_SRC)) + retval = pci_set_dma_mask(aac->pdev, dmamask); + else + retval = pci_set_consistent_dma_mask(aac->pdev, dmamask); + + if (quirks & AAC_QUIRK_31BIT && !retval) { + dmamask = DMA_BIT_MASK(31); + retval = pci_set_consistent_dma_mask(aac->pdev, dmamask); } + + if (retval) + goto out; + if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) goto out; - if (quirks & AAC_QUIRK_31BIT) - if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) - goto out; + if (jafo) { aac->thread = kthread_run(aac_command_thread, aac, "%s", aac->name); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 372a07533026..5a201da93250 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1403,6 +1403,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) int error = -ENODEV; int unique_id = 0; u64 dmamask; + int mask_bits = 0; extern int aac_sync_mode; /* @@ -1426,18 +1427,32 @@ static int aac_probe_one(struct pci_dev *pdev, 
const struct pci_device_id *id) goto out; error = -ENODEV; + if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { + error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (error) { + dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed"); + goto out_disable_pdev; + } + } + /* * If the quirk31 bit is set, the adapter needs adapter * to driver communication memory to be allocated below 2gig */ - if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) + if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) { dmamask = DMA_BIT_MASK(31); - else + mask_bits = 31; + } else { dmamask = DMA_BIT_MASK(32); + mask_bits = 32; + } - if (pci_set_dma_mask(pdev, dmamask) || - pci_set_consistent_dma_mask(pdev, dmamask)) + error = pci_set_consistent_dma_mask(pdev, dmamask); + if (error) { + dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n" + , mask_bits); goto out_disable_pdev; + } pci_set_master(pdev); @@ -1501,15 +1516,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto out_deinit; } - /* - * If we had set a smaller DMA mask earlier, set it to 4gig - * now since the adapter can dma data to at least a 4gig - * address space. - */ - if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) - goto out_deinit; - aac->maximum_num_channels = aac_drivers[index].channels; error = aac_get_adapter_info(aac); if (error < 0) -- cgit From d58129c96b3cde4821763b08b5f1bba17f031138 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:37 -0700 Subject: scsi: aacraid: Added 32 and 64 queue depth for arc natives The qd for ARC Native disks is calculated by dividing the max IO of 1024 by the number of disks, or 256, whichever is lower. This causes poor disk IO performance. The fix is to set the qd based on the type of disk (SAS - 64 and SATA - 32). Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aacraid.h | 1 + drivers/scsi/aacraid/linit.c | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d281492009fb..cb7d69939a34 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -415,6 +415,7 @@ struct aac_ciss_identify_pd { * These macros convert from physical channels to virtual channels */ #define CONTAINER_CHANNEL (0) +#define NATIVE_CHANNEL (1) #define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL) #define CONTAINER_TO_ID(cont) (cont) #define CONTAINER_TO_LUN(cont) (0) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 5a201da93250..5e1a2d67d90c 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -466,6 +466,17 @@ static int aac_slave_configure(struct scsi_device *sdev) ++num_lsu; depth = (host->can_queue - num_one) / num_lsu; + + if (sdev_channel(sdev) != NATIVE_CHANNEL) + goto common_config; + + /* + * Check if SATA drive + */ + if (strncmp(sdev->vendor, "ATA", 3) == 0) + depth = 32; + else + depth = 64; } common_config: -- cgit From 58eaffe54bca77fbf1a1bad7703265950a758cd1 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:38 -0700 Subject: scsi: aacraid: Set correct Queue Depth for HBA1000 RAW disks The default queue depth for non NATIVE RAW disks is calculated from the number of fibs and number of disks or a max of 256. This causes poor disk IO performance.
The fix is to set default qd based on the type of disks (SATA -32 and SAS -64) Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/linit.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 5e1a2d67d90c..9ef98e40d81a 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -405,17 +405,23 @@ static int aac_slave_configure(struct scsi_device *sdev) int chn, tid; unsigned int depth = 0; unsigned int set_timeout = 0; + bool set_qd_dev_type = false; + u8 devtype = 0; chn = aac_logical_to_phys(sdev_channel(sdev)); tid = sdev_id(sdev); - if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && - aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) { - depth = aac->hba_map[chn][tid].qd_limit; + if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) { + devtype = aac->hba_map[chn][tid].devtype; + + if (devtype == AAC_DEVTYPE_NATIVE_RAW) + depth = aac->hba_map[chn][tid].qd_limit; + else if (devtype == AAC_DEVTYPE_ARC_RAW) + set_qd_dev_type = true; + set_timeout = 1; goto common_config; } - if (aac->jbod && (sdev->type == TYPE_DISK)) sdev->removable = 1; @@ -470,16 +476,22 @@ static int aac_slave_configure(struct scsi_device *sdev) if (sdev_channel(sdev) != NATIVE_CHANNEL) goto common_config; - /* - * Check if SATA drive - */ + set_qd_dev_type = true; + + } + +common_config: + + /* + * Check if SATA drive + */ + if (set_qd_dev_type) { if (strncmp(sdev->vendor, "ATA", 3) == 0) depth = 32; else depth = 64; } -common_config: /* * Firmware has an individual device recovery time typically * of 35 seconds, give us a margin. -- cgit From fed820073f647f4ecb4f4ae310d698520a802891 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:39 -0700 Subject: scsi: aacraid: Remove reset support from check_health Check health does not need to reset the ctrl but just return the controller health status. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/commsup.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 45de9b5cc42d..aa4f47bf10b1 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1775,8 +1775,6 @@ int aac_check_health(struct aac_dev * aac) int BlinkLED; unsigned long time_now, flagv = 0; struct list_head * entry; - struct Scsi_Host * host; - int bled; /* Extending the scope of fib_lock slightly to protect aac->in_reset */ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) @@ -1888,19 +1886,6 @@ int aac_check_health(struct aac_dev * aac) printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); - if (!aac_check_reset || ((aac_check_reset == 1) && - (aac->supplement_adapter_info.supported_options2 & - AAC_OPTION_IGNORE_RESET))) - goto out; - host = aac->scsi_host_ptr; - if (aac->thread->pid != current->pid) - spin_lock_irqsave(host->host_lock, flagv); - bled = aac_check_reset != 1 ? 
1 : 0; - _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET); - if (aac->thread->pid != current->pid) - spin_unlock_irqrestore(host->host_lock, flagv); - return BlinkLED; - out: aac->in_reset = 0; return BlinkLED; -- cgit From 2a4a62c03fd0b5f2e361fbda85e043b2c1ff197d Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:40 -0700 Subject: scsi: aacraid: Change wait time for fib completion Change the completion wait time for the fibs in the reset and abort callback from 2 minutes to 15 seconds. 2 minutes is too long for waiting for completion. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/linit.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 9ef98e40d81a..f6a11af0432b 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -684,8 +684,8 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) (fib_callback) aac_hba_callback, (void *) cmd); - /* Wait up to 2 minutes for completion */ - for (count = 0; count < 120; ++count) { + /* Wait up to 15 secs for completion */ + for (count = 0; count < 15; ++count) { if (cmd->SCp.sent_command) { ret = SUCCESS; break; @@ -840,8 +840,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) (fib_callback) aac_hba_callback, (void *) cmd); - /* Wait up to 2 minutes for completion */ - for (count = 0; count < 120; ++count) { + /* Wait up to 15 seconds for completion */ + for (count = 0; count < 15; ++count) { if (cmd->SCp.sent_command) { ret = SUCCESS; break; -- cgit From 895dc759cf3996a56ca64e3e09cbea64e2a7ff62 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:41 -0700 Subject: scsi: aacraid: Log count info of scsi cmds before reset Log the location of the scsi cmds before triggering a reset. This information is useful for debugging. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/linit.c | 90 ++++++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 37 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index f6a11af0432b..0a8d303c8bc9 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -624,6 +624,56 @@ static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg) return aac_do_ioctl(dev, cmd, arg); } +static int get_num_of_incomplete_fibs(struct aac_dev *aac) +{ + + unsigned long flags; + struct scsi_device *sdev = NULL; + struct Scsi_Host *shost = aac->scsi_host_ptr; + struct scsi_cmnd *scmnd = NULL; + struct device *ctrl_dev; + + int mlcnt = 0; + int llcnt = 0; + int ehcnt = 0; + int fwcnt = 0; + int krlcnt = 0; + + __shost_for_each_device(sdev, shost) { + spin_lock_irqsave(&sdev->list_lock, flags); + list_for_each_entry(scmnd, &sdev->cmd_list, list) { + switch (scmnd->SCp.phase) { + case AAC_OWNER_FIRMWARE: + fwcnt++; + break; + case AAC_OWNER_ERROR_HANDLER: + ehcnt++; + break; + case AAC_OWNER_LOWLEVEL: + llcnt++; + break; + case AAC_OWNER_MIDLEVEL: + mlcnt++; + break; + default: + krlcnt++; + break; + } + } + spin_unlock_irqrestore(&sdev->list_lock, flags); + } + + ctrl_dev = &aac->pdev->dev; + + dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt); + dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt); + dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt); + dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt); + dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt); + + return mlcnt + llcnt + ehcnt + fwcnt; +} + static int aac_eh_abort(struct scsi_cmnd* cmd) { struct scsi_device * dev = cmd->device; @@ -853,8 +903,6 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) pr_err("%s: Host adapter reset request timed out\n", AAC_DRIVERNAME); } else { - struct scsi_cmnd *command; - unsigned long flags; /* Mark the assoc. FIB to not complete, eh handler does this */ for (count = 0; @@ -873,41 +921,9 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); - count = aac_check_health(aac); - if (count) - return count; - /* - * Wait for all commands to complete to this specific - * target (block maximum 60 seconds). - */ - for (count = 60; count; --count) { - int active = aac->in_reset; - - if (active == 0) - __shost_for_each_device(dev, host) { - spin_lock_irqsave(&dev->list_lock, flags); - list_for_each_entry(command, &dev->cmd_list, - list) { - if ((command != cmd) && - (command->SCp.phase == - AAC_OWNER_FIRMWARE)) { - active++; - break; - } - } - spin_unlock_irqrestore(&dev->list_lock, flags); - if (active) - break; - - } - /* - * We can exit If all the commands are complete - */ - if (active == 0) - return SUCCESS; - ssleep(1); - } - pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME); + count = get_num_of_incomplete_fibs(aac); + if (count == 0) + return SUCCESS; /* * This adapter needs a blind reset, only do so for -- cgit From 144ecd41f0f43600f0c103cb6d0d2f1619d70e96 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:42 -0700 Subject: scsi: aacraid: Print ctrl status before eh reset Log the status of the controller before issuing a reset. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/linit.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 0a8d303c8bc9..3dea4384489d 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -827,6 +827,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) int count; u32 bus, cid; int ret = FAILED; + int status = 0; bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); @@ -921,6 +922,14 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); + /* + * Check the health of the controller + */ + status = aac_adapter_check_health(aac); + if (status) + dev_err(&aac->pdev->dev, "Adapter health - %d\n", + status); + count = get_num_of_incomplete_fibs(aac); if (count == 0) return SUCCESS; -- cgit From 6b24d425881792b16ccf2189b43d57b4aff2a4e6 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:43 -0700 Subject: scsi: aacraid: Using single reset mask for IOP reset The driver can now trigger IOP reset with a single reset mask. Removed code that retrieves a reset_mask from the firmware. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aacraid.h | 5 ++++- drivers/scsi/aacraid/src.c | 16 ++-------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index cb7d69939a34..c74e1aa18a20 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2378,6 +2378,7 @@ struct revision #define SOFT_RESET_TIME 60 + struct aac_common { /* @@ -2488,7 +2489,9 @@ struct aac_hba_info { #define IOP_RESET_FW_FIB_DUMP 0x00000034 #define IOP_RESET 0x00001000 #define IOP_RESET_ALWAYS 0x00001001 -#define RE_INIT_ADAPTER 0x000000ee +#define RE_INIT_ADAPTER 0x000000ee + +#define IOP_SRC_RESET_MASK 0x00000100 /* * Adapter Status Register diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 7b0410e0f569..f278a21c86db 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -704,22 +704,10 @@ static void aac_send_iop_reset(struct aac_dev *dev, int bled) 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); - if ((bled || var != 0x00000001) && !dev->doorbell_mask) - bled = -EINVAL; - else if (dev->doorbell_mask) { - reset_mask = dev->doorbell_mask; - bled = 0; - var = 0x00000001; - } - aac_set_intx_mode(dev); - if (!bled && (dev->supplement_adapter_info.supported_options2 & - AAC_OPTION_DOORBELL_RESET)) { - src_writel(dev, MUnit.IDR, reset_mask); - } else { - src_writel(dev, MUnit.IDR, 0x100); - } + src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); + msleep(30000); } -- cgit From 80c7d8a5cffa7187c3b3b78eb67705dae91e9a1a Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:44 -0700 Subject: scsi: aacraid: Rework IOP reset Reworked IOP reset to remove unneeded variable and created a helper function to notify fw of an imminent IOP reset. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/src.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index f278a21c86db..39ecb58762a7 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -694,15 +694,17 @@ static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev) 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } -static void aac_send_iop_reset(struct aac_dev *dev, int bled) +static void aac_notify_fw_of_iop_reset(struct aac_dev *dev) { - u32 var, reset_mask; + aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL, + NULL, NULL, NULL, NULL); +} +static void aac_send_iop_reset(struct aac_dev *dev) +{ aac_dump_fw_fib_iop_reset(dev); - bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, - 0, 0, 0, 0, 0, 0, &var, - &reset_mask, NULL, NULL, NULL); + aac_notify_fw_of_iop_reset(dev); aac_set_intx_mode(dev); @@ -742,7 +744,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) switch (reset_type) { case IOP_HWSOFT_RESET: - aac_send_iop_reset(dev, bled); + aac_send_iop_reset(dev); /* * Check to see if KERNEL_UP_AND_RUNNING * Wait for the adapter to be up and running. @@ -769,7 +771,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) } break; default: - aac_send_iop_reset(dev, bled); + aac_send_iop_reset(dev); break; } -- cgit From 0e9973ed3382652b324971753745cfe08488bb9f Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:45 -0700 Subject: scsi: aacraid: Add periodic checks to see IOP reset status Added function that waits with a timeout for the ctrl to be up and running after triggering an IOP reset. Also removed 30 sec sleep as it is not needed. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 1 + drivers/scsi/aacraid/src.c | 45 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index c74e1aa18a20..bd0afdd7f2e4 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2516,6 +2516,7 @@ struct aac_hba_info { #define SELF_TEST_FAILED 0x00000004 #define MONITOR_PANIC 0x00000020 +#define KERNEL_BOOTING 0x00000040 #define KERNEL_UP_AND_RUNNING 0x00000080 #define KERNEL_PANIC 0x00000100 #define FLASH_UPD_PENDING 0x00002000 diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 39ecb58762a7..cde4160c3e47 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -694,6 +694,37 @@ static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev) 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } +static bool aac_is_ctrl_up_and_running(struct aac_dev *dev) +{ + bool ctrl_up = true; + unsigned long status, start; + bool is_up = false; + + start = jiffies; + do { + schedule(); + status = src_readl(dev, MUnit.OMR); + + if (status == 0xffffffff) + status = 0; + + if (status & KERNEL_BOOTING) { + start = jiffies; + continue; + } + + if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) { + ctrl_up = false; + break; + } + + is_up = status & KERNEL_UP_AND_RUNNING; + + } while (!is_up); + + return ctrl_up; +} + static void aac_notify_fw_of_iop_reset(struct aac_dev *dev) { aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL, @@ -709,8 +740,6 @@ static void aac_send_iop_reset(struct aac_dev *dev) aac_set_intx_mode(dev); src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); - - msleep(30000); } static void aac_send_hardware_soft_reset(struct aac_dev *dev) @@ -726,6 +755,7 @@ static void aac_send_hardware_soft_reset(struct aac_dev *dev) static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { unsigned long status, start; + bool is_ctrl_up; if (bled < 0) goto invalid_out; @@ -745,6 +775,16 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) switch (reset_type) { case IOP_HWSOFT_RESET: aac_send_iop_reset(dev); + + /* + * Creates a delay or wait till up and running comes thru + */ + is_ctrl_up = aac_is_ctrl_up_and_running(dev); + if (!is_ctrl_up) + dev_err(&dev->pdev->dev, "IOP reset failed\n"); + else + goto set_startup; + /* * Check to see if KERNEL_UP_AND_RUNNING * Wait for the adapter to be up and running. @@ -780,6 +820,7 @@ invalid_out: if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) return -ENODEV; +set_startup: if (startup_timeout < 300) startup_timeout = 300; -- cgit From 77cb6d5ea6033e5d477947aa682728959d6c3f8f Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:46 -0700 Subject: scsi: aacraid: Rework SOFT reset code Now the driver issues a soft reset and waits for the controller to be up and running by periodically checking on the status of the controller health registers. Also prevents ARC adapters from issuing soft reset if IOP resets failed. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/src.c | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index cde4160c3e47..9ad60d63586d 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -754,8 +754,8 @@ static void aac_send_hardware_soft_reset(struct aac_dev *dev) static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { - unsigned long status, start; bool is_ctrl_up; + int ret = 0; if (bled < 0) goto invalid_out; @@ -785,24 +785,21 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) else goto set_startup; - /* - * Check to see if KERNEL_UP_AND_RUNNING - * Wait for the adapter to be up and running. - * If !KERNEL_UP_AND_RUNNING issue HW Soft Reset - */ - status = src_readl(dev, MUnit.OMR); - if (dev->sa_firmware - && !(status & KERNEL_UP_AND_RUNNING)) { - start = jiffies; - do { - status = src_readl(dev, MUnit.OMR); - if (time_after(jiffies, - start+HZ*SOFT_RESET_TIME)) { - aac_send_hardware_soft_reset(dev); - start = jiffies; - } - } while (!(status & KERNEL_UP_AND_RUNNING)); + if (!dev->sa_firmware) { + ret = -ENODEV; + goto out; } + + aac_send_hardware_soft_reset(dev); + dev->msi_enabled = 0; + + is_ctrl_up = aac_is_ctrl_up_and_running(dev); + if (!is_ctrl_up) { + dev_err(&dev->pdev->dev, "SOFT reset failed\n"); + ret = -ENODEV; + goto out; + } + break; case HW_SOFT_RESET: if (dev->sa_firmware) { @@ -818,13 +815,14 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) invalid_out: if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) - return -ENODEV; + ret = -ENODEV; set_startup: if (startup_timeout < 300) startup_timeout = 300; - return 0; +out: + return ret; } /** -- cgit From 5aa60732520dd0476ed9e20047b837780bbb7799 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:47 -0700 Subject: scsi: aacraid: Rework aac_src_restart Removed switch case and replaced with if mask checks. Moved KERNEL_PANIC check to when bled is less than 0. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/src.c | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 9ad60d63586d..e7b4c0782806 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -772,8 +772,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; - switch (reset_type) { - case IOP_HWSOFT_RESET: + if (reset_type & HW_IOP_RESET) { aac_send_iop_reset(dev); /* @@ -784,12 +783,14 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) dev_err(&dev->pdev->dev, "IOP reset failed\n"); else goto set_startup; + } - if (!dev->sa_firmware) { - ret = -ENODEV; - goto out; - } + if (!dev->sa_firmware) { + ret = -ENODEV; + goto out; + } + if (reset_type & HW_SOFT_RESET) { aac_send_hardware_soft_reset(dev); dev->msi_enabled = 0; @@ -799,30 +800,19 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) ret = -ENODEV; goto out; } - - break; - case HW_SOFT_RESET: - if (dev->sa_firmware) { - aac_send_hardware_soft_reset(dev); - aac_set_intx_mode(dev); - } - break; - default: - aac_send_iop_reset(dev); - break; } -invalid_out: - - if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) - ret = -ENODEV; - set_startup: if (startup_timeout < 300) startup_timeout = 300; out: return ret; + +invalid_out: + if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) + ret = -ENODEV; +goto out; } /** -- cgit From 9473ddb2b037161b0bf16b60b37694f961fd6d48 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:48 -0700 Subject: scsi: aacraid: Use correct function to get ctrl health The command thread checks the ctrl health periodically before sending updates to the controller. The function that it uses is aac_check_health which does more than get the health status. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/commsup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index aa4f47bf10b1..c48a245a6d47 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -2475,7 +2475,7 @@ int aac_command_thread(void *data) if ((time_before(next_check_jiffies,next_jiffies)) && ((difference = next_check_jiffies - jiffies) <= 0)) { next_check_jiffies = next_jiffies; - if (aac_check_health(dev) == 0) { + if (aac_adapter_check_health(dev) == 0) { difference = ((long)(unsigned)check_interval) * HZ; next_check_jiffies = jiffies + difference; @@ -2488,7 +2488,7 @@ int aac_command_thread(void *data) int ret; /* Don't even try to talk to adapter if its sick */ - ret = aac_check_health(dev); + ret = aac_adapter_check_health(dev); if (ret || !dev->queues) break; next_check_jiffies = jiffies -- cgit From 8c41b9b7987e404b3c922cf9d8ff941112051837 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:49 -0700 Subject: scsi: aacraid: Make sure ioctl returns on controller reset Made sure that ioctl commands return in case of a controller reset. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: Dave Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/commsup.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index c48a245a6d47..1541ebc1efa2 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -803,11 +803,11 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, if (aac_check_eeh_failure(dev)) return -EFAULT; - /* Only set for first known interruptable command */ - if (down_interruptible(&fibptr->event_wait)) { + fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; + if (down_interruptible(&fibptr->event_wait)) fibptr->done = 2; - up(&fibptr->event_wait); - } + fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT); + spin_lock_irqsave(&fibptr->event_lock, flags); if ((fibptr->done == 0) || (fibptr->done == 2)) { fibptr->done = 2; /* Tell interrupt we aborted */ @@ -1514,6 +1514,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) int jafo = 0; int bled; u64 dmamask; + int num_of_fibs = 0; /* * Assumptions: @@ -1547,10 +1548,20 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) /* * Loop through the fibs, close the synchronous FIBS */ - for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) { + retval = 1; + num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; + for (index = 0; index < num_of_fibs; index++) { + struct fib *fib = &aac->fibs[index]; - if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && - (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) { + __le32 XferState = fib->hw_fib_va->header.XferState; + bool is_response_expected = false; + + if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) && + (XferState & cpu_to_le32(ResponseExpected))) + is_response_expected = true; + + if (is_response_expected + || fib->flags & FIB_CONTEXT_FLAG_WAIT) { unsigned long flagv; spin_lock_irqsave(&fib->event_lock, flagv); up(&fib->event_wait); -- cgit From 786e898c86ee532b76ca5e2ec0b8c2d464d553db Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:50 -0700 Subject: scsi: aacraid: Enable ctrl reset for both hba and arc Make sure that IOP and SOFT reset are enabled for both arc and hba1000 controllers. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/linit.c | 77 +++++++++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 3dea4384489d..d933d2f93ed7 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -828,6 +828,11 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) u32 bus, cid; int ret = FAILED; int status = 0; + __le32 supported_options2 = 0; + bool is_mu_reset; + bool is_ignore_reset; + bool is_doorbell_reset; + bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); @@ -900,9 +905,9 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) msleep(1000); } - if (ret != SUCCESS) - pr_err("%s: Host adapter reset request timed out\n", - AAC_DRIVERNAME); + if (ret == SUCCESS) + goto out; + } else { /* Mark the assoc. FIB to not complete, eh handler does this */ for (count = 0; @@ -918,44 +923,42 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; } } + } - pr_err("%s: Host adapter reset request.
SCSI hang ?\n", - AAC_DRIVERNAME); + pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); + + /* + * Check the health of the controller + */ + status = aac_adapter_check_health(aac); + if (status) + dev_err(&aac->pdev->dev, "Adapter health - %d\n", status); + + count = get_num_of_incomplete_fibs(aac); + if (count == 0) + return SUCCESS; - /* - * Check the health of the controller - */ - status = aac_adapter_check_health(aac); - if (status) - dev_err(&aac->pdev->dev, "Adapter health - %d\n", - status); - - count = get_num_of_incomplete_fibs(aac); - if (count == 0) - return SUCCESS; - - /* - * This adapter needs a blind reset, only do so for - * Adapters that support a register, instead of a commanded, - * reset. - */ - if (((aac->supplement_adapter_info.supported_options2 & - AAC_OPTION_MU_RESET) || - (aac->supplement_adapter_info.supported_options2 & - AAC_OPTION_DOORBELL_RESET)) && - aac_check_reset && - ((aac_check_reset != 1) || - !(aac->supplement_adapter_info.supported_options2 & - AAC_OPTION_IGNORE_RESET))) { - /* Bypass wait for command quiesce */ - aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET); - } - ret = SUCCESS; - } /* - * Cause an immediate retry of the command with a ten second delay - * after successful tur + * Check if reset is supported by the firmware */ + supported_options2 = aac->supplement_adapter_info.supported_options2; + is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET; + is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET; + is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET; + /* + * This adapter needs a blind reset, only do so for + * Adapters that support a register, instead of a commanded, + * reset. + */ + if ((is_mu_reset || is_doorbell_reset) + && aac_check_reset + && (aac_check_reset != -1 || !is_ignore_reset)) { + /* Bypass wait for command quiesce */ + aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET); + } + ret = SUCCESS; + +out: return ret; } -- cgit From 4a76be0dc53a2d725ee126a806e5988135952a05 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:51 -0700 Subject: scsi: aacraid: Add reset debugging statements Added info and error messages in controller reset function to log information about the status of the IOP/SOFT reset. Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/src.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index e7b4c0782806..48c2b2b34b72 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -761,8 +761,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) goto invalid_out; if (bled) - pr_err("%s%d: adapter kernel panic'd %x.\n", - dev->name, dev->id, bled); + dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled); /* * When there is a BlinkLED, IOP_RESET has not effect @@ -772,7 +771,10 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; + dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type); + if (reset_type & HW_IOP_RESET) { + dev_info(&dev->pdev->dev, "Issuing IOP reset\n"); aac_send_iop_reset(dev); /* @@ -781,16 +783,20 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) is_ctrl_up = aac_is_ctrl_up_and_running(dev); if (!is_ctrl_up) dev_err(&dev->pdev->dev, "IOP reset failed\n"); - else + else { + dev_info(&dev->pdev->dev, "IOP reset succeded\n"); goto set_startup; + } } if (!dev->sa_firmware) { + dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n"); ret = -ENODEV; goto out; } if (reset_type & HW_SOFT_RESET) { + dev_info(&dev->pdev->dev, "Issuing SOFT reset\n"); aac_send_hardware_soft_reset(dev); dev->msi_enabled = 0; @@ -799,7 +805,8 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) dev_err(&dev->pdev->dev, "SOFT reset failed\n"); ret = -ENODEV; goto out; - } + } else + dev_info(&dev->pdev->dev, "SOFT reset succeded\n"); } set_startup: -- cgit From 395e5df79a9588abf1099ea746f11872c9086252 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:52 -0700 Subject: scsi: aacraid: Remove reference to Series-9 Remove reference to Series-9 HBA and created arc ctrl check function. Signed-off-by: Prasad B Munirathnam Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 13 ++++++++++++- drivers/scsi/aacraid/comminit.c | 18 ++++-------------- drivers/scsi/aacraid/commsup.c | 5 +---- drivers/scsi/aacraid/linit.c | 9 ++------- 4 files changed, 19 insertions(+), 26 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index bd0afdd7f2e4..861a35c77c01 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -424,7 +424,6 @@ struct aac_ciss_identify_pd { #define PMC_DEVICE_S6 0x28b #define PMC_DEVICE_S7 0x28c #define PMC_DEVICE_S8 0x28d -#define PMC_DEVICE_S9 0x28f #define aac_phys_to_logical(x) ((x)+1) #define aac_logical_to_phys(x) ((x)?(x)-1:0) @@ -2689,6 +2688,18 @@ int aac_probe_container(struct aac_dev *dev, int cid); int _aac_rx_init(struct aac_dev *dev); int aac_rx_select_comm(struct aac_dev *dev, int comm); int aac_rx_deliver_producer(struct fib * fib); + +static inline int aac_is_src(struct aac_dev *dev) +{ + u16 device = dev->pdev->device; + + if (device == PMC_DEVICE_S6 || + device == PMC_DEVICE_S7 || + device == PMC_DEVICE_S8) + return 1; + return 0; +} + char * get_container_type(unsigned type); extern int numacb; extern char aac_driver_version[]; diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 1151505853cf..9ee025b1d0e0 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -53,11 +53,8 @@ static inline int aac_is_msix_mode(struct aac_dev *dev) { u32 status = 0; - if (dev->pdev->device == PMC_DEVICE_S6 || - dev->pdev->device == PMC_DEVICE_S7 || - dev->pdev->device == PMC_DEVICE_S8) { + if (aac_is_src(dev)) status = src_readl(dev, MUnit.OMR); - } return (status & AAC_INT_MODE_MSIX); } @@ -325,9 +322,7 @@ int aac_send_shutdown(struct aac_dev * dev) /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibctx); - if ((dev->pdev->device == PMC_DEVICE_S7 || - dev->pdev->device == PMC_DEVICE_S8 || - dev->pdev->device == PMC_DEVICE_S9) && + if (aac_is_src(dev) && dev->msi_enabled) aac_set_intx_mode(dev); return status; @@ -583,9 +578,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->max_fib_size = status[1] & 0xFFE0; host->sg_tablesize = status[2] >> 16; dev->sg_tablesize = status[2] & 0xFFFF; - if (dev->pdev->device == PMC_DEVICE_S7 || - dev->pdev->device == PMC_DEVICE_S8 || - dev->pdev->device == PMC_DEVICE_S9) { + if (aac_is_src(dev)) { if (host->can_queue > (status[3] >> 16) - AAC_NUM_MGT_FIB) host->can_queue = (status[3] >> 16) - @@ -604,10 +597,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) pr_warn("numacb=%d ignored\n", numacb); } - if (dev->pdev->device == PMC_DEVICE_S6 || - dev->pdev->device == PMC_DEVICE_S7 || - dev->pdev->device == PMC_DEVICE_S8 || - dev->pdev->device == PMC_DEVICE_S9) + if (aac_is_src(dev)) aac_define_int_mode(dev); /* * Ok now init the communication subsystem diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 1541ebc1efa2..1c617ccfaf12 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -2591,10 +2591,7 @@ void aac_free_irq(struct aac_dev *dev) int cpu; cpu = cpumask_first(cpu_online_mask); - if (dev->pdev->device == PMC_DEVICE_S6 || - dev->pdev->device == PMC_DEVICE_S7 || - dev->pdev->device == PMC_DEVICE_S8 || - dev->pdev->device == PMC_DEVICE_S9) { + if (aac_is_src(dev)) { if (dev->max_msix > 1) { for (i = 0; i < dev->max_msix; i++) free_irq(pci_irq_vector(dev->pdev, i), diff --git 
a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index d933d2f93ed7..0f277df73af0 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1416,10 +1416,7 @@ static void __aac_shutdown(struct aac_dev * aac) kthread_stop(aac->thread); } aac_adapter_disable_int(aac); - if (aac->pdev->device == PMC_DEVICE_S6 || - aac->pdev->device == PMC_DEVICE_S7 || - aac->pdev->device == PMC_DEVICE_S8 || - aac->pdev->device == PMC_DEVICE_S9) { + if (aac_is_src(aac)) { if (aac->max_msix > 1) { for (i = 0; i < aac->max_msix; i++) { free_irq(pci_irq_vector(aac->pdev, i), @@ -1684,9 +1681,7 @@ static int aac_acquire_resources(struct aac_dev *dev) aac_adapter_enable_int(dev); - if ((dev->pdev->device == PMC_DEVICE_S7 || - dev->pdev->device == PMC_DEVICE_S8 || - dev->pdev->device == PMC_DEVICE_S9)) + if (aac_is_src(dev)) aac_define_int_mode(dev); if (dev->msi_enabled) -- cgit From 216e80ff78cd2fe1a92c9e4565d578e540e35cc8 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Wed, 10 May 2017 09:39:53 -0700 Subject: scsi: aacraid: Update driver version to 50834 Update the driver version to 50834 Signed-off-by: Raghava Aditya Renukunta Reviewed-by: David Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aacraid.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 861a35c77c01..d31a9bc2ba69 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -97,7 +97,7 @@ enum { #define PMC_GLOBAL_INT_BIT0 0x00000001 #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 50792 +# define AAC_DRIVER_BUILD 50834 # define AAC_DRIVER_BRANCH "-custom" #endif #define MAXIMUM_NUM_CONTAINERS 32 -- cgit From 046b263f7521f63fdf3efd02f74efe7b2cff4b1f Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:50:44 -0500 Subject: scsi: hpsa: update identify physical device structure - align with latest spec. - added __attribute((aligned(512))) Reviewed-by: Scott Teel Reviewed-by: Scott Benesh Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa_cmd.h | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 5961705eef76..078afe448115 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -809,10 +809,7 @@ struct bmic_identify_physical_device { u8 max_temperature_degreesC; u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */ __le16 current_queue_depth_limit; - u8 switch_name[10]; - __le16 switch_port; - u8 alternate_paths_switch_name[40]; - u8 alternate_paths_switch_port[8]; + u8 reserved_switch_stuff[60]; __le16 power_on_hours; /* valid only if gas gauge supported */ __le16 percent_endurance_used; /* valid only if gas gauge supported. 
*/ #define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \ @@ -828,11 +825,22 @@ struct bmic_identify_physical_device { (idphydrv->smart_carrier_authentication == 0x01) u8 smart_carrier_app_fw_version; u8 smart_carrier_bootloader_fw_version; + u8 sanitize_support_flags; + u8 drive_key_flags; u8 encryption_key_name[64]; __le32 misc_drive_flags; __le16 dek_index; - u8 padding[112]; -}; + __le16 hba_drive_encryption_flags; + __le16 max_overwrite_time; + __le16 max_block_erase_time; + __le16 max_crypto_erase_time; + u8 device_connector_info[5]; + u8 connector_name[8][8]; + u8 page_83_id[16]; + u8 max_link_rate[256]; + u8 neg_phys_link_rate[256]; + u8 box_conn_name[8]; +} __attribute((aligned(512))); struct bmic_sense_subsystem_info { u8 primary_slot_number; -- cgit From 5ac517b8a2c592a3a6e3928de7b868c52b200444 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:50:50 -0500 Subject: scsi: hpsa: do not get enclosure info for external devices external shelves do not support BMICs. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 73daace478cb..8e22aed09adb 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3353,6 +3353,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h, bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); + if (encl_dev->target == -1 || encl_dev->lun == -1) { + rc = IO_OK; + goto out; + } + if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { rc = IO_OK; goto out; -- cgit From 8516a2db9aec293d983a729c23488acd688fece4 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:50:58 -0500 Subject: scsi: hpsa: update reset handler Use the return from TUR as a check for the device state. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8e22aed09adb..9fb30c4aae82 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3090,7 +3090,7 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, if (unlikely(rc)) atomic_set(&dev->reset_cmds_out, 0); else - wait_for_device_to_become_ready(h, scsi3addr, 0); + rc = wait_for_device_to_become_ready(h, scsi3addr, 0); mutex_unlock(&h->reset_mutex); return rc; -- cgit From ef8a52036268d0f8b5a77f6e34a1d09b0a7fdfdc Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:04 -0500 Subject: scsi: hpsa: do not reset enclosures Prevent enclosure resets. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hpsa.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9fb30c4aae82..2990897b40e0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5853,6 +5853,9 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) return FAILED; } + if (dev->devtype == TYPE_ENCLOSURE) + return SUCCESS; + /* if controller locked up, we can guarantee command won't complete */ if (lockup_detected(h)) { snprintf(msg, sizeof(msg), -- cgit From 3b476aa24d44ffb68167d1442f8d12319ef4f183 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:10 -0500 Subject: scsi: hpsa: rescan later if reset in progress - schedule another scan. - mark current scan as completed. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 2990897b40e0..53a4f34e73c2 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5620,6 +5620,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh) */ if (h->reset_in_progress) { h->drv_req_rescan = 1; + hpsa_scan_complete(h); return; } -- cgit From d2315ce6e3d09c8dbc9ee93ef6b3f5213e8325ac Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:16 -0500 Subject: scsi: hpsa: correct resets on retried commands - call scsi_done when the command completes. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 53a4f34e73c2..a2852daa11c2 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5465,7 +5465,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) return hpsa_cmd_free_and_done(c->h, c, cmd); } if (c->reset_pending) - return hpsa_cmd_resolve_and_free(c->h, c); + return hpsa_cmd_free_and_done(c->h, c, cmd); if (c->abort_pending) return hpsa_cmd_abort_and_free(c->h, c, cmd); if (c->cmd_type == CMD_IOACCEL2) { -- cgit From c59d04f30d4216af12930a4f5fdc35f490777171 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:22 -0500 Subject: scsi: hpsa: cleanup reset handler - mark device state sooner. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 59 ++++++++++++++++++++++++++++++++++++++++------------- drivers/scsi/hpsa.h | 1 + 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a2852daa11c2..20b4e83d0529 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1859,10 +1859,13 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, * A reset can cause a device status to change * re-schedule the scan to see what happened. 
*/ + spin_lock_irqsave(&h->reset_lock, flags); if (h->reset_in_progress) { h->drv_req_rescan = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); return; } + spin_unlock_irqrestore(&h->reset_lock, flags); added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); @@ -5618,11 +5621,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh) /* * Do the scan after a reset completion */ + spin_lock_irqsave(&h->reset_lock, flags); if (h->reset_in_progress) { h->drv_req_rescan = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); hpsa_scan_complete(h); return; } + spin_unlock_irqrestore(&h->reset_lock, flags); hpsa_update_scsi_devices(h); @@ -5834,28 +5840,38 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, */ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) { - int rc; + int rc = SUCCESS; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; u8 reset_type; char msg[48]; + unsigned long flags; /* find the controller to which the command to be aborted was sent */ h = sdev_to_hba(scsicmd->device); if (h == NULL) /* paranoia */ return FAILED; - if (lockup_detected(h)) - return FAILED; + spin_lock_irqsave(&h->reset_lock, flags); + h->reset_in_progress = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); + + if (lockup_detected(h)) { + rc = FAILED; + goto return_reset_status; + } dev = scsicmd->device->hostdata; if (!dev) { dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); - return FAILED; + rc = FAILED; + goto return_reset_status; } - if (dev->devtype == TYPE_ENCLOSURE) - return SUCCESS; + if (dev->devtype == TYPE_ENCLOSURE) { + rc = SUCCESS; + goto return_reset_status; + } /* if controller locked up, we can guarantee command won't complete */ if (lockup_detected(h)) { @@ -5863,7 +5879,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) "cmd %d RESET FAILED, lockup detected", hpsa_get_cmd_index(scsicmd)); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); - return FAILED; + rc = FAILED; + goto return_reset_status; } /* this reset request might be the result of a lockup; check */ @@ -5872,12 +5889,15 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) "cmd %d RESET FAILED, new lockup detected", hpsa_get_cmd_index(scsicmd)); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); - return FAILED; + rc = FAILED; + goto return_reset_status; } /* Do not attempt on controller */ - if (is_hba_lunid(dev->scsi3addr)) - return SUCCESS; + if (is_hba_lunid(dev->scsi3addr)) { + rc = SUCCESS; + goto return_reset_status; + } if (is_logical_dev_addr_mode(dev->scsi3addr)) reset_type = HPSA_DEVICE_RESET_MSG; @@ -5888,17 +5908,24 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); - h->reset_in_progress = 1; - /* send a reset to the SCSI LUN which the command was sent to */ rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, DEFAULT_REPLY_QUEUE); + if (rc == 0) + rc = SUCCESS; + else + rc = FAILED; + sprintf(msg, "reset %s %s", reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", - rc == 0 ? "completed successfully" : "failed"); + rc == SUCCESS ? "completed successfully" : "failed"); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + +return_reset_status: + spin_lock_irqsave(&h->reset_lock, flags); h->reset_in_progress = 0; - return rc == 0 ? 
SUCCESS : FAILED; + spin_unlock_irqrestore(&h->reset_lock, flags); + return rc; } static void swizzle_abort_tag(u8 *tag) @@ -8649,10 +8676,13 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work) /* * Do the scan after the reset */ + spin_lock_irqsave(&h->reset_lock, flags); if (h->reset_in_progress) { h->drv_req_rescan = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); return; } + spin_unlock_irqrestore(&h->reset_lock, flags); if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { scsi_host_get(h->scsi_host); @@ -8759,6 +8789,7 @@ reinit_after_soft_reset: spin_lock_init(&h->lock); spin_lock_init(&h->offline_device_lock); spin_lock_init(&h->scan_lock); + spin_lock_init(&h->reset_lock); atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 6f04f2ad4125..5352664a9f15 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -301,6 +301,7 @@ struct ctlr_info { struct mutex reset_mutex; u8 reset_in_progress; struct hpsa_sas_node *sas_host; + spinlock_t reset_lock; }; struct offline_device_entry { -- cgit From 5086435e662c7b6ada6cb5f48a1215fc6f612153 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:28 -0500 Subject: scsi: hpsa: correct queue depth for externals - queue depth assignment not in correct place, had no effect. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 22 ++++++++++------------ drivers/scsi/hpsa.h | 1 + 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 20b4e83d0529..5b0cc0ebb5e0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2069,10 +2069,13 @@ static int hpsa_slave_configure(struct scsi_device *sdev) sd = sdev->hostdata; sdev->no_uld_attach = !sd || !sd->expose_device; - if (sd) - queue_depth = sd->queue_depth != 0 ? - sd->queue_depth : sdev->host->can_queue; - else + if (sd) { + if (sd->external) + queue_depth = EXTERNAL_QD; + else + queue_depth = sd->queue_depth != 0 ? + sd->queue_depth : sdev->host->can_queue; + } else queue_depth = sdev->host->can_queue; scsi_change_queue_depth(sdev, queue_depth); @@ -3915,6 +3918,9 @@ static int hpsa_update_device_info(struct ctlr_info *h, this_device->queue_depth = h->nr_cmds; } + if (this_device->external) + this_device->queue_depth = EXTERNAL_QD; + if (is_OBDR_device) { /* See if this is a One-Button-Disaster-Recovery device * by looking for "$DR-10" at offset 43 in inquiry data. 
@@ -4123,14 +4129,6 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, int rc; struct ext_report_lun_entry *rle; - /* - * external targets don't support BMIC - */ - if (dev->external) { - dev->queue_depth = 7; - return; - } - rle = &rlep->LUN[rle_index]; dev->ioaccel_handle = rle->ioaccel_handle; diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 5352664a9f15..61dd54aa4d5d 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -57,6 +57,7 @@ struct hpsa_sas_phy { bool added_to_port; }; +#define EXTERNAL_QD 7 struct hpsa_scsi_dev_t { unsigned int devtype; int bus, target, lun; /* as presented to the OS */ -- cgit From 3d38f00c4107cc007056db9f4ab14ecb17ed193f Mon Sep 17 00:00:00 2001 From: Scott Teel Date: Thu, 4 May 2017 17:51:36 -0500 Subject: scsi: hpsa: separate monitor events from rescan worker create new worker thread to monitor controller events - both the rescan and event monitor workers can cause a rescan to occur however for multipath we have found that we need to respond faster than the normal scheduled rescan interval for path fail-overs. - getting controller events only involves reading a register, but the rescan worker can obtain an updated LUN list when there is a PTRAID device present. - move common code to a separate function. advantages: - detect controller events more frequently. - leave rescan thread interval at 30 seconds. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 76 ++++++++++++++++++++++++++++++++++++++++------------- drivers/scsi/hpsa.h | 1 + 2 files changed, 59 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 5b0cc0ebb5e0..2ec907993400 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1110,6 +1110,7 @@ static int is_firmware_flash_cmd(u8 *cdb) */ #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) +#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ) static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, struct CommandList *c) { @@ -8661,15 +8662,10 @@ out: return rc; } -static void hpsa_rescan_ctlr_worker(struct work_struct *work) +static void hpsa_perform_rescan(struct ctlr_info *h) { + struct Scsi_Host *sh = NULL; unsigned long flags; - struct ctlr_info *h = container_of(to_delayed_work(work), - struct ctlr_info, rescan_ctlr_work); - - - if (h->remove_in_progress) - return; /* * Do the scan after the reset @@ -8682,23 +8678,63 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work) } spin_unlock_irqrestore(&h->reset_lock, flags); - if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { - scsi_host_get(h->scsi_host); + sh = scsi_host_get(h->scsi_host); + if (sh != NULL) { + hpsa_scan_start(sh); + scsi_host_put(sh); + h->drv_req_rescan = 0; + } +} + +/* + * watch for controller events + */ +static void hpsa_event_monitor_worker(struct work_struct *work) +{ + struct ctlr_info *h = container_of(to_delayed_work(work), + struct ctlr_info, event_monitor_work); + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); + if (h->remove_in_progress) { + spin_unlock_irqrestore(&h->lock, flags); + return; + } + spin_unlock_irqrestore(&h->lock, flags); + + if (hpsa_ctlr_needs_rescan(h)) { hpsa_ack_ctlr_events(h); - hpsa_scan_start(h->scsi_host); - scsi_host_put(h->scsi_host); + hpsa_perform_rescan(h); + } + + spin_lock_irqsave(&h->lock, flags); + if (!h->remove_in_progress) + 
schedule_delayed_work(&h->event_monitor_work, + HPSA_EVENT_MONITOR_INTERVAL); + spin_unlock_irqrestore(&h->lock, flags); +} + +static void hpsa_rescan_ctlr_worker(struct work_struct *work) +{ + unsigned long flags; + struct ctlr_info *h = container_of(to_delayed_work(work), + struct ctlr_info, rescan_ctlr_work); + + spin_lock_irqsave(&h->lock, flags); + if (h->remove_in_progress) { + spin_unlock_irqrestore(&h->lock, flags); + return; + } + spin_unlock_irqrestore(&h->lock, flags); + + if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { + hpsa_perform_rescan(h); } else if (h->discovery_polling) { hpsa_disable_rld_caching(h); if (hpsa_luns_changed(h)) { - struct Scsi_Host *sh = NULL; - dev_info(&h->pdev->dev, "driver discovery polling rescan.\n"); - sh = scsi_host_get(h->scsi_host); - if (sh != NULL) { - hpsa_scan_start(sh); - scsi_host_put(sh); - } + hpsa_perform_rescan(h); } } spin_lock_irqsave(&h->lock, flags); @@ -8964,6 +9000,9 @@ reinit_after_soft_reset: INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, h->heartbeat_sample_interval); + INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker); + schedule_delayed_work(&h->event_monitor_work, + HPSA_EVENT_MONITOR_INTERVAL); return 0; clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ @@ -9132,6 +9171,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) spin_unlock_irqrestore(&h->lock, flags); cancel_delayed_work_sync(&h->monitor_ctlr_work); cancel_delayed_work_sync(&h->rescan_ctlr_work); + cancel_delayed_work_sync(&h->event_monitor_work); destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 61dd54aa4d5d..c9c4927f5d0e 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -245,6 +245,7 @@ struct ctlr_info { u32 __percpu *lockup_detected; struct delayed_work monitor_ctlr_work; struct delayed_work rescan_ctlr_work; + struct delayed_work event_monitor_work; int remove_in_progress; /* Address of h->q[x] is passed to intr handler to know which queue */ u8 q[MAX_REPLY_QUEUES]; -- cgit From b63c64ac5a5ccb6ee56e0d906f2f5d2b7e54cbd9 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:42 -0500 Subject: scsi: hpsa: send ioaccel requests with 0 length down raid path - Block I/O requests with 0 length transfers which go down the ioaccel path. This causes lockup issues down in the basecode. - These issues have been fixed, but there are customers who are experiencing the issues when running older firmware. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hpsa.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 2ec907993400..ea778cc453dc 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4591,7 +4591,55 @@ sglist_finished: return 0; } -#define IO_ACCEL_INELIGIBLE (1) +#define BUFLEN 128 +static inline void warn_zero_length_transfer(struct ctlr_info *h, + u8 *cdb, int cdb_len, + const char *func) +{ + char buf[BUFLEN]; + int outlen; + int i; + + outlen = scnprintf(buf, BUFLEN, + "%s: Blocking zero-length request: CDB:", func); + for (i = 0; i < cdb_len; i++) + outlen += scnprintf(buf+outlen, BUFLEN - outlen, + "%02hhx", cdb[i]); + dev_warn(&h->pdev->dev, "%s\n", buf); +} + +#define IO_ACCEL_INELIGIBLE 1 +/* zero-length transfers trigger hardware errors. */ +static bool is_zero_length_transfer(u8 *cdb) +{ + u32 block_cnt; + + /* Block zero-length transfer sizes on certain commands. */ + switch (cdb[0]) { + case READ_10: + case WRITE_10: + case VERIFY: /* 0x2F */ + case WRITE_VERIFY: /* 0x2E */ + block_cnt = get_unaligned_be16(&cdb[7]); + break; + case READ_12: + case WRITE_12: + case VERIFY_12: /* 0xAF */ + case WRITE_VERIFY_12: /* 0xAE */ + block_cnt = get_unaligned_be32(&cdb[6]); + break; + case READ_16: + case WRITE_16: + case VERIFY_16: /* 0x8F */ + block_cnt = get_unaligned_be32(&cdb[10]); + break; + default: + return false; + } + + return block_cnt == 0; +} + static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) { int is_write = 0; @@ -4658,6 +4706,12 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); + if (is_zero_length_transfer(cdb)) { + warn_zero_length_transfer(h, cdb, cdb_len, __func__); + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + if (fixup_ioaccel_cdb(cdb, &cdb_len)) { atomic_dec(&phys_disk->ioaccel_cmds_out); return IO_ACCEL_INELIGIBLE; @@ -4822,6 +4876,12 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); + if (is_zero_length_transfer(cdb)) { + warn_zero_length_transfer(h, cdb, cdb_len, __func__); + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + if (fixup_ioaccel_cdb(cdb, &cdb_len)) { atomic_dec(&phys_disk->ioaccel_cmds_out); return IO_ACCEL_INELIGIBLE; -- cgit From 08ec46f673369f65dfc6fc52fb7fadb39776e81f Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:49 -0500 Subject: scsi: hpsa: remove abort handler - simplify the driver - there are a lot of quirky racy conditions not handled - causes more aborts/resets when the number of commands to be aborted is large, such as in multi-path fail-overs. - has been turned off in our internal driver since 8/31/2015 Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hpsa.c | 621 +--------------------------------------------------- drivers/scsi/hpsa.h | 1 - 2 files changed, 8 insertions(+), 614 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ea778cc453dc..9a631e399169 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -258,7 +258,6 @@ static int hpsa_scan_finished(struct Scsi_Host *sh, static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); -static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); static int hpsa_slave_configure(struct scsi_device *sdev); static void hpsa_slave_destroy(struct scsi_device *sdev); @@ -326,7 +325,7 @@ static inline bool hpsa_is_cmd_idle(struct CommandList *c) static inline bool hpsa_is_pending_event(struct CommandList *c) { - return c->abort_pending || c->reset_pending; + return c->reset_pending; } /* extract sense key, asc, and ascq from sense data. -1 means invalid. */ @@ -581,12 +580,6 @@ static u32 soft_unresettable_controller[] = { 0x409D0E11, /* Smart Array 6400 EM */ }; -static u32 needs_abort_tags_swizzled[] = { - 0x323D103C, /* Smart Array P700m */ - 0x324a103C, /* Smart Array P712m */ - 0x324b103C, /* SmartArray P711m */ -}; - static int board_id_in_array(u32 a[], int nelems, u32 board_id) { int i; @@ -615,12 +608,6 @@ static int ctlr_is_resettable(u32 board_id) ctlr_is_soft_resettable(board_id); } -static int ctlr_needs_abort_tags_swizzled(u32 board_id) -{ - return board_id_in_array(needs_abort_tags_swizzled, - ARRAY_SIZE(needs_abort_tags_swizzled), board_id); -} - static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { @@ -928,8 +915,8 @@ static struct device_attribute *hpsa_shost_attrs[] = { NULL, }; -#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \ - HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS) +#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\ + HPSA_MAX_CONCURRENT_PASSTHRUS) static struct scsi_host_template hpsa_driver_template = { .module = THIS_MODULE, @@ -941,7 +928,6 @@ static struct scsi_host_template hpsa_driver_template = { .change_queue_depth = hpsa_change_queue_depth, .this_id = -1, .use_clustering = ENABLE_CLUSTERING, - .eh_abort_handler = hpsa_eh_abort_handler, .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, .slave_alloc = hpsa_slave_alloc, @@ -2361,26 +2347,12 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h, bool do_wake = false; /* - * Prevent the following race in the abort handler: - * - * 1. LLD is requested to abort a SCSI command - * 2. The SCSI command completes - * 3. The struct CommandList associated with step 2 is made available - * 4. New I/O request to LLD to another LUN re-uses struct CommandList - * 5. Abort handler follows scsi_cmnd->host_scribble and - * finds struct CommandList and tries to aborts it - * Now we have aborted the wrong command. - * - * Reset c->scsi_cmd here so that the abort or reset handler will know + * Reset c->scsi_cmd here so that the reset handler will know * this command has completed. Then, check to see if the handler is * waiting for this command, and, if so, wake it. */ c->scsi_cmd = SCSI_CMD_IDLE; mb(); /* Declare command idle before checking for pending events. 
*/ - if (c->abort_pending) { - do_wake = true; - c->abort_pending = false; - } if (c->reset_pending) { unsigned long flags; struct hpsa_scsi_dev_t *dev; @@ -2423,20 +2395,6 @@ static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); } -static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd) -{ - cmd->result = DID_ABORT << 16; -} - -static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c, - struct scsi_cmnd *cmd) -{ - hpsa_set_scsi_cmd_aborted(cmd); - dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", - c->Request.CDB, c->err_info->ScsiStatus); - hpsa_cmd_resolve_and_free(h, c); -} - static void process_ioaccel2_completion(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, struct hpsa_scsi_dev_t *dev) @@ -2561,12 +2519,9 @@ static void complete_scsi_command(struct CommandList *cp) return hpsa_cmd_free_and_done(h, cp, cmd); } - if ((unlikely(hpsa_is_pending_event(cp)))) { + if ((unlikely(hpsa_is_pending_event(cp)))) if (cp->reset_pending) return hpsa_cmd_free_and_done(h, cp, cmd); - if (cp->abort_pending) - return hpsa_cmd_abort_and_free(h, cp, cmd); - } if (cp->cmd_type == CMD_IOACCEL2) return process_ioaccel2_completion(h, cp, cmd, dev); @@ -2686,8 +2641,8 @@ static void complete_scsi_command(struct CommandList *cp) cp->Request.CDB); break; case CMD_ABORTED: - /* Return now to avoid calling scsi_done(). */ - return hpsa_cmd_abort_and_free(h, cp, cmd); + cmd->result = DID_ABORT << 16; + break; case CMD_ABORT_FAILED: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", @@ -3793,53 +3748,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h, return HPSA_LV_OK; } -/* - * Find out if a logical device supports aborts by simply trying one. - * Smart Array may claim not to support aborts on logical drives, but - * if a MSA2000 * is connected, the drives on that will be presented - * by the Smart Array as logical drives, and aborts may be sent to - * those devices successfully. So the simplest way to find out is - * to simply try an abort and see how the device responds. - */ -static int hpsa_device_supports_aborts(struct ctlr_info *h, - unsigned char *scsi3addr) -{ - struct CommandList *c; - struct ErrorInfo *ei; - int rc = 0; - - u64 tag = (u64) -1; /* bogus tag */ - - /* Assume that physical devices support aborts */ - if (!is_logical_dev_addr_mode(scsi3addr)) - return 1; - - c = cmd_alloc(h); - - (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); - (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, - DEFAULT_TIMEOUT); - /* no unmap needed here because no data xfer. */ - ei = c->err_info; - switch (ei->CommandStatus) { - case CMD_INVALID: - rc = 0; - break; - case CMD_UNABORTABLE: - case CMD_ABORT_FAILED: - rc = 1; - break; - case CMD_TMF_STATUS: - rc = hpsa_evaluate_tmf_status(h, c); - break; - default: - rc = 0; - break; - } - cmd_free(h, c); - return rc; -} - static int hpsa_update_device_info(struct ctlr_info *h, unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, unsigned char *is_OBDR_device) @@ -3939,31 +3847,6 @@ bail_out: return rc; } -static void hpsa_update_device_supports_aborts(struct ctlr_info *h, - struct hpsa_scsi_dev_t *dev, u8 *scsi3addr) -{ - unsigned long flags; - int rc, entry; - /* - * See if this device supports aborts. 
If we already know - * the device, we already know if it supports aborts, otherwise - * we have to find out if it supports aborts by trying one. - */ - spin_lock_irqsave(&h->devlock, flags); - rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); - if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) && - entry >= 0 && entry < h->ndevices) { - dev->supports_aborts = h->dev[entry]->supports_aborts; - spin_unlock_irqrestore(&h->devlock, flags); - } else { - spin_unlock_irqrestore(&h->devlock, flags); - dev->supports_aborts = - hpsa_device_supports_aborts(h, scsi3addr); - if (dev->supports_aborts < 0) - dev->supports_aborts = 0; - } -} - /* * Helper function to assign bus, target, lun mapping of devices. * Logical drive target and lun are assigned at this time, but @@ -4001,35 +3884,6 @@ static void figure_bus_target_lun(struct ctlr_info *h, 0, lunid & 0x3fff); } - -/* - * Get address of physical disk used for an ioaccel2 mode command: - * 1. Extract ioaccel2 handle from the command. - * 2. Find a matching ioaccel2 handle from list of physical disks. - * 3. Return: - * 1 and set scsi3addr to address of matching physical - * 0 if no matching physical disk was found. - */ -static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, - struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) -{ - struct io_accel2_cmd *c2 = - &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; - unsigned long flags; - int i; - - spin_lock_irqsave(&h->devlock, flags); - for (i = 0; i < h->ndevices; i++) - if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { - memcpy(scsi3addr, h->dev[i]->scsi3addr, - sizeof(h->dev[i]->scsi3addr)); - spin_unlock_irqrestore(&h->devlock, flags); - return 1; - } - spin_unlock_irqrestore(&h->devlock, flags); - return 0; -} - static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, int i, int nphysicals, int nlocal_logicals) { @@ -4394,7 +4248,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) } figure_bus_target_lun(h, lunaddrbytes, tmpdevice); - hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); this_device = currentsd[ncurrent]; /* Turn on discovery_polling if there are ext target devices. 
@@ -5528,8 +5381,6 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) } if (c->reset_pending) return hpsa_cmd_free_and_done(c->h, c, cmd); - if (c->abort_pending) - return hpsa_cmd_abort_and_free(c->h, c, cmd); if (c->cmd_type == CMD_IOACCEL2) { struct ctlr_info *h = c->h; struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; @@ -5987,433 +5838,6 @@ return_reset_status: return rc; } -static void swizzle_abort_tag(u8 *tag) -{ - u8 original_tag[8]; - - memcpy(original_tag, tag, 8); - tag[0] = original_tag[3]; - tag[1] = original_tag[2]; - tag[2] = original_tag[1]; - tag[3] = original_tag[0]; - tag[4] = original_tag[7]; - tag[5] = original_tag[6]; - tag[6] = original_tag[5]; - tag[7] = original_tag[4]; -} - -static void hpsa_get_tag(struct ctlr_info *h, - struct CommandList *c, __le32 *taglower, __le32 *tagupper) -{ - u64 tag; - if (c->cmd_type == CMD_IOACCEL1) { - struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) - &h->ioaccel_cmd_pool[c->cmdindex]; - tag = le64_to_cpu(cm1->tag); - *tagupper = cpu_to_le32(tag >> 32); - *taglower = cpu_to_le32(tag); - return; - } - if (c->cmd_type == CMD_IOACCEL2) { - struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) - &h->ioaccel2_cmd_pool[c->cmdindex]; - /* upper tag not used in ioaccel2 mode */ - memset(tagupper, 0, sizeof(*tagupper)); - *taglower = cm2->Tag; - return; - } - tag = le64_to_cpu(c->Header.tag); - *tagupper = cpu_to_le32(tag >> 32); - *taglower = cpu_to_le32(tag); -} - -static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, - struct CommandList *abort, int reply_queue) -{ - int rc = IO_OK; - struct CommandList *c; - struct ErrorInfo *ei; - __le32 tagupper, taglower; - - c = cmd_alloc(h); - - /* fill_cmd can't fail here, no buffer to map */ - (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, - 0, 0, scsi3addr, TYPE_MSG); - if (h->needs_abort_tags_swizzled) - swizzle_abort_tag(&c->Request.CDB[4]); - (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); - hpsa_get_tag(h, abort, &taglower, &tagupper); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", - __func__, tagupper, taglower); - /* no unmap needed here because no data xfer. */ - - ei = c->err_info; - switch (ei->CommandStatus) { - case CMD_SUCCESS: - break; - case CMD_TMF_STATUS: - rc = hpsa_evaluate_tmf_status(h, c); - break; - case CMD_UNABORTABLE: /* Very common, don't make noise. */ - rc = -1; - break; - default: - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", - __func__, tagupper, taglower); - hpsa_scsi_interpret_error(h, c); - rc = -1; - break; - } - cmd_free(h, c); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", - __func__, tagupper, taglower); - return rc; -} - -static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, - struct CommandList *command_to_abort, int reply_queue) -{ - struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; - struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; - struct io_accel2_cmd *c2a = - &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; - struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; - struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; - - if (!dev) - return; - - /* - * We're overlaying struct hpsa_tmf_struct on top of something which - * was allocated as a struct io_accel2_cmd, so we better be sure it - * actually fits, and doesn't overrun the error info space. 
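
The size check mentioned here is an instance of a general rule: overlaying one structure on memory allocated as another is only safe if the overlay provably fits, which the kernel expresses with BUILD_BUG_ON. A rough standalone equivalent in plain C, with made-up struct names purely for illustration, might look like:

#include <assert.h>	/* static_assert (C11) */

/* Hypothetical example types -- not the driver's structures. */
struct a_example { unsigned char payload[128]; };
struct b_example { int x; unsigned char data[64]; };

/* Fails the build if the overlaid structure would not fit. */
static_assert(sizeof(struct b_example) <= sizeof(struct a_example),
	      "struct b_example must fit inside struct a_example");
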
- */ - BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) > - sizeof(struct io_accel2_cmd)); - BUG_ON(offsetof(struct io_accel2_cmd, error_data) < - offsetof(struct hpsa_tmf_struct, error_len) + - sizeof(ac->error_len)); - - c->cmd_type = IOACCEL2_TMF; - c->scsi_cmd = SCSI_CMD_BUSY; - - /* Adjust the DMA address to point to the accelerated command buffer */ - c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + - (c->cmdindex * sizeof(struct io_accel2_cmd)); - BUG_ON(c->busaddr & 0x0000007F); - - memset(ac, 0, sizeof(*c2)); /* yes this is correct */ - ac->iu_type = IOACCEL2_IU_TMF_TYPE; - ac->reply_queue = reply_queue; - ac->tmf = IOACCEL2_TMF_ABORT; - ac->it_nexus = cpu_to_le32(dev->ioaccel_handle); - memset(ac->lun_id, 0, sizeof(ac->lun_id)); - ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT); - ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag)); - ac->error_ptr = cpu_to_le64(c->busaddr + - offsetof(struct io_accel2_cmd, error_data)); - ac->error_len = cpu_to_le32(sizeof(c2->error_data)); -} - -/* ioaccel2 path firmware cannot handle abort task requests. - * Change abort requests to physical target reset, and send to the - * address of the physical disk used for the ioaccel 2 command. - * Return 0 on success (IO_OK) - * -1 on failure - */ - -static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, - unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) -{ - int rc = IO_OK; - struct scsi_cmnd *scmd; /* scsi command within request being aborted */ - struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ - unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ - unsigned char *psa = &phys_scsi3addr[0]; - - /* Get a pointer to the hpsa logical device. */ - scmd = abort->scsi_cmd; - dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); - if (dev == NULL) { - dev_warn(&h->pdev->dev, - "Cannot abort: no device pointer for command.\n"); - return -1; /* not abortable */ - } - - if (h->raid_offload_debug > 0) - dev_info(&h->pdev->dev, - "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n", - h->scsi_host->host_no, dev->bus, dev->target, dev->lun, - "Reset as abort", scsi3addr); - - if (!dev->offload_enabled) { - dev_warn(&h->pdev->dev, - "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); - return -1; /* not abortable */ - } - - /* Incoming scsi3addr is logical addr. We need physical disk addr. 
*/ - if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { - dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); - return -1; /* not abortable */ - } - - /* send the reset */ - if (h->raid_offload_debug > 0) - dev_info(&h->pdev->dev, - "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n", - psa); - rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue); - if (rc != 0) { - dev_warn(&h->pdev->dev, - "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n", - psa); - return rc; /* failed to reset */ - } - - /* wait for device to recover */ - if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { - dev_warn(&h->pdev->dev, - "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n", - psa); - return -1; /* failed to recover */ - } - - /* device recovered */ - dev_info(&h->pdev->dev, - "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n", - psa); - - return rc; /* success */ -} - -static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, - struct CommandList *abort, int reply_queue) -{ - int rc = IO_OK; - struct CommandList *c; - __le32 taglower, tagupper; - struct hpsa_scsi_dev_t *dev; - struct io_accel2_cmd *c2; - - dev = abort->scsi_cmd->device->hostdata; - if (!dev) - return -1; - - if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) - return -1; - - c = cmd_alloc(h); - setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); - c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; - (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); - hpsa_get_tag(h, abort, &taglower, &tagupper); - dev_dbg(&h->pdev->dev, - "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", - __func__, tagupper, taglower); - /* no unmap needed here because no data xfer. */ - - dev_dbg(&h->pdev->dev, - "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n", - __func__, tagupper, taglower, c2->error_data.serv_response); - switch (c2->error_data.serv_response) { - case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: - case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: - rc = 0; - break; - case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: - case IOACCEL2_SERV_RESPONSE_FAILURE: - case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: - rc = -1; - break; - default: - dev_warn(&h->pdev->dev, - "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n", - __func__, tagupper, taglower, - c2->error_data.serv_response); - rc = -1; - } - cmd_free(h, c); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, - tagupper, taglower); - return rc; -} - -static int hpsa_send_abort_both_ways(struct ctlr_info *h, - struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue) -{ - /* - * ioccelerator mode 2 commands should be aborted via the - * accelerated path, since RAID path is unaware of these commands, - * but not all underlying firmware can handle abort TMF. - * Change abort to physical device reset when abort TMF is unsupported. 
- */ - if (abort->cmd_type == CMD_IOACCEL2) { - if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) || - dev->physical_device) - return hpsa_send_abort_ioaccel2(h, abort, - reply_queue); - else - return hpsa_send_reset_as_abort_ioaccel2(h, - dev->scsi3addr, - abort, reply_queue); - } - return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue); -} - -/* Find out which reply queue a command was meant to return on */ -static int hpsa_extract_reply_queue(struct ctlr_info *h, - struct CommandList *c) -{ - if (c->cmd_type == CMD_IOACCEL2) - return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; - return c->Header.ReplyQueue; -} - -/* - * Limit concurrency of abort commands to prevent - * over-subscription of commands - */ -static inline int wait_for_available_abort_cmd(struct ctlr_info *h) -{ -#define ABORT_CMD_WAIT_MSECS 5000 - return !wait_event_timeout(h->abort_cmd_wait_queue, - atomic_dec_if_positive(&h->abort_cmds_available) >= 0, - msecs_to_jiffies(ABORT_CMD_WAIT_MSECS)); -} - -/* Send an abort for the specified command. - * If the device and controller support it, - * send a task abort request. - */ -static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) -{ - - int rc; - struct ctlr_info *h; - struct hpsa_scsi_dev_t *dev; - struct CommandList *abort; /* pointer to command to be aborted */ - struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ - char msg[256]; /* For debug messaging. */ - int ml = 0; - __le32 tagupper, taglower; - int refcount, reply_queue; - - if (sc == NULL) - return FAILED; - - if (sc->device == NULL) - return FAILED; - - /* Find the controller of the command to be aborted */ - h = sdev_to_hba(sc->device); - if (h == NULL) - return FAILED; - - /* Find the device of the command to be aborted */ - dev = sc->device->hostdata; - if (!dev) { - dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", - msg); - return FAILED; - } - - /* If controller locked up, we can guarantee command won't complete */ - if (lockup_detected(h)) { - hpsa_show_dev_msg(KERN_WARNING, h, dev, - "ABORT FAILED, lockup detected"); - return FAILED; - } - - /* This is a good time to check if controller lockup has occurred */ - if (detect_controller_lockup(h)) { - hpsa_show_dev_msg(KERN_WARNING, h, dev, - "ABORT FAILED, new lockup detected"); - return FAILED; - } - - /* Check that controller supports some kind of task abort */ - if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && - !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) - return FAILED; - - memset(msg, 0, sizeof(msg)); - ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p", - h->scsi_host->host_no, sc->device->channel, - sc->device->id, sc->device->lun, - "Aborting command", sc); - - /* Get SCSI command to be aborted */ - abort = (struct CommandList *) sc->host_scribble; - if (abort == NULL) { - /* This can happen if the command already completed. */ - return SUCCESS; - } - refcount = atomic_inc_return(&abort->refcount); - if (refcount == 1) { /* Command is done already. */ - cmd_free(h, abort); - return SUCCESS; - } - - /* Don't bother trying the abort if we know it won't work. */ - if (abort->cmd_type != CMD_IOACCEL2 && - abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) { - cmd_free(h, abort); - return FAILED; - } - - /* - * Check that we're aborting the right command. - * It's possible the CommandList already completed and got re-used. 
- */ - if (abort->scsi_cmd != sc) { - cmd_free(h, abort); - return SUCCESS; - } - - abort->abort_pending = true; - hpsa_get_tag(h, abort, &taglower, &tagupper); - reply_queue = hpsa_extract_reply_queue(h, abort); - ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); - as = abort->scsi_cmd; - if (as != NULL) - ml += sprintf(msg+ml, - "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ", - as->cmd_len, as->cmnd[0], as->cmnd[1], - as->serial_number); - dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); - hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); - - /* - * Command is in flight, or possibly already completed - * by the firmware (but not to the scsi mid layer) but we can't - * distinguish which. Send the abort down. - */ - if (wait_for_available_abort_cmd(h)) { - dev_warn(&h->pdev->dev, - "%s FAILED, timeout waiting for an abort command to become available.\n", - msg); - cmd_free(h, abort); - return FAILED; - } - rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue); - atomic_inc(&h->abort_cmds_available); - wake_up_all(&h->abort_cmd_wait_queue); - if (rc != 0) { - dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); - hpsa_show_dev_msg(KERN_WARNING, h, dev, - "FAILED to abort command"); - cmd_free(h, abort); - return FAILED; - } - dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); - wait_event(h->event_sync_wait_queue, - abort->scsi_cmd != sc || lockup_detected(h)); - cmd_free(h, abort); - return !lockup_detected(h) ? SUCCESS : FAILED; -} - /* * For operations with an associated SCSI command, a command block is allocated * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the @@ -6459,9 +5883,7 @@ static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) { /* * Release our reference to the block. We don't need to do anything - * else to free it, because it is accessed by index. (There's no point - * in checking the result of the decrement, since we cannot guarantee - * that there isn't a concurrent abort which is also accessing it.) + * else to free it, because it is accessed by index. 
*/ (void)atomic_dec(&c->refcount); } @@ -7000,7 +6422,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, int cmd_type) { int pci_dir = XFER_NONE; - u64 tag; /* for commands to be aborted */ c->cmd_type = CMD_IOCTL_PEND; c->scsi_cmd = SCSI_CMD_BUSY; @@ -7184,27 +6605,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[6] = 0x00; c->Request.CDB[7] = 0x00; break; - case HPSA_ABORT_MSG: - memcpy(&tag, buff, sizeof(tag)); - dev_dbg(&h->pdev->dev, - "Abort Tag:0x%016llx using rqst Tag:0x%016llx", - tag, c->Header.tag); - c->Request.CDBLen = 16; - c->Request.type_attr_dir = - TYPE_ATTR_DIR(cmd_type, - ATTR_SIMPLE, XFER_WRITE); - c->Request.Timeout = 0; /* Don't time out */ - c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; - c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; - c->Request.CDB[2] = 0x00; /* reserved */ - c->Request.CDB[3] = 0x00; /* reserved */ - /* Tag to abort goes in CDB[4]-CDB[11] */ - memcpy(&c->Request.CDB[4], &tag, sizeof(tag)); - c->Request.CDB[12] = 0x00; /* reserved */ - c->Request.CDB[13] = 0x00; /* reserved */ - c->Request.CDB[14] = 0x00; /* reserved */ - c->Request.CDB[15] = 0x00; /* reserved */ - break; default: dev_warn(&h->pdev->dev, "unknown message type %d\n", cmd); @@ -8162,9 +7562,6 @@ static int hpsa_pci_init(struct ctlr_info *h) h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); - h->needs_abort_tags_swizzled = - ctlr_needs_abort_tags_swizzled(h->board_id); - pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); @@ -8885,7 +8282,6 @@ reinit_after_soft_reset: spin_lock_init(&h->scan_lock); spin_lock_init(&h->reset_lock); atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); - atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); /* Allocate and clear per-cpu variable lockup_detected */ h->lockup_detected = alloc_percpu(u32); @@ -8937,7 +8333,6 @@ reinit_after_soft_reset: if (rc) goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ init_waitqueue_head(&h->scan_wait_queue); - init_waitqueue_head(&h->abort_cmd_wait_queue); init_waitqueue_head(&h->event_sync_wait_queue); mutex_init(&h->reset_mutex); h->scan_finished = 1; /* no scan currently in progress */ diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index c9c4927f5d0e..1c49741bc639 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -298,7 +298,6 @@ struct ctlr_info { struct workqueue_struct *resubmit_wq; struct workqueue_struct *rescan_ctlr_wq; atomic_t abort_cmds_available; - wait_queue_head_t abort_cmd_wait_queue; wait_queue_head_t event_sync_wait_queue; struct mutex reset_mutex; u8 reset_in_progress; -- cgit From 30c0061c9ba4e46185da00986ccbbd8de8f8645a Mon Sep 17 00:00:00 2001 From: Don Brace Date: Thu, 4 May 2017 17:51:56 -0500 Subject: scsi: hpsa: bump driver version Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9a631e399169..9934947073e6 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -60,7 +60,7 @@ * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' * with an optional trailing '-' followed by a byte value (0-255). 
*/ -#define HPSA_DRIVER_VERSION "3.4.18-0" +#define HPSA_DRIVER_VERSION "3.4.20-0" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" -- cgit From a37ef74517acf0d022ab4c8fa671c82c877eed7b Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:52:22 -0500 Subject: scsi: smartpqi: correct remove scsi devices correct a problem caused by holding a spinlock during device deletion. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 657ad15682a3..dd00c6954500 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1823,19 +1823,25 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) { unsigned long flags; struct pqi_scsi_dev *device; - struct pqi_scsi_dev *next; - spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + while (1) { + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = list_first_entry_or_null(&ctrl_info->scsi_device_list, + struct pqi_scsi_dev, scsi_device_list_entry); + if (device) + list_del(&device->scsi_device_list_entry); + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, + flags); + + if (!device) + break; - list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, - scsi_device_list_entry) { if (device->sdev) pqi_remove_device(ctrl_info, device); - list_del(&device->scsi_device_list_entry); pqi_free_device(device); } - - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); } static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) -- cgit From 98bf061b0b423a2b6f3c31b7e4b48d947352331c Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:52:28 -0500 Subject: scsi: smartpqi: cleanup interrupt management minor cleanup of interrupt initialization and tear-down. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 43 +++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index dd00c6954500..b5a35055229b 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2913,23 +2913,44 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) return 0; } +static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) +{ + int i; + + for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) + free_irq(pci_irq_vector(ctrl_info->pci_dev, i), + &ctrl_info->queue_groups[i]); + + ctrl_info->num_msix_vectors_initialized = 0; +} + static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) { - int ret; + int num_vectors_enabled; - ret = pci_alloc_irq_vectors(ctrl_info->pci_dev, + num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); - if (ret < 0) { + if (num_vectors_enabled < 0) { dev_err(&ctrl_info->pci_dev->dev, - "MSI-X init failed with error %d\n", ret); - return ret; + "MSI-X init failed with error %d\n", + num_vectors_enabled); + return num_vectors_enabled; } - ctrl_info->num_msix_vectors_enabled = ret; + ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; + return 0; } +static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) +{ + if (ctrl_info->num_msix_vectors_enabled) { + pci_free_irq_vectors(ctrl_info->pci_dev); + ctrl_info->num_msix_vectors_enabled = 0; + } +} + static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) { unsigned int i; @@ -5519,14 +5540,8 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) { - int i; - - for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) { - free_irq(pci_irq_vector(ctrl_info->pci_dev, i), - &ctrl_info->queue_groups[i]); - } - - pci_free_irq_vectors(ctrl_info->pci_dev); + pqi_free_irqs(ctrl_info); + pqi_disable_msix_interrupts(ctrl_info); } static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) -- cgit From a81ed5f338a843d8bfd199928142b196d71ae62c Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:52:34 -0500 Subject: scsi: smartpqi: set pci completion timeout add support for setting PCIe completion timeout. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index b5a35055229b..735cef55d3a2 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5437,6 +5437,13 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return 0; } +static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, + u16 timeout) +{ + return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); +} + static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) { int rc; @@ -5480,6 +5487,17 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->registers = ctrl_info->iomem_base; ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; +#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 + + /* Increase the PCIe completion timeout. */ + rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, + PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to set PCIe completion timeout\n"); + goto release_regions; + } + /* Enable bus mastering. */ pci_set_master(ctrl_info->pci_dev); -- cgit From 5b0fba0f408777113eff93bd18ab0b9f80760fb7 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:52:40 -0500 Subject: scsi: smartpqi: add in controller checkpoint for controller lockups. tell smartpqi controller to generate a checkpoint for rare lockup conditions. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi.h | 2 +- drivers/scsi/smartpqi/smartpqi_init.c | 1 + drivers/scsi/smartpqi/smartpqi_sis.c | 7 +++++++ drivers/scsi/smartpqi/smartpqi_sis.h | 1 + 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index b673825f46b5..73754caa0161 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -933,10 +933,10 @@ struct pqi_ctrl_info { struct Scsi_Host *scsi_host; struct mutex scan_mutex; + bool controller_online : 1; u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; - u8 controller_online : 1; u8 heartbeat_timer_started : 1; struct list_head scsi_device_list; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 735cef55d3a2..14d74e157eb5 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2697,6 +2697,7 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) ctrl_info->controller_online = false; dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); + sis_shutdown_ctrl(ctrl_info); for (i = 0; i < ctrl_info->num_queue_groups; i++) { queue_group = &ctrl_info->queue_groups[i]; diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 71408f9e8f75..c7d9ea10b02a 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -34,6 +34,7 @@ #define SIS_REENABLE_SIS_MODE 0x1 #define SIS_ENABLE_MSIX 0x40 #define SIS_SOFT_RESET 0x100 +#define SIS_TRIGGER_SHUTDOWN 0x800000 #define SIS_CMD_READY 0x200 #define SIS_CMD_COMPLETE 0x1000 #define SIS_CLEAR_CTRL_TO_HOST_DOORBELL 0x1000 @@ -342,6 +343,12 @@ void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) 
&ctrl_info->registers->sis_host_to_ctrl_doorbell); } +void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info) +{ + writel(SIS_TRIGGER_SHUTDOWN, + &ctrl_info->registers->sis_host_to_ctrl_doorbell); +} + #define SIS_MODE_READY_TIMEOUT_SECS 30 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info) diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index bd6e7b08338e..7f7b68ca0bd2 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -27,6 +27,7 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info); void sis_enable_msix(struct pqi_ctrl_info *ctrl_info); void sis_disable_msix(struct pqi_ctrl_info *ctrl_info); void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); +void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info); int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); -- cgit From 162d7753fce9a00719c09dfebd9fee3855e27fbe Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:52:46 -0500 Subject: scsi: smartpqi: ensure controller is in SIS mode at init put in SIS mode during initialization. support kexec/kdump Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi.h | 2 +- drivers/scsi/smartpqi/smartpqi_init.c | 60 ++++++++++++++++++++--------------- drivers/scsi/smartpqi/smartpqi_sis.c | 6 ++++ drivers/scsi/smartpqi/smartpqi_sis.h | 1 + 4 files changed, 42 insertions(+), 27 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 73754caa0161..62045c1990c7 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -964,7 +964,7 @@ struct pqi_ctrl_info { }; enum pqi_ctrl_mode { - UNKNOWN, + SIS_MODE = 0, PQI_MODE }; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 14d74e157eb5..5a5b9abe501f 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5245,38 +5245,50 @@ out: return rc; } -static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info) +/* Switches the controller from PQI mode back into SIS mode. */ + +static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + + sis_disable_msix(ctrl_info); + rc = pqi_reset(ctrl_info); + if (rc) + return rc; + sis_reenable_sis_mode(ctrl_info); + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); + + return 0; +} + +/* + * If the controller isn't already in SIS mode, this function forces it into + * SIS mode. + */ + +static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) { if (!sis_is_firmware_running(ctrl_info)) return -ENXIO; - if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) { - sis_disable_msix(ctrl_info); - if (pqi_reset(ctrl_info) == 0) - sis_reenable_sis_mode(ctrl_info); + if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) + return 0; + + if (sis_is_kernel_up(ctrl_info)) { + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); + return 0; } - return 0; + return pqi_revert_to_sis_mode(ctrl_info); } static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; - if (reset_devices) { - rc = pqi_kdump_init(ctrl_info); - if (rc) - return rc; - } - - /* - * When the controller comes out of reset, it is always running - * in legacy SIS mode. This is so that it can be compatible - * with legacy drivers shipped with OSes. 
So we have to talk - * to it using SIS commands at first. Once we are satisified - * that the controller supports PQI, we transition it into PQI - * mode. - */ + rc = pqi_force_sis_mode(ctrl_info); + if (rc) + return rc; /* * Wait until the controller is ready to start accepting SIS @@ -5594,12 +5606,8 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) cancel_delayed_work_sync(&ctrl_info->update_time_work); pqi_remove_all_scsi_devices(ctrl_info); pqi_unregister_scsi(ctrl_info); - - if (ctrl_info->pqi_mode_enabled) { - sis_disable_msix(ctrl_info); - if (pqi_reset(ctrl_info) == 0) - sis_reenable_sis_mode(ctrl_info); - } + if (ctrl_info->pqi_mode_enabled) + pqi_revert_to_sis_mode(ctrl_info); pqi_free_ctrl_resources(ctrl_info); } diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index c7d9ea10b02a..c5325a4d0f0f 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -127,6 +127,12 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info) return running; } +bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info) +{ + return readl(&ctrl_info->registers->sis_firmware_status) & + SIS_CTRL_KERNEL_UP; +} + /* used for passing command parameters/results when issuing SIS commands */ struct sis_sync_cmd_params { u32 mailbox[6]; /* mailboxes 0-5 */ diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 7f7b68ca0bd2..157768d939a9 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -21,6 +21,7 @@ int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info); bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info); +bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info); int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info); int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info); int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info); -- cgit From 6a50d6ada03d8d9102a632d0e2db70cd9b6620f5 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:52:52 -0500 Subject: scsi: smartpqi: add supporting events Only register for controller events that driver supports cleanup event handling. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 11 +--- drivers/scsi/smartpqi/smartpqi_init.c | 110 ++++++++++++++++++---------------- 2 files changed, 61 insertions(+), 60 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 62045c1990c7..02b31965bf6e 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -860,15 +860,8 @@ struct pqi_io_request { struct list_head request_list_entry; }; -/* for indexing into the pending_events[] field of struct pqi_ctrl_info */ #define PQI_EVENT_HEARTBEAT 0 -#define PQI_EVENT_HOTPLUG 1 -#define PQI_EVENT_HARDWARE 2 -#define PQI_EVENT_PHYSICAL_DEVICE 3 -#define PQI_EVENT_LOGICAL_DEVICE 4 -#define PQI_EVENT_AIO_STATE_CHANGE 5 -#define PQI_EVENT_AIO_CONFIG_CHANGE 6 -#define PQI_NUM_SUPPORTED_EVENTS 7 +#define PQI_NUM_SUPPORTED_EVENTS 6 struct pqi_event { bool pending; @@ -951,7 +944,7 @@ struct pqi_ctrl_info { struct pqi_io_request *io_request_pool; u16 next_io_request_slot; - struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS]; + struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS]; struct work_struct event_work; atomic_t num_interrupts; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 5a5b9abe501f..279dd41796c0 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -81,6 +81,15 @@ static struct scsi_transport_template *pqi_sas_transport_template; static atomic_t pqi_controller_count = ATOMIC_INIT(0); +static unsigned int pqi_supported_event_types[] = { + PQI_EVENT_TYPE_HOTPLUG, + PQI_EVENT_TYPE_HARDWARE, + PQI_EVENT_TYPE_PHYSICAL_DEVICE, + PQI_EVENT_TYPE_LOGICAL_DEVICE, + PQI_EVENT_TYPE_AIO_STATE_CHANGE, + PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, +}; + static int pqi_disable_device_id_wildcards; module_param_named(disable_device_id_wildcards, pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR); @@ -2665,20 +2674,20 @@ static void pqi_event_worker(struct work_struct *work) { unsigned int i; struct pqi_ctrl_info *ctrl_info; - struct pqi_event *pending_event; + struct pqi_event *event; bool got_non_heartbeat_event = false; ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); - pending_event = ctrl_info->pending_events; + event = ctrl_info->events; for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { - if (pending_event->pending) { - pending_event->pending = false; - pqi_acknowledge_event(ctrl_info, pending_event); - if (i != PQI_EVENT_HEARTBEAT) + if (event->pending) { + event->pending = false; + pqi_acknowledge_event(ctrl_info, event); + if (i != PQI_EVENT_TYPE_HEARTBEAT) got_non_heartbeat_event = true; } - pending_event++; + event++; } if (got_non_heartbeat_event) @@ -2742,7 +2751,7 @@ static void pqi_heartbeat_timer_handler(unsigned long data) pqi_take_ctrl_offline(ctrl_info); return; } - ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true; + ctrl_info->events[PQI_EVENT_HEARTBEAT].pending = true; schedule_work(&ctrl_info->event_work); } else { ctrl_info->num_heartbeats_requested = 0; @@ -2773,38 +2782,20 @@ static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) del_timer_sync(&ctrl_info->heartbeat_timer); } -static int pqi_event_type_to_event_index(unsigned int event_type) +static inline int pqi_event_type_to_event_index(unsigned int event_type) { int index; - switch (event_type) { - case PQI_EVENT_TYPE_HEARTBEAT: - index = PQI_EVENT_HEARTBEAT; - break; - case PQI_EVENT_TYPE_HOTPLUG: - index = PQI_EVENT_HOTPLUG; - break; - case 
PQI_EVENT_TYPE_HARDWARE: - index = PQI_EVENT_HARDWARE; - break; - case PQI_EVENT_TYPE_PHYSICAL_DEVICE: - index = PQI_EVENT_PHYSICAL_DEVICE; - break; - case PQI_EVENT_TYPE_LOGICAL_DEVICE: - index = PQI_EVENT_LOGICAL_DEVICE; - break; - case PQI_EVENT_TYPE_AIO_STATE_CHANGE: - index = PQI_EVENT_AIO_STATE_CHANGE; - break; - case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE: - index = PQI_EVENT_AIO_CONFIG_CHANGE; - break; - default: - index = -1; - break; - } + for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) + if (event_type == pqi_supported_event_types[index]) + return index; - return index; + return -1; +} + +static inline bool pqi_is_supported_event(unsigned int event_type) +{ + return pqi_event_type_to_event_index(event_type) != -1; } static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) @@ -2814,7 +2805,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) pqi_index_t oq_ci; struct pqi_event_queue *event_queue; struct pqi_event_response *response; - struct pqi_event *pending_event; + struct pqi_event *event; bool need_delayed_work; int event_index; @@ -2837,15 +2828,14 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) if (event_index >= 0) { if (response->request_acknowlege) { - pending_event = - &ctrl_info->pending_events[event_index]; - pending_event->event_type = - response->event_type; - pending_event->event_id = response->event_id; - pending_event->additional_event_id = + event = &ctrl_info->events[event_index]; + event->pending = true; + event->event_type = response->event_type; + event->event_id = response->event_id; + event->additional_event_id = response->additional_event_id; - if (event_index != PQI_EVENT_HEARTBEAT) { - pending_event->pending = true; + if (event_index != PQI_EVENT_TYPE_HEARTBEAT) { + event->pending = true; need_delayed_work = true; } } @@ -3899,11 +3889,13 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) (offsetof(struct pqi_event_config, descriptors) + \ (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) -static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info) +static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, + bool enable_events) { int rc; unsigned int i; struct pqi_event_config *event_config; + struct pqi_event_descriptor *event_descriptor; struct pqi_general_management_request request; event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, @@ -3937,9 +3929,15 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info) if (rc) goto out; - for (i = 0; i < event_config->num_event_descriptors; i++) - put_unaligned_le16(ctrl_info->event_queue.oq_id, - &event_config->descriptors[i].oq_id); + for (i = 0; i < event_config->num_event_descriptors; i++) { + event_descriptor = &event_config->descriptors[i]; + if (enable_events && + pqi_is_supported_event(event_descriptor->event_type)) + put_unaligned_le16(ctrl_info->event_queue.oq_id, + &event_descriptor->oq_id); + else + put_unaligned_le16(0, &event_descriptor->oq_id); + } memset(&request, 0, sizeof(request)); @@ -3970,6 +3968,16 @@ out: return rc; } +static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) +{ + return pqi_configure_events(ctrl_info, true); +} + +static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) +{ + return pqi_configure_events(ctrl_info, false); +} + static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) { unsigned int i; @@ -5413,10 +5421,10 @@ static int pqi_ctrl_init(struct pqi_ctrl_info 
*ctrl_info) sis_enable_msix(ctrl_info); - rc = pqi_configure_events(ctrl_info); + rc = pqi_enable_events(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, - "error configuring events\n"); + "error enabling events\n"); return rc; } -- cgit From 7561a7e4412e515100ac195303531fc2621ac2db Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:52:58 -0500 Subject: scsi: smartpqi: enhance resets - Block all I/O targeted at LUN reset device. - Wait until all I/O targeted at LUN reset device has been consumed by the controller. - Issue LUN reset request. - Wait until all outstanding I/Os and LUN reset completion have been received by the host. - Return to OS results of LUN reset request. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi.h | 13 +- drivers/scsi/smartpqi/smartpqi_init.c | 279 +++++++++++++++++++++++++++++++--- 2 files changed, 265 insertions(+), 27 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 02b31965bf6e..5b0c6fb3ceb1 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -734,6 +734,8 @@ struct pqi_scsi_dev { u8 new_device : 1; u8 keep_device : 1; u8 volume_offline : 1; + bool in_reset; + bool device_offline; u8 vendor[8]; /* bytes 8-15 of inquiry data */ u8 model[16]; /* bytes 16-31 of inquiry data */ u64 sas_address; @@ -761,6 +763,8 @@ struct pqi_scsi_dev { struct list_head new_device_list_entry; struct list_head add_list_entry; struct list_head delete_list_entry; + + atomic_t scsi_cmds_outstanding; }; /* VPD inquiry pages */ @@ -926,7 +930,9 @@ struct pqi_ctrl_info { struct Scsi_Host *scsi_host; struct mutex scan_mutex; - bool controller_online : 1; + struct mutex lun_reset_mutex; + bool controller_online; + bool block_requests; u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; @@ -953,7 +959,9 @@ struct pqi_ctrl_info { struct timer_list heartbeat_timer; struct semaphore sync_request_sem; - struct semaphore lun_reset_sem; + atomic_t num_busy_threads; + atomic_t num_blocked_threads; + wait_queue_head_t block_requests_wait; }; enum pqi_ctrl_mode { @@ -1092,6 +1100,7 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, void pqi_remove_sas_device(struct pqi_scsi_dev *device); struct pqi_scsi_dev *pqi_find_device_by_sas_rphy( struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy); +void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd); extern struct sas_function_template pqi_sas_transport_functions; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 279dd41796c0..bd8a66d9acc0 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -126,6 +126,7 @@ static char *pqi_raid_level_to_string(u8 raid_level) static inline void pqi_scsi_done(struct scsi_cmnd *scmd) { + pqi_prep_for_scsi_done(scmd); scmd->scsi_done(scmd); } @@ -175,7 +176,85 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, sis_write_driver_scratch(ctrl_info, mode); } -#define PQI_RESCAN_WORK_INTERVAL (10 * HZ) +#define PQI_RESCAN_WORK_INTERVAL (10 * HZ) +static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->block_requests = true; + scsi_block_requests(ctrl_info->scsi_host); +} + +static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->block_requests = false; + 
wake_up_all(&ctrl_info->block_requests_wait); + scsi_unblock_requests(ctrl_info->scsi_host); +} + +static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->block_requests; +} + +static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, + unsigned long timeout_msecs) +{ + unsigned long remaining_msecs; + + if (!pqi_ctrl_blocked(ctrl_info)) + return timeout_msecs; + + atomic_inc(&ctrl_info->num_blocked_threads); + + if (timeout_msecs == NO_TIMEOUT) { + wait_event(ctrl_info->block_requests_wait, + !pqi_ctrl_blocked(ctrl_info)); + remaining_msecs = timeout_msecs; + } else { + unsigned long remaining_jiffies; + + remaining_jiffies = + wait_event_timeout(ctrl_info->block_requests_wait, + !pqi_ctrl_blocked(ctrl_info), + msecs_to_jiffies(timeout_msecs)); + remaining_msecs = jiffies_to_msecs(remaining_jiffies); + } + + atomic_dec(&ctrl_info->num_blocked_threads); + + return remaining_msecs; +} + +static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) +{ + atomic_inc(&ctrl_info->num_busy_threads); +} + +static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) +{ + atomic_dec(&ctrl_info->num_busy_threads); +} + +static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) +{ + while (atomic_read(&ctrl_info->num_busy_threads) > + atomic_read(&ctrl_info->num_blocked_threads)) + usleep_range(1000, 2000); +} + +static inline void pqi_device_reset_start(struct pqi_scsi_dev *device) +{ + device->in_reset = true; +} + +static inline void pqi_device_reset_done(struct pqi_scsi_dev *device) +{ + device->in_reset = false; +} + +static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device) +{ + return device->in_reset; +} static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) { @@ -2679,6 +2758,9 @@ static void pqi_event_worker(struct work_struct *work) ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); + pqi_ctrl_busy(ctrl_info); + pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); + event = ctrl_info->events; for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { if (event->pending) { @@ -2690,8 +2772,9 @@ static void pqi_event_worker(struct work_struct *work) event++; } - if (got_non_heartbeat_event) - pqi_schedule_rescan_worker(ctrl_info); + pqi_ctrl_unbusy(ctrl_info); + + pqi_schedule_rescan_worker(ctrl_info); } static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) @@ -3436,6 +3519,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, } } + pqi_ctrl_busy(ctrl_info); + timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); + if (timeout_msecs == 0) { + rc = -ETIMEDOUT; + goto out; + } + io_request = pqi_alloc_io_request(ctrl_info); put_unaligned_le16(io_request->index, @@ -3476,6 +3566,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, pqi_free_io_request(io_request); +out: + pqi_ctrl_unbusy(ctrl_info); up(&ctrl_info->sync_request_sem); return rc; @@ -4499,6 +4591,19 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, return 0; } +/* + * This function gets called just before we hand the completed SCSI request + * back to the SML. 
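As a rough illustration of the accounting this reset rework leans on (queuecommand bumps a per-device counter, the completion path drops it, and the reset handler waits for the count to drain), here is a minimal userspace sketch using C11 atomics in place of the kernel's atomic_t; all names are illustrative, not the driver's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct model_device {
            atomic_int cmds_outstanding;
    };

    /* queuecommand path: count the command before handing it to the controller */
    static void model_queue_command(struct model_device *dev)
    {
            atomic_fetch_add(&dev->cmds_outstanding, 1);
    }

    /* completion path, analogous to pqi_prep_for_scsi_done(): drop the count
     * just before the command is handed back to the midlayer */
    static void model_complete_command(struct model_device *dev)
    {
            atomic_fetch_sub(&dev->cmds_outstanding, 1);
    }

    /* reset path: poll until the device has nothing in flight, giving up if
     * the controller goes offline (the driver sleeps 1-2 ms between polls) */
    static bool model_wait_for_pending_io(struct model_device *dev,
                                          bool (*ctrl_online)(void))
    {
            while (atomic_load(&dev->cmds_outstanding) > 0)
                    if (!ctrl_online())
                            return false;
            return true;
    }

    static bool always_online(void) { return true; }

    int main(void)
    {
            struct model_device dev = { .cmds_outstanding = 0 };

            model_queue_command(&dev);
            model_complete_command(&dev);
            printf("drained: %d\n",
                   model_wait_for_pending_io(&dev, always_online));
            return 0;
    }

In the patch itself, requests that are still queued in the driver are first failed with DID_RESET (which also decrements the counter via the completion path), so the wait cannot hang on I/O the controller never received.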
+ */ + +void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) +{ + struct pqi_scsi_dev *device; + + device = scmd->device->hostdata; + atomic_dec(&device->scsi_cmds_outstanding); +} + static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { @@ -4512,12 +4617,20 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, device = scmd->device->hostdata; ctrl_info = shost_to_hba(shost); + atomic_inc(&device->scsi_cmds_outstanding); + if (pqi_ctrl_offline(ctrl_info)) { set_host_byte(scmd, DID_NO_CONNECT); pqi_scsi_done(scmd); return 0; } + pqi_ctrl_busy(ctrl_info); + if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + /* * This is necessary because the SML doesn't zero out this field during * error recovery. @@ -4554,9 +4667,116 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, queue_group); } +out: + pqi_ctrl_unbusy(ctrl_info); + if (rc) + atomic_dec(&device->scsi_cmds_outstanding); + return rc; } +static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, + struct pqi_queue_group *queue_group) +{ + unsigned int path; + unsigned long flags; + bool list_is_empty; + + for (path = 0; path < 2; path++) { + while (1) { + spin_lock_irqsave( + &queue_group->submit_lock[path], flags); + list_is_empty = + list_empty(&queue_group->request_list[path]); + spin_unlock_irqrestore( + &queue_group->submit_lock[path], flags); + if (list_is_empty) + break; + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + usleep_range(1000, 2000); + } + } + + return 0; +} + +static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + unsigned int i; + unsigned int path; + struct pqi_queue_group *queue_group; + pqi_index_t iq_pi; + pqi_index_t iq_ci; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + + rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); + if (rc) + return rc; + + for (path = 0; path < 2; path++) { + iq_pi = queue_group->iq_pi_copy[path]; + + while (1) { + iq_ci = *queue_group->iq_ci[path]; + if (iq_ci == iq_pi) + break; + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + usleep_range(1000, 2000); + } + } + } + + return 0; +} + +static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + unsigned int i; + unsigned int path; + struct pqi_queue_group *queue_group; + unsigned long flags; + struct pqi_io_request *io_request; + struct pqi_io_request *next; + struct scsi_cmnd *scmd; + struct pqi_scsi_dev *scsi_device; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + + for (path = 0; path < 2; path++) { + spin_lock_irqsave( + &queue_group->submit_lock[path], flags); + + list_for_each_entry_safe(io_request, next, + &queue_group->request_list[path], + request_list_entry) { + scmd = io_request->scmd; + if (!scmd) + continue; + + scsi_device = scmd->device->hostdata; + if (scsi_device != device) + continue; + + list_del(&io_request->request_list_entry); + set_host_byte(scmd, DID_RESET); + pqi_scsi_done(scmd); + } + + spin_unlock_irqrestore( + &queue_group->submit_lock[path], flags); + } + } +} + static void pqi_lun_reset_complete(struct pqi_io_request *io_request, void *context) { @@ -4571,7 +4791,6 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct completion 
*wait) { int rc; - unsigned int wait_secs = 0; while (1) { if (wait_for_completion_io_timeout(wait, @@ -4585,13 +4804,6 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, rc = -ETIMEDOUT; break; } - - wait_secs += PQI_LUN_RESET_TIMEOUT_SECS; - - dev_err(&ctrl_info->pci_dev->dev, - "resetting scsi %d:%d:%d:%d - waiting %u seconds\n", - ctrl_info->scsi_host->host_no, device->bus, - device->target, device->lun, wait_secs); } return rc; @@ -4605,8 +4817,6 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, DECLARE_COMPLETION_ONSTACK(wait); struct pqi_task_management_request *request; - down(&ctrl_info->lun_reset_sem); - io_request = pqi_alloc_io_request(ctrl_info); io_request->io_complete_callback = pqi_lun_reset_complete; io_request->context = &wait; @@ -4631,7 +4841,6 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, rc = io_request->status; pqi_free_io_request(io_request); - up(&ctrl_info->lun_reset_sem); return rc; } @@ -4643,10 +4852,6 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, { int rc; - pqi_check_ctrl_health(ctrl_info); - if (pqi_ctrl_offline(ctrl_info)) - return FAILED; - rc = pqi_lun_reset(ctrl_info, device); return rc == 0 ? SUCCESS : FAILED; @@ -4655,23 +4860,46 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) { int rc; + struct Scsi_Host *shost; struct pqi_ctrl_info *ctrl_info; struct pqi_scsi_dev *device; - ctrl_info = shost_to_hba(scmd->device->host); + shost = scmd->device->host; + ctrl_info = shost_to_hba(shost); device = scmd->device->hostdata; dev_err(&ctrl_info->pci_dev->dev, "resetting scsi %d:%d:%d:%d\n", - ctrl_info->scsi_host->host_no, - device->bus, device->target, device->lun); + shost->host_no, device->bus, device->target, device->lun); - rc = pqi_device_reset(ctrl_info, device); + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) { + rc = FAILED; + goto out; + } + mutex_lock(&ctrl_info->lun_reset_mutex); + + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + pqi_fail_io_queued_for_device(ctrl_info, device); + rc = pqi_wait_until_inbound_queues_empty(ctrl_info); + pqi_device_reset_start(device); + pqi_ctrl_unblock_requests(ctrl_info); + + if (rc) + rc = FAILED; + else + rc = pqi_device_reset(ctrl_info, device); + + pqi_device_reset_done(device); + + mutex_unlock(&ctrl_info->lun_reset_mutex); + +out: dev_err(&ctrl_info->pci_dev->dev, "reset of scsi %d:%d:%d:%d: %s\n", - ctrl_info->scsi_host->host_no, - device->bus, device->target, device->lun, + shost->host_no, device->bus, device->target, device->lun, rc == SUCCESS ? 
"SUCCESS" : "FAILED"); return rc; @@ -5552,6 +5780,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) return NULL; mutex_init(&ctrl_info->scan_mutex); + mutex_init(&ctrl_info->lun_reset_mutex); INIT_LIST_HEAD(&ctrl_info->scsi_device_list); spin_lock_init(&ctrl_info->scsi_device_list_lock); @@ -5564,7 +5793,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) sema_init(&ctrl_info->sync_request_sem, PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); - sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET); + init_waitqueue_head(&ctrl_info->block_requests_wait); ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; -- cgit From 061ef06a2d436cea85984cf0b51b452547a5496c Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:05 -0500 Subject: scsi: smartpqi: add suspend and resume support add support for ACPI S3 (suspend) and S4 (hibernate) system power states. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi.h | 12 +- drivers/scsi/smartpqi/smartpqi_init.c | 425 +++++++++++++++++++++++++++++++--- drivers/scsi/smartpqi/smartpqi_sis.c | 75 +++++- drivers/scsi/smartpqi/smartpqi_sis.h | 3 + 4 files changed, 485 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 5b0c6fb3ceb1..06e2b7152d52 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -61,7 +61,7 @@ struct pqi_device_registers { /* * controller registers * - * These are defined by the PMC implementation. + * These are defined by the Microsemi implementation. * * Some registers (those named sis_*) are only used when in * legacy SIS mode before we transition the controller into @@ -102,6 +102,12 @@ enum pqi_io_path { AIO_PATH = 1 }; +enum pqi_irq_mode { + IRQ_MODE_NONE, + IRQ_MODE_INTX, + IRQ_MODE_MSIX +}; + struct pqi_sg_descriptor { __le64 address; __le32 length; @@ -908,7 +914,7 @@ struct pqi_ctrl_info { dma_addr_t error_buffer_dma_handle; size_t sg_chain_buffer_length; unsigned int num_queue_groups; - unsigned int num_active_queue_groups; + u16 max_hw_queue_index; u16 num_elements_per_iq; u16 num_elements_per_oq; u16 max_inbound_iu_length_per_firmware; @@ -923,6 +929,7 @@ struct pqi_ctrl_info { struct pqi_admin_queues admin_queues; struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS]; struct pqi_event_queue event_queue; + enum pqi_irq_mode irq_mode; int max_msix_vectors; int num_msix_vectors_enabled; int num_msix_vectors_initialized; @@ -937,6 +944,7 @@ struct pqi_ctrl_info { u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; u8 heartbeat_timer_started : 1; + u8 update_time_worker_scheduled : 1; struct list_head scsi_device_list; spinlock_t scsi_device_list_lock; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index bd8a66d9acc0..7af4add16276 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -262,6 +262,11 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) PQI_RESCAN_WORK_INTERVAL); } +static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) +{ + cancel_delayed_work_sync(&ctrl_info->rescan_work); +} + static int pqi_map_single(struct pci_dev *pci_dev, struct pqi_sg_descriptor *sg_descriptor, void *buffer, size_t buffer_length, int data_direction) @@ -588,7 +593,7 @@ 
static int pqi_write_driver_version_to_host_wellness( buffer->driver_version_tag[1] = 'V'; put_unaligned_le16(sizeof(buffer->driver_version), &buffer->driver_version_length); - strncpy(buffer->driver_version, DRIVER_VERSION, + strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, sizeof(buffer->driver_version) - 1); buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; buffer->end_tag[0] = 'Z'; @@ -686,7 +691,21 @@ static void pqi_update_time_worker(struct work_struct *work) static inline void pqi_schedule_update_time_worker( struct pqi_ctrl_info *ctrl_info) { + if (ctrl_info->update_time_worker_scheduled) + return; + schedule_delayed_work(&ctrl_info->update_time_work, 0); + ctrl_info->update_time_worker_scheduled = true; +} + +static inline void pqi_cancel_update_time_worker( + struct pqi_ctrl_info *ctrl_info) +{ + if (!ctrl_info->update_time_worker_scheduled) + return; + + cancel_delayed_work_sync(&ctrl_info->update_time_work); + ctrl_info->update_time_worker_scheduled = false; } static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, @@ -1967,6 +1986,18 @@ static int pqi_scan_finished(struct Scsi_Host *shost, return !mutex_is_locked(&ctrl_info->scan_mutex); } +static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) +{ + mutex_lock(&ctrl_info->scan_mutex); + mutex_unlock(&ctrl_info->scan_mutex); +} + +static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) +{ + mutex_lock(&ctrl_info->lun_reset_mutex); + mutex_unlock(&ctrl_info->lun_reset_mutex); +} + static inline void pqi_set_encryption_info( struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, u64 first_block) @@ -2825,6 +2856,9 @@ static void pqi_heartbeat_timer_handler(unsigned long data) int num_interrupts; struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data; + if (!ctrl_info->heartbeat_timer_started) + return; + num_interrupts = atomic_read(&ctrl_info->num_interrupts); if (num_interrupts == ctrl_info->previous_num_interrupts) { @@ -2855,14 +2889,16 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info; ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler; - add_timer(&ctrl_info->heartbeat_timer); ctrl_info->heartbeat_timer_started = true; + add_timer(&ctrl_info->heartbeat_timer); } static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) { - if (ctrl_info->heartbeat_timer_started) + if (ctrl_info->heartbeat_timer_started) { + ctrl_info->heartbeat_timer_started = false; del_timer_sync(&ctrl_info->heartbeat_timer); + } } static inline int pqi_event_type_to_event_index(unsigned int event_type) @@ -2938,6 +2974,106 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) return num_events; } +#define PQI_LEGACY_INTX_MASK 0x1 + +static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, + bool enable_intx) +{ + u32 intx_mask; + struct pqi_device_registers __iomem *pqi_registers; + volatile void __iomem *register_addr; + + pqi_registers = ctrl_info->pqi_registers; + + if (enable_intx) + register_addr = &pqi_registers->legacy_intx_mask_clear; + else + register_addr = &pqi_registers->legacy_intx_mask_set; + + intx_mask = readl(register_addr); + intx_mask |= PQI_LEGACY_INTX_MASK; + writel(intx_mask, register_addr); +} + +static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, + enum pqi_irq_mode new_mode) +{ + switch (ctrl_info->irq_mode) { + 
case IRQ_MODE_MSIX: + switch (new_mode) { + case IRQ_MODE_MSIX: + break; + case IRQ_MODE_INTX: + pqi_configure_legacy_intx(ctrl_info, true); + sis_disable_msix(ctrl_info); + sis_enable_intx(ctrl_info); + break; + case IRQ_MODE_NONE: + sis_disable_msix(ctrl_info); + break; + } + break; + case IRQ_MODE_INTX: + switch (new_mode) { + case IRQ_MODE_MSIX: + pqi_configure_legacy_intx(ctrl_info, false); + sis_disable_intx(ctrl_info); + sis_enable_msix(ctrl_info); + break; + case IRQ_MODE_INTX: + break; + case IRQ_MODE_NONE: + pqi_configure_legacy_intx(ctrl_info, false); + sis_disable_intx(ctrl_info); + break; + } + break; + case IRQ_MODE_NONE: + switch (new_mode) { + case IRQ_MODE_MSIX: + sis_enable_msix(ctrl_info); + break; + case IRQ_MODE_INTX: + pqi_configure_legacy_intx(ctrl_info, true); + sis_enable_intx(ctrl_info); + break; + case IRQ_MODE_NONE: + break; + } + break; + } + + ctrl_info->irq_mode = new_mode; +} + +#define PQI_LEGACY_INTX_PENDING 0x1 + +static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) +{ + bool valid_irq; + u32 intx_status; + + switch (ctrl_info->irq_mode) { + case IRQ_MODE_MSIX: + valid_irq = true; + break; + case IRQ_MODE_INTX: + intx_status = + readl(&ctrl_info->pqi_registers->legacy_intx_status); + if (intx_status & PQI_LEGACY_INTX_PENDING) + valid_irq = true; + else + valid_irq = false; + break; + case IRQ_MODE_NONE: + default: + valid_irq = false; + break; + } + + return valid_irq; +} + static irqreturn_t pqi_irq_handler(int irq, void *data) { struct pqi_ctrl_info *ctrl_info; @@ -2947,7 +3083,7 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) queue_group = data; ctrl_info = queue_group->ctrl_info; - if (!ctrl_info || !queue_group->oq_ci) + if (!pqi_is_valid_irq(ctrl_info)) return IRQ_NONE; num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); @@ -3013,7 +3149,7 @@ static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) } ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; - + ctrl_info->irq_mode = IRQ_MODE_MSIX; return 0; } @@ -3798,16 +3934,15 @@ static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) return 0; } -static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info) +static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, + unsigned int group_number) { - unsigned int i; int rc; struct pqi_queue_group *queue_group; struct pqi_general_admin_request request; struct pqi_general_admin_response response; - i = ctrl_info->num_active_queue_groups; - queue_group = &ctrl_info->queue_groups[i]; + queue_group = &ctrl_info->queue_groups[group_number]; /* * Create IQ (Inbound Queue - host to device queue) for @@ -3937,8 +4072,6 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info) get_unaligned_le64( &response.data.create_operational_oq.oq_ci_offset); - ctrl_info->num_active_queue_groups++; - return 0; delete_inbound_queue_aio: @@ -3965,7 +4098,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) } for (i = 0; i < ctrl_info->num_queue_groups; i++) { - rc = pqi_create_queue_group(ctrl_info); + rc = pqi_create_queue_group(ctrl_info, i); if (rc) { dev_err(&ctrl_info->pci_dev->dev, "error creating queue group number %u/%u\n", @@ -4219,6 +4352,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) num_queue_groups = min(num_queue_groups, max_queue_groups); ctrl_info->num_queue_groups = num_queue_groups; + ctrl_info->max_hw_queue_index = num_queue_groups - 1; /* * Make sure that the max. 
inbound IU length is an even multiple @@ -4591,6 +4725,18 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, return 0; } +static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd) +{ + u16 hw_queue; + + hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); + if (hw_queue > ctrl_info->max_hw_queue_index) + hw_queue = 0; + + return hw_queue; +} + /* * This function gets called just before we hand the completed SCSI request * back to the SML. @@ -4610,7 +4756,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, int rc; struct pqi_ctrl_info *ctrl_info; struct pqi_scsi_dev *device; - u16 hwq; + u16 hw_queue; struct pqi_queue_group *queue_group; bool raid_bypassed; @@ -4637,11 +4783,8 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, */ scmd->result = 0; - hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); - if (hwq >= ctrl_info->num_queue_groups) - hwq = 0; - - queue_group = &ctrl_info->queue_groups[hwq]; + hw_queue = pqi_get_hw_queue(ctrl_info, scmd); + queue_group = &ctrl_info->queue_groups[hw_queue]; if (pqi_is_logical_device(device)) { raid_bypassed = false; @@ -4777,6 +4920,52 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, } } +static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + while (atomic_read(&device->scsi_cmds_outstanding)) { + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + usleep_range(1000, 2000); + } + + return 0; +} + +static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) +{ + bool io_pending; + unsigned long flags; + struct pqi_scsi_dev *device; + + while (1) { + io_pending = false; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_for_each_entry(device, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { + if (atomic_read(&device->scsi_cmds_outstanding)) { + io_pending = true; + break; + } + } + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, + flags); + + if (!io_pending) + break; + + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + + usleep_range(1000, 2000); + } + + return 0; +} + static void pqi_lun_reset_complete(struct pqi_io_request *io_request, void *context) { @@ -4853,6 +5042,8 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, int rc; rc = pqi_lun_reset(ctrl_info, device); + if (rc == 0) + rc = pqi_device_wait_for_pending_io(ctrl_info, device); return rc == 0 ? SUCCESS : FAILED; } @@ -5487,7 +5678,7 @@ static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) { int rc; - sis_disable_msix(ctrl_info); + pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); rc = pqi_reset(ctrl_info); if (rc) return rc; @@ -5647,7 +5838,10 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; - sis_enable_msix(ctrl_info); + pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); + + ctrl_info->controller_online = true; + pqi_start_heartbeat_timer(ctrl_info); rc = pqi_enable_events(ctrl_info); if (rc) { @@ -5656,10 +5850,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return rc; } - pqi_start_heartbeat_timer(ctrl_info); - - ctrl_info->controller_online = true; - /* Register with the SCSI subsystem. 
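The hardware-queue selection factored out earlier in this patch clamps an out-of-range block-layer queue index back to queue 0, which matters once a resumed controller may expose only a single interrupt vector; a tiny stand-alone model of that clamp, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    /* like pqi_get_hw_queue(): fall back to queue 0 whenever the index derived
     * from the block-layer tag exceeds what the controller currently exposes */
    static uint16_t pick_hw_queue(uint16_t requested, uint16_t max_hw_queue_index)
    {
            if (requested > max_hw_queue_index)
                    return 0;
            return requested;
    }

    int main(void)
    {
            printf("%u\n", (unsigned)pick_hw_queue(5, 3));  /* 0: clamped */
            printf("%u\n", (unsigned)pick_hw_queue(2, 3));  /* 2: in range */
            return 0;
    }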
*/ rc = pqi_register_scsi(ctrl_info); if (rc) @@ -5686,6 +5876,116 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return 0; } +#if defined(CONFIG_PM) + +static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct pqi_admin_queues *admin_queues; + struct pqi_event_queue *event_queue; + + admin_queues = &ctrl_info->admin_queues; + admin_queues->iq_pi_copy = 0; + admin_queues->oq_ci_copy = 0; + *admin_queues->oq_pi = 0; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; + ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; + ctrl_info->queue_groups[i].oq_ci_copy = 0; + + *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0; + *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0; + *ctrl_info->queue_groups[i].oq_pi = 0; + } + + event_queue = &ctrl_info->event_queue; + *event_queue->oq_pi = 0; + event_queue->oq_ci_copy = 0; +} + +static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + + rc = pqi_force_sis_mode(ctrl_info); + if (rc) + return rc; + + /* + * Wait until the controller is ready to start accepting SIS + * commands. + */ + rc = sis_wait_for_ctrl_ready_resume(ctrl_info); + if (rc) + return rc; + + /* + * If the function we are about to call succeeds, the + * controller will transition from legacy SIS mode + * into PQI mode. + */ + rc = sis_init_base_struct_addr(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error initializing PQI mode\n"); + return rc; + } + + /* Wait for the controller to complete the SIS -> PQI transition. */ + rc = pqi_wait_for_pqi_mode_ready(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "transition to PQI mode failed\n"); + return rc; + } + + /* From here on, we are running in PQI mode. 
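pqi_reinit_queues() above rewinds every cached producer index and shared consumer counter before the queues are re-created on resume; the toy ring below is a sketch of why both sides must start from zero again for the free-element math (elements - used - 1) used elsewhere in the driver to stay correct. Names and sizes are illustrative only:

    #include <stdio.h>

    #define RING_ELEMENTS 16u

    struct toy_ring {
            unsigned int pi_copy;   /* driver's cached producer index */
            unsigned int ci;        /* consumer index the hardware advances */
    };

    static unsigned int toy_free_elements(const struct toy_ring *ring)
    {
            unsigned int used = (ring->pi_copy - ring->ci) % RING_ELEMENTS;

            return RING_ELEMENTS - used - 1;
    }

    /* resume path: rewind both indices before the queue is re-created */
    static void toy_reinit_ring(struct toy_ring *ring)
    {
            ring->pi_copy = 0;
            ring->ci = 0;
    }

    int main(void)
    {
            struct toy_ring ring = { .pi_copy = 9, .ci = 4 };

            printf("free before reinit: %u\n", toy_free_elements(&ring));
            toy_reinit_ring(&ring);
            printf("free after reinit:  %u\n", toy_free_elements(&ring));
            return 0;
    }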
*/ + ctrl_info->pqi_mode_enabled = true; + pqi_save_ctrl_mode(ctrl_info, PQI_MODE); + + pqi_reinit_queues(ctrl_info); + + rc = pqi_create_admin_queues(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating admin queues\n"); + return rc; + } + + rc = pqi_create_queues(ctrl_info); + if (rc) + return rc; + + pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); + + ctrl_info->controller_online = true; + pqi_start_heartbeat_timer(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); + + rc = pqi_enable_events(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error configuring events\n"); + return rc; + } + + rc = pqi_write_driver_version_to_host_wellness(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error updating host wellness\n"); + return rc; + } + + pqi_schedule_update_time_worker(ctrl_info); + + pqi_scan_scsi_devices(ctrl_info); + + return 0; +} + +#endif /* CONFIG_PM */ + static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) { @@ -5796,6 +6096,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) init_waitqueue_head(&ctrl_info->block_requests_wait); ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; + ctrl_info->irq_mode = IRQ_MODE_NONE; ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; return ctrl_info; @@ -5839,8 +6140,8 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) { - cancel_delayed_work_sync(&ctrl_info->rescan_work); - cancel_delayed_work_sync(&ctrl_info->update_time_work); + pqi_cancel_rescan_worker(ctrl_info); + pqi_cancel_update_time_worker(ctrl_info); pqi_remove_all_scsi_devices(ctrl_info); pqi_unregister_scsi(ctrl_info); if (ctrl_info->pqi_mode_enabled) @@ -5952,6 +6253,71 @@ error: "unable to flush controller cache\n"); } +#if defined(CONFIG_PM) + +static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = pci_get_drvdata(pci_dev); + + pqi_disable_events(ctrl_info); + pqi_cancel_update_time_worker(ctrl_info); + pqi_cancel_rescan_worker(ctrl_info); + pqi_wait_until_scan_finished(ctrl_info); + pqi_wait_until_lun_reset_finished(ctrl_info); + pqi_flush_cache(ctrl_info); + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + pqi_wait_until_inbound_queues_empty(ctrl_info); + pqi_ctrl_wait_for_pending_io(ctrl_info); + pqi_stop_heartbeat_timer(ctrl_info); + + if (state.event == PM_EVENT_FREEZE) + return 0; + + pci_save_state(pci_dev); + pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); + + ctrl_info->controller_online = false; + ctrl_info->pqi_mode_enabled = false; + + return 0; +} + +static int pqi_resume(struct pci_dev *pci_dev) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = pci_get_drvdata(pci_dev); + + if (pci_dev->current_state != PCI_D0) { + ctrl_info->max_hw_queue_index = 0; + pqi_free_interrupts(ctrl_info); + pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); + rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, + IRQF_SHARED, DRIVER_NAME_SHORT, + &ctrl_info->queue_groups[0]); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "irq %u init failed with error %d\n", + pci_dev->irq, rc); + return rc; + } + pqi_start_heartbeat_timer(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); + return 0; + } + + pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); + + return pqi_ctrl_init_resume(ctrl_info); +} + +#endif /* CONFIG_PM */ + /* Define the PCI 
IDs for the controllers that we support. */ static const struct pci_device_id pqi_pci_id_table[] = { { @@ -6093,6 +6459,10 @@ static struct pci_driver pqi_pci_driver = { .probe = pqi_pci_probe, .remove = pqi_pci_remove, .shutdown = pqi_shutdown, +#if defined(CONFIG_PM) + .suspend = pqi_suspend, + .resume = pqi_resume, +#endif }; static int __init pqi_init(void) @@ -6458,6 +6828,9 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_event_config, descriptors) != 4); + BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != + ARRAY_SIZE(pqi_supported_event_types)); + BUILD_BUG_ON(offsetof(struct pqi_event_response, header.iu_type) != 0); BUILD_BUG_ON(offsetof(struct pqi_event_response, diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index c5325a4d0f0f..12853fd75550 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -33,6 +33,7 @@ /* for submission of legacy SIS commands */ #define SIS_REENABLE_SIS_MODE 0x1 #define SIS_ENABLE_MSIX 0x40 +#define SIS_ENABLE_INTX 0x80 #define SIS_SOFT_RESET 0x100 #define SIS_TRIGGER_SHUTDOWN 0x800000 #define SIS_CMD_READY 0x200 @@ -56,6 +57,7 @@ #define SIS_CTRL_KERNEL_UP 0x80 #define SIS_CTRL_KERNEL_PANIC 0x100 #define SIS_CTRL_READY_TIMEOUT_SECS 30 +#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10 #pragma pack(1) @@ -79,12 +81,13 @@ struct sis_base_struct { #pragma pack() -int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info) +static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info, + unsigned int timeout_secs) { unsigned long timeout; u32 status; - timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies; + timeout = (timeout_secs * HZ) + jiffies; while (1) { status = readl(&ctrl_info->registers->sis_firmware_status); @@ -107,6 +110,18 @@ int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info) return 0; } +int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info) +{ + return sis_wait_for_ctrl_ready_with_timeout(ctrl_info, + SIS_CTRL_READY_TIMEOUT_SECS); +} + +int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info) +{ + return sis_wait_for_ctrl_ready_with_timeout(ctrl_info, + SIS_CTRL_READY_RESUME_TIMEOUT_SECS); +} + bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info) { bool running; @@ -315,6 +330,34 @@ out: return rc; } +#define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS 30 + +static void sis_wait_for_doorbell_bit_to_clear( + struct pqi_ctrl_info *ctrl_info, u32 bit) +{ + u32 doorbell_register; + unsigned long timeout; + + timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies; + + while (1) { + doorbell_register = + readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); + if ((doorbell_register & bit) == 0) + break; + if (readl(&ctrl_info->registers->sis_firmware_status) & + SIS_CTRL_KERNEL_PANIC) + break; + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "doorbell register bit 0x%x not cleared\n", + bit); + break; + } + usleep_range(1000, 2000); + } +} + /* Enable MSI-X interrupts on the controller. */ void sis_enable_msix(struct pqi_ctrl_info *ctrl_info) @@ -327,6 +370,8 @@ void sis_enable_msix(struct pqi_ctrl_info *ctrl_info) writel(doorbell_register, &ctrl_info->registers->sis_host_to_ctrl_doorbell); + + sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX); } /* Disable MSI-X interrupts on the controller. 
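The doorbell handshake added above writes a request bit and then polls until firmware clears it, giving up on a firmware panic or after a deadline; a stand-alone sketch of that pattern, with a plain variable standing in for the memory-mapped register and hypothetical names throughout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define DEMO_ENABLE_BIT   0x40u  /* plays the role of SIS_ENABLE_MSIX */
    #define DEMO_TIMEOUT_SECS 30

    /* poll until the controller clears the request bit, or give up on a
     * firmware panic or timeout (the driver sleeps 1-2 ms between reads) */
    static bool wait_for_bit_to_clear(volatile uint32_t *doorbell, uint32_t bit,
                                      bool (*fw_panicked)(void))
    {
            time_t deadline = time(NULL) + DEMO_TIMEOUT_SECS;

            while (*doorbell & bit) {
                    if (fw_panicked())
                            return false;
                    if (time(NULL) > deadline) {
                            fprintf(stderr, "bit 0x%x not cleared\n",
                                    (unsigned)bit);
                            return false;
                    }
            }
            return true;
    }

    static bool no_panic(void) { return false; }

    int main(void)
    {
            volatile uint32_t doorbell = 0;

            doorbell |= DEMO_ENABLE_BIT;   /* ring the doorbell */
            doorbell &= ~DEMO_ENABLE_BIT;  /* pretend firmware acknowledged */
            printf("cleared: %d\n",
                   wait_for_bit_to_clear(&doorbell, DEMO_ENABLE_BIT, no_panic));
            return 0;
    }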
*/ @@ -343,6 +388,32 @@ void sis_disable_msix(struct pqi_ctrl_info *ctrl_info) &ctrl_info->registers->sis_host_to_ctrl_doorbell); } +void sis_enable_intx(struct pqi_ctrl_info *ctrl_info) +{ + u32 doorbell_register; + + doorbell_register = + readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); + doorbell_register |= SIS_ENABLE_INTX; + + writel(doorbell_register, + &ctrl_info->registers->sis_host_to_ctrl_doorbell); + + sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX); +} + +void sis_disable_intx(struct pqi_ctrl_info *ctrl_info) +{ + u32 doorbell_register; + + doorbell_register = + readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); + doorbell_register &= ~SIS_ENABLE_INTX; + + writel(doorbell_register, + &ctrl_info->registers->sis_host_to_ctrl_doorbell); +} + void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) { writel(SIS_SOFT_RESET, diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 157768d939a9..08ee0abb8656 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -20,6 +20,7 @@ #define _SMARTPQI_SIS_H int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info); +int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info); bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info); bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info); int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info); @@ -27,6 +28,8 @@ int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info); int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info); void sis_enable_msix(struct pqi_ctrl_info *ctrl_info); void sis_disable_msix(struct pqi_ctrl_info *ctrl_info); +void sis_enable_intx(struct pqi_ctrl_info *ctrl_info); +void sis_disable_intx(struct pqi_ctrl_info *ctrl_info); void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info); int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); -- cgit From 98f876674a6fba3591c342dfbcfdbaa7ecf0a84e Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:11 -0500 Subject: scsi: smartpqi: add heartbeat check check for controller lockups Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 60 ++++++++++++++-- drivers/scsi/smartpqi/smartpqi_init.c | 126 +++++++++++++++++++++++----------- drivers/scsi/smartpqi/smartpqi_sis.c | 4 ++ 3 files changed, 143 insertions(+), 47 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 06e2b7152d52..1ac09e74d8c2 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -490,7 +490,6 @@ struct pqi_raid_error_info { #define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe -#define PQI_EVENT_TYPE_HEARTBEAT 0xff #pragma pack() @@ -635,6 +634,58 @@ struct pqi_encryption_info { u32 encrypt_tweak_upper; }; +#pragma pack(1) + +#define PQI_CONFIG_TABLE_SIGNATURE "CFGTABLE" +#define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0) + +/* configuration table section IDs */ +#define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0 +#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1 +#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2 +#define PQI_CONFIG_TABLE_SECTION_DEBUG 3 +#define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4 + +struct pqi_config_table { + u8 signature[8]; /* "CFGTABLE" */ + __le32 first_section_offset; /* offset in bytes from the base */ + /* address of this table to the */ + /* first section */ +}; + +struct pqi_config_table_section_header { + __le16 section_id; /* as defined by the */ + /* PQI_CONFIG_TABLE_SECTION_* */ + /* manifest constants above */ + __le16 next_section_offset; /* offset in bytes from base */ + /* address of the table of the */ + /* next section or 0 if last entry */ +}; + +struct pqi_config_table_general_info { + struct pqi_config_table_section_header header; + __le32 section_length; /* size of this section in bytes */ + /* including the section header */ + __le32 max_outstanding_requests; /* max. outstanding */ + /* commands supported by */ + /* the controller */ + __le32 max_sg_size; /* max. transfer size of a single */ + /* command */ + __le32 max_sg_per_request; /* max. 
number of scatter-gather */ + /* entries supported in a single */ + /* command */ +}; + +struct pqi_config_table_debug { + struct pqi_config_table_section_header header; + __le32 scratchpad; +}; + +struct pqi_config_table_heartbeat { + struct pqi_config_table_section_header header; + __le32 heartbeat_counter; +}; + #define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) #define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) @@ -645,8 +696,6 @@ struct pqi_encryption_info { #define PQI_HBA_BUS 2 #define PQI_MAX_BUS PQI_HBA_BUS -#pragma pack(1) - struct report_lun_header { __be32 list_length; u8 extended_response; @@ -870,7 +919,6 @@ struct pqi_io_request { struct list_head request_list_entry; }; -#define PQI_EVENT_HEARTBEAT 0 #define PQI_NUM_SUPPORTED_EVENTS 6 struct pqi_event { @@ -943,7 +991,6 @@ struct pqi_ctrl_info { u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; - u8 heartbeat_timer_started : 1; u8 update_time_worker_scheduled : 1; struct list_head scsi_device_list; @@ -963,7 +1010,8 @@ struct pqi_ctrl_info { atomic_t num_interrupts; int previous_num_interrupts; - unsigned int num_heartbeats_requested; + u32 previous_heartbeat_count; + __le32 __iomem *heartbeat_counter; struct timer_list heartbeat_timer; struct semaphore sync_request_sem; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 7af4add16276..de33942860b3 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -267,6 +267,14 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) cancel_delayed_work_sync(&ctrl_info->rescan_work); } +static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) +{ + if (!ctrl_info->heartbeat_counter) + return 0; + + return readl(ctrl_info->heartbeat_counter); +} + static int pqi_map_single(struct pci_dev *pci_dev, struct pqi_sg_descriptor *sg_descriptor, void *buffer, size_t buffer_length, int data_direction) @@ -2708,23 +2716,18 @@ static inline unsigned int pqi_num_elements_free(unsigned int pi, return elements_in_queue - num_elements_used - 1; } -#define PQI_EVENT_ACK_TIMEOUT 30 - -static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info, +static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, struct pqi_event_acknowledge_request *iu, size_t iu_length) { pqi_index_t iq_pi; pqi_index_t iq_ci; unsigned long flags; void *next_element; - unsigned long timeout; struct pqi_queue_group *queue_group; queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); - timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies; - while (1) { spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); @@ -2738,11 +2741,8 @@ static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info, spin_unlock_irqrestore( &queue_group->submit_lock[RAID_PATH], flags); - if (time_after(jiffies, timeout)) { - dev_err(&ctrl_info->pci_dev->dev, - "sending event acknowledge timed out\n"); + if (pqi_ctrl_offline(ctrl_info)) return; - } } next_element = queue_group->iq_element_array[RAID_PATH] + @@ -2751,7 +2751,6 @@ static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info, memcpy(next_element, iu, iu_length); iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; - queue_group->iq_pi_copy[RAID_PATH] = iq_pi; /* @@ -2777,7 +2776,7 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, request.event_id = event->event_id; request.additional_event_id = 
event->additional_event_id; - pqi_start_event_ack(ctrl_info, &request, sizeof(request)); + pqi_send_event_ack(ctrl_info, &request, sizeof(request)); } static void pqi_event_worker(struct work_struct *work) @@ -2785,7 +2784,6 @@ static void pqi_event_worker(struct work_struct *work) unsigned int i; struct pqi_ctrl_info *ctrl_info; struct pqi_event *event; - bool got_non_heartbeat_event = false; ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); @@ -2797,8 +2795,6 @@ static void pqi_event_worker(struct work_struct *work) if (event->pending) { event->pending = false; pqi_acknowledge_event(ctrl_info, event); - if (i != PQI_EVENT_TYPE_HEARTBEAT) - got_non_heartbeat_event = true; } event++; } @@ -2848,57 +2844,58 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) } } -#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ) -#define PQI_MAX_HEARTBEAT_REQUESTS 5 +#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) static void pqi_heartbeat_timer_handler(unsigned long data) { int num_interrupts; + u32 heartbeat_count; struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data; - if (!ctrl_info->heartbeat_timer_started) + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) return; num_interrupts = atomic_read(&ctrl_info->num_interrupts); + heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); if (num_interrupts == ctrl_info->previous_num_interrupts) { - ctrl_info->num_heartbeats_requested++; - if (ctrl_info->num_heartbeats_requested > - PQI_MAX_HEARTBEAT_REQUESTS) { + if (heartbeat_count == ctrl_info->previous_heartbeat_count) { + dev_err(&ctrl_info->pci_dev->dev, + "no heartbeat detected - last heartbeat count: %u\n", + heartbeat_count); pqi_take_ctrl_offline(ctrl_info); return; } - ctrl_info->events[PQI_EVENT_HEARTBEAT].pending = true; - schedule_work(&ctrl_info->event_work); } else { - ctrl_info->num_heartbeats_requested = 0; + ctrl_info->previous_num_interrupts = num_interrupts; } - ctrl_info->previous_num_interrupts = num_interrupts; + ctrl_info->previous_heartbeat_count = heartbeat_count; mod_timer(&ctrl_info->heartbeat_timer, jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); } static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) { + if (!ctrl_info->heartbeat_counter) + return; + ctrl_info->previous_num_interrupts = atomic_read(&ctrl_info->num_interrupts); + ctrl_info->previous_heartbeat_count = + pqi_read_heartbeat_counter(ctrl_info); - init_timer(&ctrl_info->heartbeat_timer); ctrl_info->heartbeat_timer.expires = jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info; ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler; - ctrl_info->heartbeat_timer_started = true; add_timer(&ctrl_info->heartbeat_timer); } static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) { - if (ctrl_info->heartbeat_timer_started) { - ctrl_info->heartbeat_timer_started = false; - del_timer_sync(&ctrl_info->heartbeat_timer); - } + del_timer_sync(&ctrl_info->heartbeat_timer); } static inline int pqi_event_type_to_event_index(unsigned int event_type) @@ -2925,12 +2922,10 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) struct pqi_event_queue *event_queue; struct pqi_event_response *response; struct pqi_event *event; - bool need_delayed_work; int event_index; event_queue = &ctrl_info->event_queue; num_events = 0; - need_delayed_work = false; oq_ci = event_queue->oq_ci_copy; while (1) { @@ -2953,10 +2948,6 @@ static unsigned int pqi_process_event_intr(struct 
pqi_ctrl_info *ctrl_info) event->event_id = response->event_id; event->additional_event_id = response->additional_event_id; - if (event_index != PQI_EVENT_TYPE_HEARTBEAT) { - event->pending = true; - need_delayed_work = true; - } } } @@ -2966,9 +2957,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) if (num_events) { event_queue->oq_ci_copy = oq_ci; writel(oq_ci, event_queue->oq_ci); - - if (need_delayed_work) - schedule_work(&ctrl_info->event_work); + schedule_work(&ctrl_info->event_work); } return num_events; @@ -3220,7 +3209,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) if (!ctrl_info->queue_memory_base) { dev_err(&ctrl_info->pci_dev->dev, - "failed to allocate memory for PQI admin queues\n"); + "unable to allocate memory for PQI admin queues\n"); return -ENOMEM; } @@ -5672,6 +5661,55 @@ out: return rc; } +static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) +{ + u32 table_length; + u32 section_offset; + void __iomem *table_iomem_addr; + struct pqi_config_table *config_table; + struct pqi_config_table_section_header *section; + + table_length = ctrl_info->config_table_length; + + config_table = kmalloc(table_length, GFP_KERNEL); + if (!config_table) { + dev_err(&ctrl_info->pci_dev->dev, + "unable to allocate memory for PQI configuration table\n"); + return -ENOMEM; + } + + /* + * Copy the config table contents from I/O memory space into the + * temporary buffer. + */ + table_iomem_addr = ctrl_info->iomem_base + + ctrl_info->config_table_offset; + memcpy_fromio(config_table, table_iomem_addr, table_length); + + section_offset = + get_unaligned_le32(&config_table->first_section_offset); + + while (section_offset) { + section = (void *)config_table + section_offset; + + switch (get_unaligned_le16(§ion->section_id)) { + case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: + ctrl_info->heartbeat_counter = table_iomem_addr + + section_offset + + offsetof(struct pqi_config_table_heartbeat, + heartbeat_counter); + break; + } + + section_offset = + get_unaligned_le16(§ion->next_section_offset); + } + + kfree(config_table); + + return 0; +} + /* Switches the controller from PQI mode back into SIS mode. 
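The config-table scan above snapshots the table out of BAR memory and then follows next_section_offset links until they reach zero, recording where the heartbeat counter lives when it finds the heartbeat section; a compact model of that offset-chained walk over a byte buffer, with a deliberately simplified layout and hypothetical names:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_section_header {
            uint16_t section_id;
            uint16_t next_section_offset;   /* 0 terminates the chain */
    };

    #define DEMO_SECTION_HEARTBEAT 4  /* like PQI_CONFIG_TABLE_SECTION_HEARTBEAT */

    static void walk_sections(const uint8_t *table, uint16_t first_offset)
    {
            uint16_t offset = first_offset;

            while (offset) {
                    struct demo_section_header hdr;

                    memcpy(&hdr, table + offset, sizeof(hdr));
                    if (hdr.section_id == DEMO_SECTION_HEARTBEAT)
                            printf("heartbeat section at offset %u\n",
                                   (unsigned)offset);
                    offset = hdr.next_section_offset;
            }
    }

    int main(void)
    {
            uint8_t table[64] = { 0 };
            struct demo_section_header s1 = { .section_id = 1,
                                              .next_section_offset = 32 };
            struct demo_section_header s2 = { .section_id = DEMO_SECTION_HEARTBEAT,
                                              .next_section_offset = 0 };

            memcpy(table + 16, &s1, sizeof(s1));
            memcpy(table + 32, &s2, sizeof(s2));
            walk_sections(table, 16);
            return 0;
    }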
*/ static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) @@ -5783,6 +5821,10 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->pqi_mode_enabled = true; pqi_save_ctrl_mode(ctrl_info, PQI_MODE); + rc = pqi_process_config_table(ctrl_info); + if (rc) + return rc; + rc = pqi_alloc_admin_queues(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -6091,6 +6133,8 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); + init_timer(&ctrl_info->heartbeat_timer); + sema_init(&ctrl_info->sync_request_sem, PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); init_waitqueue_head(&ctrl_info->block_requests_wait); diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 12853fd75550..3155bda88550 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -422,6 +422,10 @@ void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info) { + if (readl(&ctrl_info->registers->sis_firmware_status) & + SIS_CTRL_KERNEL_PANIC) + return; + writel(SIS_TRIGGER_SHUTDOWN, &ctrl_info->registers->sis_host_to_ctrl_doorbell); } -- cgit From e1d213bdc3e359c6c5da8ebbc5b2e87b376e8777 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:18 -0500 Subject: scsi: smartpqi: correct bdma hw bug add workaround for BDMA hardware bug that can cause hw to read up to 12 SGL elements (192 bytes) beyond the last element in the list. This fix avoids IOMMU violations Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index de33942860b3..6f31cd96e521 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -48,6 +48,8 @@ #define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")" #define DRIVER_NAME_SHORT "smartpqi" +#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) + MODULE_AUTHOR("Microsemi"); MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " DRIVER_VERSION); @@ -3202,6 +3204,8 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) alloc_length = (size_t)aligned_pointer + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; + alloc_length += PQI_EXTRA_SGL_MEMORY; + ctrl_info->queue_memory_base = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, @@ -4319,7 +4323,8 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; ctrl_info->sg_chain_buffer_length = - max_sg_entries * sizeof(struct pqi_sg_descriptor); + (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + + PQI_EXTRA_SGL_MEMORY; ctrl_info->sg_tablesize = max_sg_entries; ctrl_info->max_sectors = max_transfer_size / 512; } -- cgit From 1f37e992ad8015ce33596466b0f36babb495148e Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:24 -0500 Subject: scsi: smartpqi: add pqi_wait_for_completion_io Add check for controller lockup during waits for synchronous controller commands. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 6f31cd96e521..586a1d51e369 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -3578,6 +3578,37 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); } +#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 + +static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, + struct completion *wait) +{ + int rc; + unsigned int wait_secs = 0; + + while (1) { + if (wait_for_completion_io_timeout(wait, + PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { + rc = 0; + break; + } + + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) { + rc = -ENXIO; + break; + } + + wait_secs += PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS; + + dev_err(&ctrl_info->pci_dev->dev, + "waiting %u seconds for completion\n", + wait_secs); + } + + return rc; +} + static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, void *context) { @@ -3601,7 +3632,7 @@ static int pqi_submit_raid_request_synchronous_with_io_request( io_request); if (timeout_msecs == NO_TIMEOUT) { - wait_for_completion_io(&wait); + pqi_wait_for_completion_io(ctrl_info, &wait); } else { if (!wait_for_completion_io_timeout(&wait, msecs_to_jiffies(timeout_msecs))) { -- cgit From d91d7820d39629fc67cea5d6721eac8b180b0451 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:30 -0500 Subject: scsi: smartpqi: make pdev pointer names consistent make all variable names for pointers to struct pci_dev consistent throughout the driver. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 41 ++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 586a1d51e369..0083ea4ff81a 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -3093,19 +3093,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) { - struct pci_dev *pdev = ctrl_info->pci_dev; + struct pci_dev *pci_dev = ctrl_info->pci_dev; int i; int rc; - ctrl_info->event_irq = pci_irq_vector(pdev, 0); + ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { - rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0, + rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); if (rc) { - dev_err(&pdev->dev, + dev_err(&pci_dev->dev, "irq %u init failed with error %d\n", - pci_irq_vector(pdev, i), rc); + pci_irq_vector(pci_dev, i), rc); return rc; } ctrl_info->num_msix_vectors_initialized++; @@ -6229,7 +6229,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) pqi_free_ctrl_resources(ctrl_info); } -static void pqi_print_ctrl_info(struct pci_dev *pdev, +static void pqi_print_ctrl_info(struct pci_dev *pci_dev, const struct pci_device_id *id) { char *ctrl_description; @@ -6248,41 +6248,42 @@ static void pqi_print_ctrl_info(struct pci_dev *pdev, } } - dev_info(&pdev->dev, "%s found\n", ctrl_description); + dev_info(&pci_dev->dev, "%s found\n", ctrl_description); } -static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int pqi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) { int rc; int node; struct pqi_ctrl_info *ctrl_info; - pqi_print_ctrl_info(pdev, id); + pqi_print_ctrl_info(pci_dev, id); if (pqi_disable_device_id_wildcards && id->subvendor == PCI_ANY_ID && id->subdevice == PCI_ANY_ID) { - dev_warn(&pdev->dev, + dev_warn(&pci_dev->dev, "controller not probed because device ID wildcards are disabled\n"); return -ENODEV; } if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) - dev_warn(&pdev->dev, + dev_warn(&pci_dev->dev, "controller device ID matched using wildcards\n"); - node = dev_to_node(&pdev->dev); + node = dev_to_node(&pci_dev->dev); if (node == NUMA_NO_NODE) - set_dev_node(&pdev->dev, 0); + set_dev_node(&pci_dev->dev, 0); ctrl_info = pqi_alloc_ctrl_info(node); if (!ctrl_info) { - dev_err(&pdev->dev, + dev_err(&pci_dev->dev, "failed to allocate controller info block\n"); return -ENOMEM; } - ctrl_info->pci_dev = pdev; + ctrl_info->pci_dev = pci_dev; rc = pqi_pci_init(ctrl_info); if (rc) @@ -6300,23 +6301,23 @@ error: return rc; } -static void pqi_pci_remove(struct pci_dev *pdev) +static void pqi_pci_remove(struct pci_dev *pci_dev) { struct pqi_ctrl_info *ctrl_info; - ctrl_info = pci_get_drvdata(pdev); + ctrl_info = pci_get_drvdata(pci_dev); if (!ctrl_info) return; pqi_remove_ctrl(ctrl_info); } -static void pqi_shutdown(struct pci_dev *pdev) +static void pqi_shutdown(struct pci_dev *pci_dev) { int rc; struct pqi_ctrl_info *ctrl_info; - ctrl_info = pci_get_drvdata(pdev); + ctrl_info = pci_get_drvdata(pci_dev); if (!ctrl_info) goto error; @@ -6329,7 +6330,7 @@ static void pqi_shutdown(struct pci_dev *pdev) return; error: - dev_warn(&pdev->dev, + dev_warn(&pci_dev->dev, "unable to flush controller cache\n"); } -- cgit From 
8845fdfa92ab6eb24209f9929d6340c2f5d4a2de Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:36 -0500 Subject: scsi: smartpqi: eliminate redundant error messages eliminate redundant error message during initialization if the controller has crashed. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 5 +---- drivers/scsi/smartpqi/smartpqi_sis.c | 5 ++++- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 0083ea4ff81a..fa21dd4db01b 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5796,11 +5796,8 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) * commands. */ rc = sis_wait_for_ctrl_ready(ctrl_info); - if (rc) { - dev_err(&ctrl_info->pci_dev->dev, - "error initializing SIS interface\n"); + if (rc) return rc; - } /* * Get the controller properties. This allows us to determine diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 3155bda88550..d4b1cc768921 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -102,8 +102,11 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info, if (status & SIS_CTRL_KERNEL_UP) break; } - if (time_after(jiffies, timeout)) + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "controller not ready\n"); return -ETIMEDOUT; + } msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS); } -- cgit From 1be42f46ade32c668f11c0735af03ab2d479d206 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:42 -0500 Subject: scsi: smartpqi: correct BMIC identify physical drive correct the BMIC Identify Physical Device structure - missing 2 fields Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 20 ++++++++++++++++---- drivers/scsi/smartpqi/smartpqi_init.c | 16 ++++++++++++++++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 1ac09e74d8c2..e74d3ed09da4 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1126,9 +1126,9 @@ struct bmic_identify_physical_device { u8 multi_lun_device_lun_count; u8 minimum_good_fw_revision[8]; u8 unique_inquiry_bytes[20]; - u8 current_temperature_degreesC; - u8 temperature_threshold_degreesC; - u8 max_temperature_degreesC; + u8 current_temperature_degrees; + u8 temperature_threshold_degrees; + u8 max_temperature_degrees; u8 logical_blocks_per_phys_block_exp; __le16 current_queue_depth_limit; u8 switch_name[10]; @@ -1141,10 +1141,22 @@ struct bmic_identify_physical_device { u8 smart_carrier_authentication; u8 smart_carrier_app_fw_version; u8 smart_carrier_bootloader_fw_version; + u8 sanitize_flags; + u8 encryption_key_flags; u8 encryption_key_name[64]; __le32 misc_drive_flags; __le16 dek_index; - u8 padding[112]; + __le16 hba_drive_encryption_flags; + __le16 max_overwrite_time; + __le16 max_block_erase_time; + __le16 max_crypto_erase_time; + u8 connector_info[5]; + u8 connector_name[8][8]; + u8 page_83_identifier[16]; + u8 maximum_link_rate[256]; + u8 negotiated_physical_link_rate[256]; + u8 box_connector_name[8]; + u8 padding_to_multiple_of_512[9]; }; #pragma pack() diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index fa21dd4db01b..acf47d883851 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -6982,6 +6982,22 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct bmic_identify_controller, controller_mode) != 292); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + phys_bay_in_box) != 115); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + device_type) != 120); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + redundant_path_present_map) != 1736); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + active_path_number) != 1738); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + alternate_paths_phys_connector) != 1739); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + alternate_paths_phys_box_on_port) != 1755); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + current_queue_depth_limit) != 1796); + BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); + BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % -- cgit From cbe0c7b11dbfda368f27a6935a08ba91522edf1a Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:48 -0500 Subject: scsi: smartpqi: minor driver cleanup - remove debug code that is no longer necessary. - Some WARN_ON checks were removed because the driver continues to function when the conditions are met. - remove a MACRO that is no longer used. - remove unnecessary multi-line statements. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 2 +- drivers/scsi/smartpqi/smartpqi_init.c | 38 +++++++++++++---------------------- 2 files changed, 15 insertions(+), 25 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index e74d3ed09da4..7a95868028b6 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1053,7 +1053,7 @@ enum pqi_ctrl_mode { #define BMIC_WRITE_HOST_WELLNESS 0xa5 #define BMIC_CACHE_FLUSH 0xc2 -#define SA_CACHE_FLUSH 0x01 +#define SA_CACHE_FLUSH 0x1 #define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0) #define CISS_GET_BUS(lunid) ((lunid)[7] & 0x3f) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index acf47d883851..91daedc9689e 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -57,8 +57,6 @@ MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); -#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0 - static char *hpe_branded_controller = "HPE Smart Array Controller"; static char *microsemi_branded_controller = "Microsemi Smart Family Controller"; @@ -94,7 +92,7 @@ static unsigned int pqi_supported_event_types[] = { static int pqi_disable_device_id_wildcards; module_param_named(disable_device_id_wildcards, - pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR); + pqi_disable_device_id_wildcards, int, 0644); MODULE_PARM_DESC(disable_device_id_wildcards, "Disable device ID wildcards."); @@ -383,7 +381,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, default: dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); - WARN_ON(cmd); break; } @@ -1133,10 +1130,8 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, scsi_sanitize_inquiry_string(&buffer[16], 16); device->devtype = buffer[0] & 0x1f; - memcpy(device->vendor, &buffer[8], - sizeof(device->vendor)); - memcpy(device->model, &buffer[16], - sizeof(device->model)); + memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); + memcpy(device->model, &buffer[16], sizeof(device->model)); if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) { pqi_get_raid_level(ctrl_info, device); @@ -1607,9 +1602,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, */ device->new_device = true; break; - default: - WARN_ON(find_result); - break; } } @@ -1742,7 +1734,7 @@ static inline bool pqi_skip_device(u8 *scsi3addr, return false; } -static inline bool pqi_expose_device(struct pqi_scsi_dev *device) +static inline bool pqi_ok_to_expose_device(struct pqi_scsi_dev *device) { /* Expose all devices except for physical devices that are masked. 
*/ if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr)) @@ -1883,7 +1875,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) pqi_assign_bus_target_lun(device); - device->expose_device = pqi_expose_device(device); + device->expose_device = pqi_ok_to_expose_device(device); if (device->is_physical_device) { device->wwid = phys_lun_ext_entry->wwid; @@ -2682,7 +2674,6 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, dev_err(&ctrl_info->pci_dev->dev, "unexpected IU type: 0x%x\n", response->header.iu_type); - WARN_ON(response->header.iu_type); break; } @@ -4642,7 +4633,6 @@ static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, dev_err(&ctrl_info->pci_dev->dev, "unknown data direction: %d\n", scmd->sc_data_direction); - WARN_ON(scmd->sc_data_direction); break; } @@ -4725,7 +4715,6 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, dev_err(&ctrl_info->pci_dev->dev, "unknown data direction: %d\n", scmd->sc_data_direction); - WARN_ON(scmd->sc_data_direction); break; } @@ -5485,8 +5474,8 @@ static ssize_t pqi_host_rescan_store(struct device *dev, return count; } -static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL); -static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store); +static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); +static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); static struct device_attribute *pqi_shost_attrs[] = { &dev_attr_version, @@ -5544,8 +5533,8 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, return 2; } -static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL); -static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO, +static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); +static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); static struct device_attribute *pqi_sdev_attrs[] = { @@ -6108,9 +6097,6 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) goto release_regions; } - ctrl_info->registers = ctrl_info->iomem_base; - ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; - #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 /* Increase the PCIe completion timeout. */ @@ -6125,6 +6111,9 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) /* Enable bus mastering. */ pci_set_master(ctrl_info->pci_dev); + ctrl_info->registers = ctrl_info->iomem_base; + ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; + pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); return 0; @@ -6141,7 +6130,8 @@ static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) { iounmap(ctrl_info->iomem_base); pci_release_regions(ctrl_info->pci_dev); - pci_disable_device(ctrl_info->pci_dev); + if (pci_is_enabled(ctrl_info->pci_dev)) + pci_disable_device(ctrl_info->pci_dev); pci_set_drvdata(ctrl_info->pci_dev, NULL); } -- cgit From 7eddabff8acb0f4c25f992efe126cf6cccdd6e7b Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:53:54 -0500 Subject: scsi: smartpqi: add new PCI device IDs Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 134 +++++++++++++++++++++++++++------- 1 file changed, 109 insertions(+), 25 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 91daedc9689e..6b20a912d986 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -6388,109 +6388,193 @@ static int pqi_resume(struct pci_dev *pci_dev) /* Define the PCI IDs for the controllers that we support. */ static const struct pci_device_id pqi_pci_id_table[] = { + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a22) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a23) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a24) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a36) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a37) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0600) + PCI_VENDOR_ID_ADAPTEC2, 0x0605) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0601) + PCI_VENDOR_ID_ADAPTEC2, 0x0800) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0602) + PCI_VENDOR_ID_ADAPTEC2, 0x0801) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0603) + PCI_VENDOR_ID_ADAPTEC2, 0x0802) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0650) + PCI_VENDOR_ID_ADAPTEC2, 0x0803) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0651) + PCI_VENDOR_ID_ADAPTEC2, 0x0804) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0652) + PCI_VENDOR_ID_ADAPTEC2, 0x0805) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0653) + PCI_VENDOR_ID_ADAPTEC2, 0x0806) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0654) + PCI_VENDOR_ID_ADAPTEC2, 0x0900) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0655) + PCI_VENDOR_ID_ADAPTEC2, 0x0901) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0700) + PCI_VENDOR_ID_ADAPTEC2, 0x0902) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0701) + PCI_VENDOR_ID_ADAPTEC2, 0x0903) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0800) + PCI_VENDOR_ID_ADAPTEC2, 0x0904) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0801) + PCI_VENDOR_ID_ADAPTEC2, 0x0905) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0802) + PCI_VENDOR_ID_ADAPTEC2, 0x0906) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0803) + PCI_VENDOR_ID_ADAPTEC2, 0x0907) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0804) + PCI_VENDOR_ID_ADAPTEC2, 0x0908) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0805) + PCI_VENDOR_ID_ADAPTEC2, 0x1200) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0900) + PCI_VENDOR_ID_ADAPTEC2, 0x1201) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0901) + PCI_VENDOR_ID_ADAPTEC2, 0x1202) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0902) + PCI_VENDOR_ID_ADAPTEC2, 0x1280) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0903) + 
PCI_VENDOR_ID_ADAPTEC2, 0x1281) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0904) + PCI_VENDOR_ID_ADAPTEC2, 0x1300) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0905) + PCI_VENDOR_ID_ADAPTEC2, 0x1301) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0906) + PCI_VENDOR_ID_ADAPTEC2, 0x1380) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0600) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0601) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0602) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0603) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0604) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0606) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0650) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0651) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0652) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0653) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0654) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0655) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0656) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0657) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0700) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0701) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, -- cgit From d87d5474e2080695ef0cc8c5e6c42a41d6ab961b Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:00 -0500 Subject: scsi: smartpqi: cleanup messages - improve some error messages. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 41 +++++++++++++++-------------------- drivers/scsi/smartpqi/smartpqi_sis.c | 3 ++- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 6b20a912d986..9e782e6b1e26 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -992,7 +992,10 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, return 0; bad_raid_map: - dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg); + dev_warn(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d %s\n", + ctrl_info->scsi_host->host_no, + device->bus, device->target, device->lun, err_msg); return -EINVAL; } @@ -1250,8 +1253,7 @@ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, status = "Volume undergoing encryption re-keying process"; break; case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: - status = - "Encrypted volume inaccessible - disabled on ctrl"; + status = "Volume encrypted but encryption is disabled"; break; case CISS_LV_PENDING_ENCRYPTION: status = "Volume pending migration to encrypted state"; @@ -2429,7 +2431,7 @@ static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) device->offload_enabled = false; } -static inline void pqi_take_device_offline(struct scsi_device *sdev) +static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) { struct pqi_ctrl_info *ctrl_info; struct pqi_scsi_dev *device; @@ -2439,8 +2441,8 @@ static inline void pqi_take_device_offline(struct scsi_device *sdev) ctrl_info = shost_to_hba(sdev->host); schedule_delayed_work(&ctrl_info->rescan_work, 0); device = sdev->hostdata; - dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n", - ctrl_info->scsi_host->host_no, device->bus, + dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", + path, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); } } @@ -2487,7 +2489,7 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request) sshdr.sense_key == HARDWARE_ERROR && sshdr.asc == 0x3e && sshdr.ascq == 0x1) { - pqi_take_device_offline(scmd->device); + pqi_take_device_offline(scmd->device, "RAID"); host_byte = DID_NO_CONNECT; } @@ -2547,7 +2549,7 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: device_offline = true; - pqi_take_device_offline(scmd->device); + pqi_take_device_offline(scmd->device, "AIO"); host_byte = DID_NO_CONNECT; scsi_status = SAM_STAT_CHECK_CONDITION; break; @@ -3202,11 +3204,8 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) alloc_length, &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); - if (!ctrl_info->queue_memory_base) { - dev_err(&ctrl_info->pci_dev->dev, - "unable to allocate memory for PQI admin queues\n"); + if (!ctrl_info->queue_memory_base) return -ENOMEM; - } ctrl_info->queue_memory_length = alloc_length; @@ -3575,7 +3574,6 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, struct completion *wait) { int rc; - unsigned int wait_secs = 0; while (1) { if (wait_for_completion_io_timeout(wait, @@ -3589,12 +3587,6 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, rc = -ENXIO; break; } - - wait_secs += PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS; - - dev_err(&ctrl_info->pci_dev->dev, - "waiting %u seconds for completion\n", - wait_secs); } return rc; @@ -5699,7 +5691,7 @@ 
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) config_table = kmalloc(table_length, GFP_KERNEL); if (!config_table) { dev_err(&ctrl_info->pci_dev->dev, - "unable to allocate memory for PQI configuration table\n"); + "failed to allocate memory for PQI configuration table\n"); return -ENOMEM; } @@ -5850,7 +5842,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) rc = pqi_alloc_admin_queues(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, - "error allocating admin queues\n"); + "failed to allocate admin queues\n"); return rc; } @@ -5889,8 +5881,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return rc; rc = pqi_alloc_operational_queues(ctrl_info); - if (rc) + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate operational queues\n"); return rc; + } pqi_init_operational_queues(ctrl_info); @@ -6030,7 +6025,7 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) rc = pqi_enable_events(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, - "error configuring events\n"); + "error enabling events\n"); return rc; } diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index d4b1cc768921..07c53e99ef3d 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -104,7 +104,8 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info, } if (time_after(jiffies, timeout)) { dev_err(&ctrl_info->pci_dev->dev, - "controller not ready\n"); + "controller not ready after %u seconds\n", + timeout_secs); return -ETIMEDOUT; } msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS); -- cgit From b805dbfe2bce1ddf3209c29f1aa7d6b2064ab6c9 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:06 -0500 Subject: scsi: smartpqi: update copyright Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi.h | 2 +- drivers/scsi/smartpqi/smartpqi_init.c | 2 +- drivers/scsi/smartpqi/smartpqi_sas_transport.c | 2 +- drivers/scsi/smartpqi/smartpqi_sis.c | 2 +- drivers/scsi/smartpqi/smartpqi_sis.h | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 7a95868028b6..d044a58c2a2b 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1,6 +1,6 @@ /* * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2016 Microsemi Corporation + * Copyright (c) 2016-2017 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 9e782e6b1e26..4b0928f074ac 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1,6 +1,6 @@ /* * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2016 Microsemi Corporation + * Copyright (c) 2016-2017 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. 
* * This program is free software; you can redistribute it and/or modify diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index 52ca4f93f1b2..0d89d3728b43 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -1,6 +1,6 @@ /* * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2016 Microsemi Corporation + * Copyright (c) 2016-2017 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 07c53e99ef3d..e55dfcf200e5 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -1,6 +1,6 @@ /* * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2016 Microsemi Corporation + * Copyright (c) 2016-2017 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 08ee0abb8656..983184b69373 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -1,6 +1,6 @@ /* * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2016 Microsemi Corporation + * Copyright (c) 2016-2017 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * * This program is free software; you can redistribute it and/or modify -- cgit From bd10cf0be6057f680fab911d89761fd15d76b205 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:12 -0500 Subject: scsi: smartpqi: add ptraid support add support for PTRAID devices Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 8 ++++--- drivers/scsi/smartpqi/smartpqi_init.c | 43 +++++++++++++++++++++++++++++------ 2 files changed, 41 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index d044a58c2a2b..be04bcb7db36 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -694,7 +694,8 @@ struct pqi_config_table_heartbeat { #define PQI_PHYSICAL_DEVICE_BUS 0 #define PQI_RAID_VOLUME_BUS 1 #define PQI_HBA_BUS 2 -#define PQI_MAX_BUS PQI_HBA_BUS +#define PQI_EXTERNAL_RAID_VOLUME_BUS 3 +#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS struct report_lun_header { __be32 list_length; @@ -781,6 +782,7 @@ struct pqi_scsi_dev { __be64 wwid; u8 volume_id[16]; u8 is_physical_device : 1; + u8 is_external_raid_device : 1; u8 target_lun_valid : 1; u8 expose_device : 1; u8 no_uld_attach : 1; @@ -1056,10 +1058,10 @@ enum pqi_ctrl_mode { #define SA_CACHE_FLUSH 0x1 #define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0) -#define CISS_GET_BUS(lunid) ((lunid)[7] & 0x3f) +#define CISS_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3f) #define CISS_GET_LEVEL_2_TARGET(lunid) ((lunid)[6]) #define CISS_GET_DRIVE_NUMBER(lunid) \ - (((CISS_GET_BUS((lunid)) - 1) << 8) + \ + (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \ CISS_GET_LEVEL_2_TARGET((lunid))) #define NO_TIMEOUT ((unsigned long) -1) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 4b0928f074ac..11545f322561 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -147,6 +147,11 @@ static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) return !device->is_physical_device; } +static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) +{ + return scsi3addr[2] != 0; +} + static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) { return !ctrl_info->controller_online; @@ -885,6 +890,9 @@ static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) { u8 *scsi3addr; u32 lunid; + int bus; + int target; + int lun; scsi3addr = device->scsi3addr; lunid = get_unaligned_le32(scsi3addr); @@ -897,8 +905,16 @@ static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) } if (pqi_is_logical_device(device)) { - pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0, - lunid & 0x3fff); + if (device->is_external_raid_device) { + bus = PQI_EXTERNAL_RAID_VOLUME_BUS; + target = (lunid >> 16) & 0x3fff; + lun = lunid & 0xff; + } else { + bus = PQI_RAID_VOLUME_BUS; + target = 0; + lun = lunid & 0x3fff; + } + pqi_set_bus_target_lun(device, bus, target, lun); device->target_lun_valid = true; return; } @@ -1137,9 +1153,15 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, memcpy(device->model, &buffer[16], sizeof(device->model)); if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) { - pqi_get_raid_level(ctrl_info, device); - pqi_get_offload_status(ctrl_info, device); - pqi_get_volume_status(ctrl_info, device); + if (device->is_external_raid_device) { + device->raid_level = SA_RAID_UNKNOWN; + device->volume_status = CISS_LV_OK; + device->volume_offline = false; + } else { + pqi_get_raid_level(ctrl_info, device); + pqi_get_offload_status(ctrl_info, device); + pqi_get_volume_status(ctrl_info, device); + } } out: @@ -1355,6 +1377,8 @@ static void pqi_update_all_logical_drive_queue_depths( continue; if (!pqi_is_logical_device(device)) continue; + if (device->is_external_raid_device) + continue; 
pqi_update_logical_drive_queue_depth(ctrl_info, device); } } @@ -1463,7 +1487,8 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, scsi_device_type(device->devtype), device->vendor, device->model, - pqi_raid_level_to_string(device->raid_level), + pqi_is_logical_device(device) ? + pqi_raid_level_to_string(device->raid_level) : "", device->offload_configured ? '+' : '-', device->offload_enabled_pending ? '+' : '-', device->expose_device ? '+' : '-', @@ -1487,6 +1512,8 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, /* By definition, the scsi3addr and wwid fields are already the same. */ existing_device->is_physical_device = new_device->is_physical_device; + existing_device->is_external_raid_device = + new_device->is_external_raid_device; existing_device->expose_device = new_device->expose_device; existing_device->no_uld_attach = new_device->no_uld_attach; existing_device->aio_enabled = new_device->aio_enabled; @@ -1855,7 +1882,9 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); device->is_physical_device = is_physical_device; - device->raid_level = SA_RAID_UNKNOWN; + if (!is_physical_device) + device->is_external_raid_device = + pqi_is_external_raid_addr(scsi3addr); /* Gather information about the device. */ rc = pqi_get_device_info(ctrl_info, device); -- cgit From 4e8415e3861e8b73a47c92e09e044b9dbc8ee37f Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:18 -0500 Subject: scsi: smartpqi: change return value for LUN reset operations change return value for controller offline to be consistent with the rest of the driver. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 11545f322561..fa6102173d2a 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5025,7 +5025,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) { - rc = -ETIMEDOUT; + rc = -ENXIO; break; } } -- cgit From d727a776d72b26033161bc19441266749455115b Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:25 -0500 Subject: scsi: smartpqi: enhance kdump constrain resource usage during kdump to avoid kdump failures Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 6 +++-- drivers/scsi/smartpqi/smartpqi_init.c | 45 +++++++++++++++++++++++++---------- 2 files changed, 36 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index be04bcb7db36..400d1fb59197 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -686,8 +686,10 @@ struct pqi_config_table_heartbeat { __le32 heartbeat_counter; }; -#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) -#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) +#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) +#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 +#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) +#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) #define RAID_MAP_MAX_ENTRIES 1024 diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index fa6102173d2a..cdc3407572a5 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -4353,8 +4353,12 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) ctrl_info->error_buffer_length = ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; - max_transfer_size = - min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE); + if (reset_devices) + max_transfer_size = min(ctrl_info->max_transfer_size, + PQI_MAX_TRANSFER_SIZE_KDUMP); + else + max_transfer_size = min(ctrl_info->max_transfer_size, + PQI_MAX_TRANSFER_SIZE); max_sg_entries = max_transfer_size / PAGE_SIZE; @@ -4374,19 +4378,24 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) { - int num_cpus; - int max_queue_groups; int num_queue_groups; u16 num_elements_per_iq; u16 num_elements_per_oq; - max_queue_groups = min(ctrl_info->max_inbound_queues / 2, - ctrl_info->max_outbound_queues - 1); - max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); + if (reset_devices) { + num_queue_groups = 1; + } else { + int num_cpus; + int max_queue_groups; + + max_queue_groups = min(ctrl_info->max_inbound_queues / 2, + ctrl_info->max_outbound_queues - 1); + max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); - num_cpus = num_online_cpus(); - num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); - num_queue_groups = min(num_queue_groups, max_queue_groups); + num_cpus = num_online_cpus(); + num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); + num_queue_groups = min(num_queue_groups, max_queue_groups); + } ctrl_info->num_queue_groups = num_queue_groups; ctrl_info->max_hw_queue_index = num_queue_groups - 1; @@ -5827,9 +5836,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return rc; } - if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS) - ctrl_info->max_outstanding_requests = - PQI_MAX_OUTSTANDING_REQUESTS; + if (reset_devices) { + if (ctrl_info->max_outstanding_requests > + PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) + ctrl_info->max_outstanding_requests = + PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; + } else { + if (ctrl_info->max_outstanding_requests > + PQI_MAX_OUTSTANDING_REQUESTS) + ctrl_info->max_outstanding_requests = + PQI_MAX_OUTSTANDING_REQUESTS; + } pqi_calculate_io_resources(ctrl_info); @@ -7110,4 +7127,6 @@ static void __attribute__((unused)) verify_structures(void) PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); + BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= + 
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); } -- cgit From 94086f5be3f15fc8231e65975e4413c0df3e0203 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:31 -0500 Subject: scsi: smartpqi: remove qdepth calculations for logical volumes make the queue depth for LVs the same as the maximum I/Os supported by the controller Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi.h | 6 -- drivers/scsi/smartpqi/smartpqi_init.c | 140 ++-------------------------------- 2 files changed, 8 insertions(+), 138 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 400d1fb59197..857d1bea6d06 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -726,7 +726,6 @@ struct report_phys_lun_extended_entry { }; /* for device_flags field of struct report_phys_lun_extended_entry */ -#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK 0x1 #define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8 struct report_phys_lun_extended { @@ -786,8 +785,6 @@ struct pqi_scsi_dev { u8 is_physical_device : 1; u8 is_external_raid_device : 1; u8 target_lun_valid : 1; - u8 expose_device : 1; - u8 no_uld_attach : 1; u8 aio_enabled : 1; /* only valid for physical disks */ u8 device_gone : 1; u8 new_device : 1; @@ -1034,9 +1031,6 @@ enum pqi_ctrl_mode { */ #define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27 -/* 0 = no limit */ -#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH 0 - /* CISS commands */ #define CISS_READ 0xc0 #define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */ diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index cdc3407572a5..23ebe22940fd 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1302,87 +1302,6 @@ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, device->bus, device->target, device->lun, status); } -static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle( - struct pqi_ctrl_info *ctrl_info, u32 aio_handle) -{ - struct pqi_scsi_dev *device; - - list_for_each_entry(device, &ctrl_info->scsi_device_list, - scsi_device_list_entry) { - if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC) - continue; - if (pqi_is_logical_device(device)) - continue; - if (device->aio_handle == aio_handle) - return device; - } - - return NULL; -} - -static void pqi_update_logical_drive_queue_depth( - struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive) -{ - unsigned int i; - struct raid_map *raid_map; - struct raid_map_disk_data *disk_data; - struct pqi_scsi_dev *phys_disk; - unsigned int num_phys_disks; - unsigned int num_raid_map_entries; - unsigned int queue_depth; - - logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH; - - raid_map = logical_drive->raid_map; - if (!raid_map) - return; - - disk_data = raid_map->disk_data; - num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) * - (get_unaligned_le16(&raid_map->data_disks_per_row) + - get_unaligned_le16(&raid_map->metadata_disks_per_row)); - num_raid_map_entries = num_phys_disks * - get_unaligned_le16(&raid_map->row_cnt); - - queue_depth = 0; - for (i = 0; i < num_raid_map_entries; i++) { - phys_disk = pqi_find_disk_by_aio_handle(ctrl_info, - disk_data[i].aio_handle); - - if (!phys_disk) { - dev_warn(&ctrl_info->pci_dev->dev, - "failed to find physical disk for logical drive %016llx\n", - get_unaligned_be64(logical_drive->scsi3addr)); - 
logical_drive->offload_enabled = false; - logical_drive->offload_enabled_pending = false; - kfree(raid_map); - logical_drive->raid_map = NULL; - return; - } - - queue_depth += phys_disk->queue_depth; - } - - logical_drive->queue_depth = queue_depth; -} - -static void pqi_update_all_logical_drive_queue_depths( - struct pqi_ctrl_info *ctrl_info) -{ - struct pqi_scsi_dev *device; - - list_for_each_entry(device, &ctrl_info->scsi_device_list, - scsi_device_list_entry) { - if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC) - continue; - if (!pqi_is_logical_device(device)) - continue; - if (device->is_external_raid_device) - continue; - pqi_update_logical_drive_queue_depth(ctrl_info, device); - } -} - static void pqi_rescan_worker(struct work_struct *work) { struct pqi_ctrl_info *ctrl_info; @@ -1478,7 +1397,7 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, char *action, struct pqi_scsi_dev *device) { dev_info(&ctrl_info->pci_dev->dev, - "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n", + "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c qd=%d\n", action, ctrl_info->scsi_host->host_no, device->bus, @@ -1491,7 +1410,6 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, pqi_raid_level_to_string(device->raid_level) : "", device->offload_configured ? '+' : '-', device->offload_enabled_pending ? '+' : '-', - device->expose_device ? '+' : '-', device->queue_depth); } @@ -1514,8 +1432,6 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, existing_device->is_physical_device = new_device->is_physical_device; existing_device->is_external_raid_device = new_device->is_external_raid_device; - existing_device->expose_device = new_device->expose_device; - existing_device->no_uld_attach = new_device->no_uld_attach; existing_device->aio_enabled = new_device->aio_enabled; memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); @@ -1657,8 +1573,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, device->keep_device = true; } - pqi_update_all_logical_drive_queue_depths(ctrl_info); - list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) device->offload_enabled = @@ -1697,7 +1611,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, /* Expose any new devices. */ list_for_each_entry_safe(device, next, &add_list, add_list_entry) { - if (device->expose_device && !device->sdev) { + if (!device->sdev) { rc = pqi_add_device(ctrl_info, device); if (rc) { dev_warn(&ctrl_info->pci_dev->dev, @@ -1740,38 +1654,15 @@ static bool pqi_is_supported_device(struct pqi_scsi_dev *device) return is_supported; } -static inline bool pqi_skip_device(u8 *scsi3addr, - struct report_phys_lun_extended_entry *phys_lun_ext_entry) +static inline bool pqi_skip_device(u8 *scsi3addr) { - u8 device_flags; - - if (!MASKED_DEVICE(scsi3addr)) - return false; - - /* The device is masked. */ - - device_flags = phys_lun_ext_entry->device_flags; - - if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) { - /* - * It's a non-disk device. We ignore all devices of this type - * when they're masked. - */ + /* Ignore all masked devices. */ + if (MASKED_DEVICE(scsi3addr)) return true; - } return false; } -static inline bool pqi_ok_to_expose_device(struct pqi_scsi_dev *device) -{ - /* Expose all devices except for physical devices that are masked. 
*/ - if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr)) - return false; - - return true; -} - static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) { int i; @@ -1870,8 +1761,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) scsi3addr = log_lun_ext_entry->lunid; } - if (is_physical_device && - pqi_skip_device(scsi3addr, phys_lun_ext_entry)) + if (is_physical_device && pqi_skip_device(scsi3addr)) continue; if (device) @@ -1906,8 +1796,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) pqi_assign_bus_target_lun(device); - device->expose_device = pqi_ok_to_expose_device(device); - if (device->is_physical_device) { device->wwid = phys_lun_ext_entry->wwid; if ((phys_lun_ext_entry->device_flags & @@ -4840,7 +4728,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, rc == SCSI_MLQUEUE_HOST_BUSY || rc == SAM_STAT_CHECK_CONDITION || rc == SAM_STAT_RESERVATION_CONFLICT) - raid_bypassed = true; + raid_bypassed = true; } if (!raid_bypassed) rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, @@ -5166,7 +5054,7 @@ static int pqi_slave_alloc(struct scsi_device *sdev) sdev_id(sdev), sdev->lun); } - if (device && device->expose_device) { + if (device) { sdev->hostdata = device; device->sdev = sdev; if (device->queue_depth) { @@ -5181,17 +5069,6 @@ static int pqi_slave_alloc(struct scsi_device *sdev) return 0; } -static int pqi_slave_configure(struct scsi_device *sdev) -{ - struct pqi_scsi_dev *device; - - device = sdev->hostdata; - if (!device->expose_device) - sdev->no_uld_attach = true; - - return 0; -} - static int pqi_map_queues(struct Scsi_Host *shost) { struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); @@ -5585,7 +5462,6 @@ static struct scsi_host_template pqi_driver_template = { .eh_device_reset_handler = pqi_eh_device_reset_handler, .ioctl = pqi_ioctl, .slave_alloc = pqi_slave_alloc, - .slave_configure = pqi_slave_configure, .map_queues = pqi_map_queues, .sdev_attrs = pqi_sdev_attrs, .shost_attrs = pqi_shost_attrs, -- cgit From 3c50976f33f30cf00baea9d518bd3e7ddd01ecc4 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:37 -0500 Subject: scsi: smartpqi: add lockup action add support for actions to take when controller goes offline. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 121 ++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 23ebe22940fd..15bb8c1602b4 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,7 @@ MODULE_LICENSE("GPL"); static char *hpe_branded_controller = "HPE Smart Array Controller"; static char *microsemi_branded_controller = "Microsemi Smart Family Controller"; +static void pqi_perform_lockup_action(void); static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); static void pqi_scan_start(struct Scsi_Host *shost); @@ -81,6 +83,32 @@ static struct scsi_transport_template *pqi_sas_transport_template; static atomic_t pqi_controller_count = ATOMIC_INIT(0); +enum pqi_lockup_action { + NONE, + REBOOT, + PANIC +}; + +static enum pqi_lockup_action pqi_lockup_action = NONE; + +static struct { + enum pqi_lockup_action action; + char *name; +} pqi_lockup_actions[] = { + { + .action = NONE, + .name = "none", + }, + { + .action = REBOOT, + .name = "reboot", + }, + { + .action = PANIC, + .name = "panic", + }, +}; + static unsigned int pqi_supported_event_types[] = { PQI_EVENT_TYPE_HOTPLUG, PQI_EVENT_TYPE_HARDWARE, @@ -96,6 +124,13 @@ module_param_named(disable_device_id_wildcards, MODULE_PARM_DESC(disable_device_id_wildcards, "Disable device ID wildcards."); +static char *pqi_lockup_action_param; +module_param_named(lockup_action, + pqi_lockup_action_param, charp, 0644); +MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" + "\t\tSupported: none, reboot, panic\n" + "\t\tDefault: none"); + static char *raid_levels[] = { "RAID-0", "RAID-4", @@ -2729,6 +2764,8 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) ctrl_info->controller_online = false; dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); sis_shutdown_ctrl(ctrl_info); + pci_disable_device(ctrl_info->pci_dev); + pqi_perform_lockup_action(); for (i = 0; i < ctrl_info->num_queue_groups; i++) { queue_group = &ctrl_info->queue_groups[i]; @@ -5381,12 +5418,55 @@ static ssize_t pqi_host_rescan_store(struct device *dev, return count; } +static ssize_t pqi_lockup_action_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + int count = 0; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { + if (pqi_lockup_actions[i].action == pqi_lockup_action) + count += snprintf(buffer + count, PAGE_SIZE - count, + "[%s] ", pqi_lockup_actions[i].name); + else + count += snprintf(buffer + count, PAGE_SIZE - count, + "%s ", pqi_lockup_actions[i].name); + } + + count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); + + return count; +} + +static ssize_t pqi_lockup_action_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + unsigned int i; + char *action_name; + char action_name_buffer[32]; + + strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); + action_name = strstrip(action_name_buffer); + + for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { + if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { + pqi_lockup_action = pqi_lockup_actions[i].action; + return count; + } + } + + return -EINVAL; +} + static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); static 
DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); +static DEVICE_ATTR(lockup_action, 0644, + pqi_lockup_action_show, pqi_lockup_action_store); static struct device_attribute *pqi_shost_attrs[] = { &dev_attr_version, &dev_attr_rescan, + &dev_attr_lockup_action, NULL }; @@ -6133,6 +6213,21 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) pqi_free_ctrl_resources(ctrl_info); } +static void pqi_perform_lockup_action(void) +{ + switch (pqi_lockup_action) { + case PANIC: + panic("FATAL: Smart Family Controller lockup detected"); + break; + case REBOOT: + emergency_restart(); + break; + case NONE: + default: + break; + } +} + static void pqi_print_ctrl_info(struct pci_dev *pci_dev, const struct pci_device_id *id) { @@ -6238,6 +6333,30 @@ error: "unable to flush controller cache\n"); } +static void pqi_process_lockup_action_param(void) +{ + unsigned int i; + + if (!pqi_lockup_action_param) + return; + + for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { + if (strcmp(pqi_lockup_action_param, + pqi_lockup_actions[i].name) == 0) { + pqi_lockup_action = pqi_lockup_actions[i].action; + return; + } + } + + pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", + DRIVER_NAME_SHORT, pqi_lockup_action_param); +} + +static void pqi_process_module_params(void) +{ + pqi_process_lockup_action_param(); +} + #if defined(CONFIG_PM) static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) @@ -6545,6 +6664,8 @@ static int __init pqi_init(void) if (!pqi_sas_transport_template) return -ENODEV; + pqi_process_module_params(); + rc = pci_register_driver(&pqi_pci_driver); if (rc) sas_release_transport(pqi_sas_transport_template); -- cgit From 376fb880a4fbf6903918a88081b16c167819af3f Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:43 -0500 Subject: scsi: smartpqi: correct aio error path set the internal flag that causes I/O to be sent down the RAID path when the AIO path is disabled Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 8 +- drivers/scsi/smartpqi/smartpqi_init.c | 345 +++++++++++++++++++++++++++------- 2 files changed, 285 insertions(+), 68 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 857d1bea6d06..94b92ae63f23 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -785,11 +785,11 @@ struct pqi_scsi_dev { u8 is_physical_device : 1; u8 is_external_raid_device : 1; u8 target_lun_valid : 1; - u8 aio_enabled : 1; /* only valid for physical disks */ u8 device_gone : 1; u8 new_device : 1; u8 keep_device : 1; u8 volume_offline : 1; + bool aio_enabled; /* only valid for physical disks */ bool in_reset; bool device_offline; u8 vendor[8]; /* bytes 8-15 of inquiry data */ @@ -911,7 +911,9 @@ struct pqi_io_request { void (*io_complete_callback)(struct pqi_io_request *io_request, void *context); void *context; + u8 raid_bypass : 1; int status; + struct pqi_queue_group *queue_group; struct scsi_cmnd *scmd; void *error_info; struct pqi_sg_descriptor *sg_chain_buffer; @@ -1019,6 +1021,10 @@ struct pqi_ctrl_info { atomic_t num_busy_threads; atomic_t num_blocked_threads; wait_queue_head_t block_requests_wait; + + struct list_head raid_bypass_retry_list; + spinlock_t raid_bypass_retry_list_lock; + struct work_struct raid_bypass_retry_work; }; enum pqi_ctrl_mode { diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 15bb8c1602b4..57ff80fba3f5 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -63,6 +63,9 @@ static char *microsemi_branded_controller = "Microsemi Smart Family Controller"; static void pqi_perform_lockup_action(void); static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); +static void pqi_complete_all_queued_raid_bypass_retries( + struct pqi_ctrl_info *ctrl_info, int result); +static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info); static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); static void pqi_scan_start(struct Scsi_Host *shost); static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, @@ -74,7 +77,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, unsigned int cdb_length, struct pqi_queue_group *queue_group, - struct pqi_encryption_info *encryption_info); + struct pqi_encryption_info *encryption_info, bool raid_bypass); /* for flags argument to pqi_submit_raid_request_synchronous() */ #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 @@ -227,6 +230,7 @@ static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) { ctrl_info->block_requests = false; wake_up_all(&ctrl_info->block_requests_wait); + pqi_retry_raid_bypass_requests(ctrl_info); scsi_unblock_requests(ctrl_info->scsi_host); } @@ -445,6 +449,14 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, buffer, buffer_length, pci_dir); } +static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) +{ + io_request->scmd = NULL; + io_request->status = 0; + io_request->error_info = NULL; + io_request->raid_bypass = false; +} + static struct pqi_io_request *pqi_alloc_io_request( struct pqi_ctrl_info *ctrl_info) { @@ -462,9 +474,7 @@ static struct pqi_io_request *pqi_alloc_io_request( /* benignly racy */ ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; - io_request->scmd = NULL; - 
io_request->status = 0; - io_request->error_info = NULL; + pqi_reinit_io_request(io_request); return io_request; } @@ -1678,8 +1688,8 @@ static bool pqi_is_supported_device(struct pqi_scsi_dev *device) /* * Only support the HBA controller itself as a RAID * controller. If it's a RAID controller other than - * the HBA itself (an external RAID controller, MSA500 - * or similar), we don't support it. + * the HBA itself (an external RAID controller, for + * example), we don't support it. */ if (pqi_is_hba_lunid(device->scsi3addr)) is_supported = true; @@ -2308,7 +2318,7 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, } return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, - cdb, cdb_length, queue_group, encryption_info_ptr); + cdb, cdb_length, queue_group, encryption_info_ptr, true); } #define PQI_STATUS_IDLE 0x0 @@ -2381,6 +2391,7 @@ static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) device = io_request->scmd->device->hostdata; device->offload_enabled = false; + device->aio_enabled = false; } static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) @@ -2500,9 +2511,11 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) break; case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: - device_offline = true; - pqi_take_device_offline(scmd->device, "AIO"); - host_byte = DID_NO_CONNECT; + if (!io_request->raid_bypass) { + device_offline = true; + pqi_take_device_offline(scmd->device, "AIO"); + host_byte = DID_NO_CONNECT; + } scsi_status = SAM_STAT_CHECK_CONDITION; break; case PQI_AIO_STATUS_IO_ERROR: @@ -2751,48 +2764,6 @@ static void pqi_event_worker(struct work_struct *work) pqi_schedule_rescan_worker(ctrl_info); } -static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) -{ - unsigned int i; - unsigned int path; - struct pqi_queue_group *queue_group; - unsigned long flags; - struct pqi_io_request *io_request; - struct pqi_io_request *next; - struct scsi_cmnd *scmd; - - ctrl_info->controller_online = false; - dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); - sis_shutdown_ctrl(ctrl_info); - pci_disable_device(ctrl_info->pci_dev); - pqi_perform_lockup_action(); - - for (i = 0; i < ctrl_info->num_queue_groups; i++) { - queue_group = &ctrl_info->queue_groups[i]; - - for (path = 0; path < 2; path++) { - spin_lock_irqsave( - &queue_group->submit_lock[path], flags); - - list_for_each_entry_safe(io_request, next, - &queue_group->request_list[path], - request_list_entry) { - - scmd = io_request->scmd; - if (scmd) { - set_host_byte(scmd, DID_NO_CONNECT); - pqi_scsi_done(scmd); - } - - list_del(&io_request->request_list_entry); - } - - spin_unlock_irqrestore( - &queue_group->submit_lock[path], flags); - } - } -} - #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) static void pqi_heartbeat_timer_handler(unsigned long data) @@ -3461,9 +3432,11 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, spin_lock_irqsave(&queue_group->submit_lock[path], flags); - if (io_request) + if (io_request) { + io_request->queue_group = queue_group; list_add_tail(&io_request->request_list_entry, &queue_group->request_list[path]); + } iq_pi = queue_group->iq_pi_copy[path]; @@ -3623,6 +3596,11 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, goto out; } + if (pqi_ctrl_offline(ctrl_info)) { + rc = -ENXIO; + goto out; + } + io_request = pqi_alloc_io_request(ctrl_info); put_unaligned_le16(io_request->index, @@ -4509,21 +4487,18 @@ static void 
pqi_raid_io_complete(struct pqi_io_request *io_request, pqi_scsi_done(scmd); } -static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, +static int pqi_raid_submit_scsi_cmd_with_io_request( + struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group) { int rc; size_t cdb_length; - struct pqi_io_request *io_request; struct pqi_raid_path_request *request; - io_request = pqi_alloc_io_request(ctrl_info); io_request->io_complete_callback = pqi_raid_io_complete; io_request->scmd = scmd; - scmd->host_scribble = (unsigned char *)io_request; - request = io_request->iu; memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); @@ -4602,6 +4577,183 @@ static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, return 0; } +static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, + struct pqi_queue_group *queue_group) +{ + struct pqi_io_request *io_request; + + io_request = pqi_alloc_io_request(ctrl_info); + + return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, + device, scmd, queue_group); +} + +static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) +{ + if (!pqi_ctrl_blocked(ctrl_info)) + schedule_work(&ctrl_info->raid_bypass_retry_work); +} + +static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) +{ + struct scsi_cmnd *scmd; + struct pqi_ctrl_info *ctrl_info; + + if (!io_request->raid_bypass) + return false; + + scmd = io_request->scmd; + if ((scmd->result & 0xff) == SAM_STAT_GOOD) + return false; + if (host_byte(scmd->result) == DID_NO_CONNECT) + return false; + + ctrl_info = shost_to_hba(scmd->device->host); + if (pqi_ctrl_offline(ctrl_info)) + return false; + + return true; +} + +static inline void pqi_add_to_raid_bypass_retry_list( + struct pqi_ctrl_info *ctrl_info, + struct pqi_io_request *io_request, bool at_head) +{ + unsigned long flags; + + spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); + if (at_head) + list_add(&io_request->request_list_entry, + &ctrl_info->raid_bypass_retry_list); + else + list_add_tail(&io_request->request_list_entry, + &ctrl_info->raid_bypass_retry_list); + spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); +} + +static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, + void *context) +{ + struct scsi_cmnd *scmd; + + scmd = io_request->scmd; + pqi_free_io_request(io_request); + pqi_scsi_done(scmd); +} + +static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) +{ + struct scsi_cmnd *scmd; + struct pqi_ctrl_info *ctrl_info; + + io_request->io_complete_callback = pqi_queued_raid_bypass_complete; + scmd = io_request->scmd; + scmd->result = 0; + ctrl_info = shost_to_hba(scmd->device->host); + + pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); + pqi_schedule_bypass_retry(ctrl_info); +} + +static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) +{ + struct scsi_cmnd *scmd; + struct pqi_scsi_dev *device; + struct pqi_ctrl_info *ctrl_info; + struct pqi_queue_group *queue_group; + + scmd = io_request->scmd; + device = scmd->device->hostdata; + if (pqi_device_in_reset(device)) { + pqi_free_io_request(io_request); + set_host_byte(scmd, DID_RESET); + pqi_scsi_done(scmd); + return 0; + } + + ctrl_info = shost_to_hba(scmd->device->host); + queue_group = io_request->queue_group; + + 
pqi_reinit_io_request(io_request); + + return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, + device, scmd, queue_group); +} + +static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( + struct pqi_ctrl_info *ctrl_info) +{ + unsigned long flags; + struct pqi_io_request *io_request; + + spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); + io_request = list_first_entry_or_null( + &ctrl_info->raid_bypass_retry_list, + struct pqi_io_request, request_list_entry); + if (io_request) + list_del(&io_request->request_list_entry); + spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); + + return io_request; +} + +static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct pqi_io_request *io_request; + + pqi_ctrl_busy(ctrl_info); + + while (1) { + if (pqi_ctrl_blocked(ctrl_info)) + break; + io_request = pqi_next_queued_raid_bypass_request(ctrl_info); + if (!io_request) + break; + rc = pqi_retry_raid_bypass(io_request); + if (rc) { + pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, + true); + pqi_schedule_bypass_retry(ctrl_info); + break; + } + } + + pqi_ctrl_unbusy(ctrl_info); +} + +static void pqi_raid_bypass_retry_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = container_of(work, struct pqi_ctrl_info, + raid_bypass_retry_work); + pqi_retry_raid_bypass_requests(ctrl_info); +} + +static void pqi_complete_all_queued_raid_bypass_retries( + struct pqi_ctrl_info *ctrl_info, int result) +{ + unsigned long flags; + struct pqi_io_request *io_request; + struct pqi_io_request *next; + struct scsi_cmnd *scmd; + + spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); + + list_for_each_entry_safe(io_request, next, + &ctrl_info->raid_bypass_retry_list, request_list_entry) { + list_del(&io_request->request_list_entry); + scmd = io_request->scmd; + pqi_free_io_request(io_request); + scmd->result = result; + pqi_scsi_done(scmd); + } + + spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); +} + static void pqi_aio_io_complete(struct pqi_io_request *io_request, void *context) { @@ -4611,6 +4763,10 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request, scsi_dma_unmap(scmd); if (io_request->status == -EAGAIN) set_host_byte(scmd, DID_IMM_RETRY); + else if (pqi_raid_bypass_retry_needed(io_request)) { + pqi_queue_raid_bypass_retry(io_request); + return; + } pqi_free_io_request(io_request); pqi_scsi_done(scmd); } @@ -4620,13 +4776,13 @@ static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) { return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, - scmd->cmnd, scmd->cmd_len, queue_group, NULL); + scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); } static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, unsigned int cdb_length, struct pqi_queue_group *queue_group, - struct pqi_encryption_info *encryption_info) + struct pqi_encryption_info *encryption_info, bool raid_bypass) { int rc; struct pqi_io_request *io_request; @@ -4635,8 +4791,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, io_request = pqi_alloc_io_request(ctrl_info); io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; - - scmd->host_scribble = (unsigned char *)io_request; + io_request->raid_bypass = raid_bypass; request = io_request->iu; memset(request, 0, @@ -4761,11 +4916,8 @@ static int 
pqi_scsi_queue_command(struct Scsi_Host *shost, !blk_rq_is_passthrough(scmd->request)) { rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); - if (rc == 0 || - rc == SCSI_MLQUEUE_HOST_BUSY || - rc == SAM_STAT_CHECK_CONDITION || - rc == SAM_STAT_RESERVATION_CONFLICT) - raid_bypassed = true; + if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) + raid_bypassed = true; } if (!raid_bypassed) rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, @@ -6159,6 +6311,11 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); init_waitqueue_head(&ctrl_info->block_requests_wait); + INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); + spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); + INIT_WORK(&ctrl_info->raid_bypass_retry_work, + pqi_raid_bypass_retry_worker); + ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; ctrl_info->irq_mode = IRQ_MODE_NONE; ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; @@ -6228,6 +6385,60 @@ static void pqi_perform_lockup_action(void) } } +static void pqi_complete_all_queued_requests(struct pqi_ctrl_info *ctrl_info, + int result) +{ + unsigned int i; + unsigned int path; + struct pqi_queue_group *queue_group; + unsigned long flags; + struct pqi_io_request *io_request; + struct pqi_io_request *next; + struct scsi_cmnd *scmd; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + + for (path = 0; path < 2; path++) { + spin_lock_irqsave( + &queue_group->submit_lock[path], flags); + + list_for_each_entry_safe(io_request, next, + &queue_group->request_list[path], + request_list_entry) { + + scmd = io_request->scmd; + if (scmd) { + scmd->result = result; + pqi_scsi_done(scmd); + } + + list_del(&io_request->request_list_entry); + } + + spin_unlock_irqrestore( + &queue_group->submit_lock[path], flags); + } + } +} + +static void pqi_fail_all_queued_requests(struct pqi_ctrl_info *ctrl_info) +{ + pqi_complete_all_queued_requests(ctrl_info, DID_NO_CONNECT << 16); + pqi_complete_all_queued_raid_bypass_retries(ctrl_info, + DID_NO_CONNECT << 16); +} + +static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->controller_online = false; + sis_shutdown_ctrl(ctrl_info); + pci_disable_device(ctrl_info->pci_dev); + dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); + pqi_perform_lockup_action(); + pqi_fail_all_queued_requests(ctrl_info); +} + static void pqi_print_ctrl_info(struct pci_dev *pci_dev, const struct pci_device_id *id) { -- cgit From 03b288cf3d92202b950245e931576bb573930c70 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:49 -0500 Subject: scsi: smartpqi: update device offline - Improve handling of offline devices. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
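A minimal sketch of the pattern the patch below adopts: device offlining is made idempotent by latching the state in the driver's per-device structure before touching the SCSI midlayer. Names such as struct my_dev and my_take_device_offline are hypothetical; the snippet assumes <scsi/scsi_device.h>.

/* Latch the offline state first so repeated error completions for the
 * same device neither re-offline it nor log it more than once.
 */
struct my_dev {
	bool device_offline;	/* driver-private, reached via sdev->hostdata */
};

static void my_take_device_offline(struct scsi_device *sdev, const char *path)
{
	struct my_dev *dev = sdev->hostdata;

	if (dev->device_offline)
		return;

	dev->device_offline = true;
	scsi_device_set_state(sdev, SDEV_OFFLINE);
	sdev_printk(KERN_ERR, sdev, "offlined via %s path\n", path);
}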
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 57ff80fba3f5..f36aaceacae4 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -285,6 +285,11 @@ static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) usleep_range(1000, 2000); } +static inline bool pqi_device_offline(struct pqi_scsi_dev *device) +{ + return device->device_offline; +} + static inline void pqi_device_reset_start(struct pqi_scsi_dev *device) { device->in_reset = true; @@ -2399,15 +2404,17 @@ static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) struct pqi_ctrl_info *ctrl_info; struct pqi_scsi_dev *device; - if (scsi_device_online(sdev)) { - scsi_device_set_state(sdev, SDEV_OFFLINE); - ctrl_info = shost_to_hba(sdev->host); - schedule_delayed_work(&ctrl_info->rescan_work, 0); - device = sdev->hostdata; - dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", - path, ctrl_info->scsi_host->host_no, device->bus, - device->target, device->lun); - } + device = sdev->hostdata; + if (device->device_offline) + return; + + device->device_offline = true; + scsi_device_set_state(sdev, SDEV_OFFLINE); + ctrl_info = shost_to_hba(sdev->host); + pqi_schedule_rescan_worker(ctrl_info); + dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", + path, ctrl_info->scsi_host->host_no, device->bus, + device->target, device->lun); } static void pqi_process_raid_io_error(struct pqi_io_request *io_request) @@ -4598,6 +4605,7 @@ static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) { struct scsi_cmnd *scmd; + struct pqi_scsi_dev *device; struct pqi_ctrl_info *ctrl_info; if (!io_request->raid_bypass) @@ -4609,6 +4617,10 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) if (host_byte(scmd->result) == DID_NO_CONNECT) return false; + device = scmd->device->hostdata; + if (pqi_device_offline(device)) + return false; + ctrl_info = shost_to_hba(scmd->device->host); if (pqi_ctrl_offline(ctrl_info)) return false; -- cgit From 5f310425c8eabeeb303809898682e5b79c8a9c7e Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:54:55 -0500 Subject: scsi: smartpqi: update rescan worker improve support for taking controller offline. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
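The patch below moves the expensive parts of taking a controller offline into a work item, so the detection path only flips state and blocks requests. A minimal sketch of that split, with hypothetical names (struct my_ctrl, my_offline_worker) and assuming <linux/workqueue.h>:

struct my_ctrl {
	bool online;
	struct work_struct offline_work;
};

static void my_offline_worker(struct work_struct *work)
{
	struct my_ctrl *ctrl = container_of(work, struct my_ctrl, offline_work);

	/* slow cleanup runs in process context: stop timers, cancel
	 * workers, fail outstanding requests, unblock the queue, ...
	 */
}

static void my_take_ctrl_offline(struct my_ctrl *ctrl)
{
	if (!ctrl->online)
		return;
	ctrl->online = false;
	schedule_work(&ctrl->offline_work);	/* defer everything else */
}

/* at init time: INIT_WORK(&ctrl->offline_work, my_offline_worker); */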
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 2 +- drivers/scsi/smartpqi/smartpqi_init.c | 141 ++++++++++++++++++---------------- 2 files changed, 77 insertions(+), 66 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 94b92ae63f23..2ed15cfd01d9 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -994,7 +994,6 @@ struct pqi_ctrl_info { u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; - u8 update_time_worker_scheduled : 1; struct list_head scsi_device_list; spinlock_t scsi_device_list_lock; @@ -1016,6 +1015,7 @@ struct pqi_ctrl_info { u32 previous_heartbeat_count; __le32 __iomem *heartbeat_counter; struct timer_list heartbeat_timer; + struct work_struct ctrl_offline_work; struct semaphore sync_request_sem; atomic_t num_busy_threads; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index f36aaceacae4..338ba03762b1 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -61,10 +61,8 @@ MODULE_LICENSE("GPL"); static char *hpe_branded_controller = "HPE Smart Array Controller"; static char *microsemi_branded_controller = "Microsemi Smart Family Controller"; -static void pqi_perform_lockup_action(void); static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); -static void pqi_complete_all_queued_raid_bypass_retries( - struct pqi_ctrl_info *ctrl_info, int result); +static void pqi_ctrl_offline_worker(struct work_struct *work); static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info); static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); static void pqi_scan_start(struct Scsi_Host *shost); @@ -219,7 +217,6 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, sis_write_driver_scratch(ctrl_info, mode); } -#define PQI_RESCAN_WORK_INTERVAL (10 * HZ) static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) { ctrl_info->block_requests = true; @@ -305,10 +302,26 @@ static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device) return device->in_reset; } +static inline void pqi_schedule_rescan_worker_with_delay( + struct pqi_ctrl_info *ctrl_info, unsigned long delay) +{ + if (pqi_ctrl_offline(ctrl_info)) + return; + + schedule_delayed_work(&ctrl_info->rescan_work, delay); +} + static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) { - schedule_delayed_work(&ctrl_info->rescan_work, - PQI_RESCAN_WORK_INTERVAL); + pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); +} + +#define PQI_RESCAN_WORK_DELAY (10 * HZ) + +static inline void pqi_schedule_rescan_worker_delayed( + struct pqi_ctrl_info *ctrl_info) +{ + pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); } static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) @@ -741,6 +754,9 @@ static void pqi_update_time_worker(struct work_struct *work) ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, update_time_work); + if (pqi_ctrl_offline(ctrl_info)) + return; + rc = pqi_write_current_time_to_host_wellness(ctrl_info); if (rc) dev_warn(&ctrl_info->pci_dev->dev, @@ -753,21 +769,13 @@ static void pqi_update_time_worker(struct work_struct *work) static inline void pqi_schedule_update_time_worker( struct pqi_ctrl_info *ctrl_info) { - if (ctrl_info->update_time_worker_scheduled) - return; - schedule_delayed_work(&ctrl_info->update_time_work, 0); - 
ctrl_info->update_time_worker_scheduled = true; } static inline void pqi_cancel_update_time_worker( struct pqi_ctrl_info *ctrl_info) { - if (!ctrl_info->update_time_worker_scheduled) - return; - cancel_delayed_work_sync(&ctrl_info->update_time_work); - ctrl_info->update_time_worker_scheduled = false; } static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, @@ -1933,7 +1941,7 @@ static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) rc = pqi_update_scsi_devices(ctrl_info); if (rc) - pqi_schedule_rescan_worker(ctrl_info); + pqi_schedule_rescan_worker_delayed(ctrl_info); mutex_unlock(&ctrl_info->scan_mutex); @@ -2756,6 +2764,10 @@ static void pqi_event_worker(struct work_struct *work) pqi_ctrl_busy(ctrl_info); pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); + if (pqi_ctrl_offline(ctrl_info)) + goto out; + + pqi_schedule_rescan_worker_delayed(ctrl_info); event = ctrl_info->events; for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { @@ -2766,9 +2778,8 @@ static void pqi_event_worker(struct work_struct *work) event++; } +out: pqi_ctrl_unbusy(ctrl_info); - - pqi_schedule_rescan_worker(ctrl_info); } #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) @@ -4744,25 +4755,13 @@ static void pqi_raid_bypass_retry_worker(struct work_struct *work) pqi_retry_raid_bypass_requests(ctrl_info); } -static void pqi_complete_all_queued_raid_bypass_retries( - struct pqi_ctrl_info *ctrl_info, int result) +static void pqi_clear_all_queued_raid_bypass_retries( + struct pqi_ctrl_info *ctrl_info) { unsigned long flags; - struct pqi_io_request *io_request; - struct pqi_io_request *next; - struct scsi_cmnd *scmd; spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); - - list_for_each_entry_safe(io_request, next, - &ctrl_info->raid_bypass_retry_list, request_list_entry) { - list_del(&io_request->request_list_entry); - scmd = io_request->scmd; - pqi_free_io_request(io_request); - scmd->result = result; - pqi_scsi_done(scmd); - } - + INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); } @@ -6318,6 +6317,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); init_timer(&ctrl_info->heartbeat_timer); + INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); sema_init(&ctrl_info->sync_request_sem, PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); @@ -6397,58 +6397,69 @@ static void pqi_perform_lockup_action(void) } } -static void pqi_complete_all_queued_requests(struct pqi_ctrl_info *ctrl_info, - int result) +static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { + .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, + .status = SAM_STAT_CHECK_CONDITION, +}; + +static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) { unsigned int i; - unsigned int path; - struct pqi_queue_group *queue_group; - unsigned long flags; struct pqi_io_request *io_request; - struct pqi_io_request *next; struct scsi_cmnd *scmd; - for (i = 0; i < ctrl_info->num_queue_groups; i++) { - queue_group = &ctrl_info->queue_groups[i]; - - for (path = 0; path < 2; path++) { - spin_lock_irqsave( - &queue_group->submit_lock[path], flags); - - list_for_each_entry_safe(io_request, next, - &queue_group->request_list[path], - request_list_entry) { - - scmd = io_request->scmd; - if (scmd) { - scmd->result = result; - pqi_scsi_done(scmd); - } - - list_del(&io_request->request_list_entry); - } + for (i = 0; i < ctrl_info->max_io_slots; i++) { + 
io_request = &ctrl_info->io_request_pool[i]; + if (atomic_read(&io_request->refcount) == 0) + continue; - spin_unlock_irqrestore( - &queue_group->submit_lock[path], flags); + scmd = io_request->scmd; + if (scmd) { + set_host_byte(scmd, DID_NO_CONNECT); + } else { + io_request->status = -ENXIO; + io_request->error_info = + &pqi_ctrl_offline_raid_error_info; } + + io_request->io_complete_callback(io_request, + io_request->context); } } -static void pqi_fail_all_queued_requests(struct pqi_ctrl_info *ctrl_info) +static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) { - pqi_complete_all_queued_requests(ctrl_info, DID_NO_CONNECT << 16); - pqi_complete_all_queued_raid_bypass_retries(ctrl_info, - DID_NO_CONNECT << 16); + pqi_perform_lockup_action(); + pqi_stop_heartbeat_timer(ctrl_info); + pqi_free_interrupts(ctrl_info); + pqi_cancel_rescan_worker(ctrl_info); + pqi_cancel_update_time_worker(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + pqi_fail_all_outstanding_requests(ctrl_info); + pqi_clear_all_queued_raid_bypass_retries(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); +} + +static void pqi_ctrl_offline_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); + pqi_take_ctrl_offline_deferred(ctrl_info); } static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) { + if (!ctrl_info->controller_online) + return; + ctrl_info->controller_online = false; + ctrl_info->pqi_mode_enabled = false; + pqi_ctrl_block_requests(ctrl_info); sis_shutdown_ctrl(ctrl_info); pci_disable_device(ctrl_info->pci_dev); dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); - pqi_perform_lockup_action(); - pqi_fail_all_queued_requests(ctrl_info); + schedule_work(&ctrl_info->ctrl_offline_work); } static void pqi_print_ctrl_info(struct pci_dev *pci_dev, -- cgit From 37b36847a94669a898c9e3449d19d522d9c13979 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:01 -0500 Subject: scsi: smartpqi: cleanup controller branding - Improve controller branding support. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
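The simplification below leans on the existing convention of stashing a human-readable controller description in the PCI ID table's driver_data field. A sketch of that idiom with hypothetical IDs, assuming <linux/pci.h>:

static const struct pci_device_id my_pci_ids[] = {
	{ PCI_DEVICE(0x9005, 0x028f),
	  .driver_data = (kernel_ulong_t)"Vendor Smart Family Controller" },
	{ 0 }
};

static void my_print_ctrl_info(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	const char *desc;

	desc = id->driver_data ? (const char *)id->driver_data
			       : "Microsemi Smart Family Controller";
	dev_info(&pdev->dev, "%s found\n", desc);
}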
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 338ba03762b1..8f71e171095a 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -58,9 +58,6 @@ MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); -static char *hpe_branded_controller = "HPE Smart Array Controller"; -static char *microsemi_branded_controller = "Microsemi Smart Family Controller"; - static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); static void pqi_ctrl_offline_worker(struct work_struct *work); static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info); @@ -6467,19 +6464,10 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev, { char *ctrl_description; - if (id->driver_data) { + if (id->driver_data) ctrl_description = (char *)id->driver_data; - } else { - switch (id->subvendor) { - case PCI_VENDOR_ID_HP: - ctrl_description = hpe_branded_controller; - break; - case PCI_VENDOR_ID_ADAPTEC2: - default: - ctrl_description = microsemi_branded_controller; - break; - } - } + else + ctrl_description = "Microsemi Smart Family Controller"; dev_info(&pci_dev->dev, "%s found\n", ctrl_description); } -- cgit From f5b63206255f68116c117565ab703c531c5ce400 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:07 -0500 Subject: scsi: smartpqi: map more raid errors to SCSI errors enhance mapping of RAID path errors to Linux SCSI host error codes. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 8f71e171095a..affbc4ffb9a5 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2441,13 +2441,43 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request) scsi_status = error_info->status; host_byte = DID_OK; - if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) { + switch (error_info->data_out_result) { + case PQI_DATA_IN_OUT_GOOD: + break; + case PQI_DATA_IN_OUT_UNDERFLOW: xfer_count = get_unaligned_le32(&error_info->data_out_transferred); residual_count = scsi_bufflen(scmd) - xfer_count; scsi_set_resid(scmd, residual_count); if (xfer_count < scmd->underflow) host_byte = DID_SOFT_ERROR; + break; + case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: + case PQI_DATA_IN_OUT_ABORTED: + host_byte = DID_ABORT; + break; + case PQI_DATA_IN_OUT_TIMEOUT: + host_byte = DID_TIME_OUT; + break; + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: + case PQI_DATA_IN_OUT_PROTOCOL_ERROR: + case PQI_DATA_IN_OUT_BUFFER_ERROR: + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: + case PQI_DATA_IN_OUT_ERROR: + case PQI_DATA_IN_OUT_HARDWARE_ERROR: + case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: + case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: + case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: + case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: + case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: + case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: + case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: + case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: + case 
PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: + default: + host_byte = DID_ERROR; + break; } sense_data_length = get_unaligned_le16(&error_info->sense_data_length); -- cgit From 13bede676b98d595a43d36a34e1835b686d0d140 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:13 -0500 Subject: scsi: smartpqi: update timeout on admin commands Increase the timeout on admin commands from 3 seconds to 60 seconds and added a check for controller crash in the loop where the driver polls for admin command completion. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index affbc4ffb9a5..e13dee379558 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -3425,6 +3425,8 @@ static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, writel(iq_pi, admin_queues->iq_pi); } +#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 + static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, struct pqi_general_admin_response *response) { @@ -3436,7 +3438,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, admin_queues = &ctrl_info->admin_queues; oq_ci = admin_queues->oq_ci_copy; - timeout = (3 * HZ) + jiffies; + timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; while (1) { oq_pi = *admin_queues->oq_pi; @@ -3447,6 +3449,8 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, "timed out waiting for admin response\n"); return -ETIMEDOUT; } + if (!sis_is_firmware_running(ctrl_info)) + return -ENXIO; usleep_range(1000, 2000); } -- cgit From 6de783f666291763bcc6c3975e146b9b698378b1 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:19 -0500 Subject: scsi: smartpqi: enhance device add and remove messages Improved formatting of information displayed when devices are added/removed from the system. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
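The reworked pqi_dev_info() below assembles one log line with repeated snprintf() calls into a fixed buffer. A generic sketch of that accumulation pattern, shown here with scnprintf(), whose return value never exceeds the space remaining; names and fields are hypothetical:

#define INFO_LEN 128

static void my_dev_info(struct device *dev, const char *action,
	int bus, int target, int lun, const char *model)
{
	char buf[INFO_LEN];
	size_t count = 0;

	count += scnprintf(buf + count, INFO_LEN - count, "%d:%d:%d",
			   bus, target, lun);
	count += scnprintf(buf + count, INFO_LEN - count, " %.16s", model);

	dev_info(dev, "%s %s\n", action, buf);
}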
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 90 +++++++++++++++++++++++++++-------- 1 file changed, 69 insertions(+), 21 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index e13dee379558..4e016dfff972 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1448,24 +1448,66 @@ static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, return DEVICE_NOT_FOUND; } +#define PQI_DEV_INFO_BUFFER_LENGTH 128 + static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, char *action, struct pqi_scsi_dev *device) { - dev_info(&ctrl_info->pci_dev->dev, - "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c qd=%d\n", - action, - ctrl_info->scsi_host->host_no, - device->bus, - device->target, - device->lun, + ssize_t count; + char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; + + count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, + "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); + + if (device->target_lun_valid) + count += snprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "%d:%d", + device->target, + device->lun); + else + count += snprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "-:-"); + + if (pqi_is_logical_device(device)) + count += snprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %08x%08x", + *((u32 *)&device->scsi3addr), + *((u32 *)&device->scsi3addr[4])); + else + count += snprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx", device->sas_address); + + count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, + " %s %.8s %.16s ", scsi_device_type(device->devtype), device->vendor, - device->model, - pqi_is_logical_device(device) ? - pqi_raid_level_to_string(device->raid_level) : "", - device->offload_configured ? '+' : '-', - device->offload_enabled_pending ? '+' : '-', - device->queue_depth); + device->model); + + if (pqi_is_logical_device(device)) { + if (device->devtype == TYPE_DISK) + count += snprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "SSDSmartPathCap%c En%c %-12s", + device->offload_configured ? '+' : '-', + (device->offload_enabled || + device->offload_enabled_pending) ? '+' : '-', + pqi_raid_level_to_string(device->raid_level)); + } else { + count += snprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "AIO%c", device->aio_enabled ? '+' : '-'); + if (device->devtype == TYPE_DISK || + device->devtype == TYPE_ZBC) + count += snprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " qd=%-6d", device->queue_depth); + } + + dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); } /* Assumes the SCSI device list lock is held. */ @@ -1638,14 +1680,14 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, /* Remove all devices that have gone away. */ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { - if (device->sdev) - pqi_remove_device(ctrl_info, device); if (device->volume_offline) { pqi_dev_info(ctrl_info, "offline", device); pqi_show_volume_status(ctrl_info, device); } else { pqi_dev_info(ctrl_info, "removed", device); } + if (device->sdev) + pqi_remove_device(ctrl_info, device); list_del(&device->delete_list_entry); pqi_free_device(device); } @@ -1667,6 +1709,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, /* Expose any new devices. 
*/ list_for_each_entry_safe(device, next, &add_list, add_list_entry) { if (!device->sdev) { + pqi_dev_info(ctrl_info, "added", device); rc = pqi_add_device(ctrl_info, device); if (rc) { dev_warn(&ctrl_info->pci_dev->dev, @@ -1675,10 +1718,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, device->bus, device->target, device->lun); pqi_fixup_botched_add(ctrl_info, device); - continue; } } - pqi_dev_info(ctrl_info, "added", device); } } @@ -1738,7 +1779,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) bool is_physical_device; u8 *scsi3addr; static char *out_of_memory_msg = - "out of memory, device discovery stopped"; + "failed to allocate memory, device discovery stopped"; INIT_LIST_HEAD(&new_device_list_head); @@ -1839,9 +1880,16 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) goto out; } if (rc) { - dev_warn(&ctrl_info->pci_dev->dev, - "obtaining device info failed, skipping device %016llx\n", - get_unaligned_be64(device->scsi3addr)); + if (device->is_physical_device) + dev_warn(&ctrl_info->pci_dev->dev, + "obtaining device info failed, skipping physical device %016llx\n", + get_unaligned_be64( + &phys_lun_ext_entry->wwid)); + else + dev_warn(&ctrl_info->pci_dev->dev, + "obtaining device info failed, skipping logical device %08x%08x\n", + *((u32 *)&device->scsi3addr), + *((u32 *)&device->scsi3addr[4])); rc = 0; continue; } -- cgit From 588a63fea1c28009fe17f194941fb8d8b101b44e Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:25 -0500 Subject: scsi: smartpqi: make ioaccel references consistent - make all references to RAID bypass consistent throughout driver. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi.h | 13 ++++---- drivers/scsi/smartpqi/smartpqi_init.c | 56 +++++++++++++++-------------------- 2 files changed, 30 insertions(+), 39 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 2ed15cfd01d9..e9f113a0f453 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -805,12 +805,11 @@ struct pqi_scsi_dev { u8 bay; u8 box[8]; u16 phys_connector[8]; - int offload_configured; /* I/O accel RAID offload configured */ - int offload_enabled; /* I/O accel RAID offload enabled */ - int offload_enabled_pending; - int offload_to_mirror; /* Send next I/O accelerator RAID */ - /* offload request to mirror drive. */ - struct raid_map *raid_map; /* I/O accelerator RAID map */ + bool raid_bypass_configured; /* RAID bypass configured */ + bool raid_bypass_enabled; /* RAID bypass enabled */ + int offload_to_mirror; /* Send next RAID bypass request */ + /* to mirror drive. 
*/ + struct raid_map *raid_map; /* RAID bypass map */ struct pqi_sas_port *sas_port; struct scsi_device *sdev; @@ -827,7 +826,7 @@ struct pqi_scsi_dev { #define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */ #define SCSI_VPD_DEVICE_ID 0x83 /* standard page */ #define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */ -#define CISS_VPD_LV_OFFLOAD_STATUS 0xc2 /* vendor-specific page */ +#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */ #define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */ #define VPD_PAGE (1 << 8) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 4e016dfff972..5f1c607905bf 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1112,35 +1112,33 @@ error: return rc; } -static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info, +static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { int rc; u8 *buffer; - u8 offload_status; + u8 bypass_status; buffer = kmalloc(64, GFP_KERNEL); if (!buffer) return; rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, - VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64); + VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); if (rc) goto out; -#define OFFLOAD_STATUS_BYTE 4 -#define OFFLOAD_CONFIGURED_BIT 0x1 -#define OFFLOAD_ENABLED_BIT 0x2 +#define RAID_BYPASS_STATUS 4 +#define RAID_BYPASS_CONFIGURED 0x1 +#define RAID_BYPASS_ENABLED 0x2 - offload_status = buffer[OFFLOAD_STATUS_BYTE]; - device->offload_configured = - !!(offload_status & OFFLOAD_CONFIGURED_BIT); - if (device->offload_configured) { - device->offload_enabled_pending = - !!(offload_status & OFFLOAD_ENABLED_BIT); - if (pqi_get_raid_map(ctrl_info, device)) - device->offload_enabled_pending = false; - } + bypass_status = buffer[RAID_BYPASS_STATUS]; + device->raid_bypass_configured = + (bypass_status & RAID_BYPASS_CONFIGURED) != 0; + if (device->raid_bypass_configured && + (bypass_status & RAID_BYPASS_ENABLED) && + pqi_get_raid_map(ctrl_info, device) == 0) + device->raid_bypass_enabled = true; out: kfree(buffer); @@ -1214,7 +1212,7 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, device->volume_offline = false; } else { pqi_get_raid_level(ctrl_info, device); - pqi_get_offload_status(ctrl_info, device); + pqi_get_raid_bypass_status(ctrl_info, device); pqi_get_volume_status(ctrl_info, device); } } @@ -1492,9 +1490,8 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, "SSDSmartPathCap%c En%c %-12s", - device->offload_configured ? '+' : '-', - (device->offload_enabled || - device->offload_enabled_pending) ? '+' : '-', + device->raid_bypass_configured ? '+' : '-', + device->raid_bypass_enabled ? 
'+' : '-', pqi_raid_level_to_string(device->raid_level)); } else { count += snprintf(buffer + count, @@ -1546,13 +1543,13 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, sizeof(existing_device->box)); memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); - existing_device->offload_configured = new_device->offload_configured; - existing_device->offload_enabled = false; - existing_device->offload_enabled_pending = - new_device->offload_enabled_pending; existing_device->offload_to_mirror = 0; kfree(existing_device->raid_map); existing_device->raid_map = new_device->raid_map; + existing_device->raid_bypass_configured = + new_device->raid_bypass_configured; + existing_device->raid_bypass_enabled = + new_device->raid_bypass_enabled; /* To prevent this from being freed later. */ new_device->raid_map = NULL; @@ -1670,11 +1667,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, device->keep_device = true; } - list_for_each_entry(device, &ctrl_info->scsi_device_list, - scsi_device_list_entry) - device->offload_enabled = - device->offload_enabled_pending; - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); /* Remove all devices that have gone away. */ @@ -2044,7 +2036,7 @@ static inline void pqi_set_encryption_info( } /* - * Attempt to perform offload RAID mapping for a logical volume I/O. + * Attempt to perform RAID bypass mapping for a logical volume I/O. */ #define PQI_RAID_BYPASS_INELIGIBLE 1 @@ -2448,7 +2440,7 @@ static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) struct pqi_scsi_dev *device; device = io_request->scmd->device->hostdata; - device->offload_enabled = false; + device->raid_bypass_enabled = false; device->aio_enabled = false; } @@ -5002,7 +4994,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, if (pqi_is_logical_device(device)) { raid_bypassed = false; - if (device->offload_enabled && + if (device->raid_bypass_enabled && !blk_rq_is_passthrough(scmd->request)) { rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); @@ -5753,7 +5745,7 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); device = sdev->hostdata; - buffer[0] = device->offload_enabled ? '1' : '0'; + buffer[0] = device->raid_bypass_enabled ? '1' : '0'; buffer[1] = '\n'; buffer[2] = '\0'; -- cgit From a9f93392415eb0fc86c29f015822b36016278c72 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:31 -0500 Subject: scsi: smartpqi: add raid level show Display the RAID level via sysfs Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
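The attribute added below follows the standard read-only sysfs show pattern. A minimal sketch with hypothetical names; the real handler derives the string from the driver's per-device state under its list lock:

static ssize_t my_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const char *level;

	/* a real driver would look this up via sdev->hostdata under the
	 * appropriate lock; a fixed string keeps the sketch short
	 */
	level = sdev->hostdata ? "RAID-5" : "N/A";

	return snprintf(buffer, PAGE_SIZE, "%s\n", level);
}
static DEVICE_ATTR(raid_level, 0444, my_raid_level_show, NULL);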
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 5f1c607905bf..56c416f39e3a 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -144,7 +144,7 @@ static char *pqi_raid_level_to_string(u8 raid_level) if (raid_level < ARRAY_SIZE(raid_levels)) return raid_levels[raid_level]; - return ""; + return "RAID UNKNOWN"; } #define SA_RAID_0 0 @@ -5754,13 +5754,41 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, return 2; } +static ssize_t pqi_raid_level_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + char *raid_level; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + + if (pqi_is_logical_device(device)) + raid_level = pqi_raid_level_to_string(device->raid_level); + else + raid_level = "N/A"; + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); +} + static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); +static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); static struct device_attribute *pqi_sdev_attrs[] = { &dev_attr_sas_address, &dev_attr_ssd_smart_path_enabled, + &dev_attr_raid_level, NULL }; -- cgit From 8a994a04fc3a8edbcc0ba1d17219b6d8f4c38009 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:37 -0500 Subject: scsi: smartpqi: cleanup list initialization Better initialization of linked list heads. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
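The cleanup below replaces declare-then-INIT_LIST_HEAD() pairs with the LIST_HEAD() declarator, which declares and initializes an empty list head in one step (function name hypothetical):

#include <linux/list.h>

static void my_example(void)
{
	LIST_HEAD(new_list);		/* declared and initialized in one step */
	struct list_head old_style;

	INIT_LIST_HEAD(&old_style);	/* the two-step form it replaces */

	/* both are now valid, empty lists */
	WARN_ON(!list_empty(&new_list) || !list_empty(&old_style));
}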
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 56c416f39e3a..21fbcf368af1 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1591,11 +1591,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device; struct pqi_scsi_dev *next; struct pqi_scsi_dev *matching_device; - struct list_head add_list; - struct list_head delete_list; - - INIT_LIST_HEAD(&add_list); - INIT_LIST_HEAD(&delete_list); + LIST_HEAD(add_list); + LIST_HEAD(delete_list); /* * The idea here is to do as little work as possible while holding the @@ -1755,7 +1752,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) { int i; int rc; - struct list_head new_device_list_head; + LIST_HEAD(new_device_list_head); struct report_phys_lun_extended *physdev_list = NULL; struct report_log_lun_extended *logdev_list = NULL; struct report_phys_lun_extended_entry *phys_lun_ext_entry; @@ -1773,8 +1770,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) static char *out_of_memory_msg = "failed to allocate memory, device discovery stopped"; - INIT_LIST_HEAD(&new_device_list_head); - rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); if (rc) goto out; -- cgit From 5a259e32ba32c380537f3d186a311e528b9f9c94 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:43 -0500 Subject: scsi: smartpqi: add module parameters Add module parameters to disable heartbeat support and to disable shutting down the controller when a controller is taken offline. Reviewed-by: Scott Benesh Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
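The new knobs below use the standard module_param_named() idiom; with 0644 permissions they also show up under /sys/module/<module>/parameters/. A sketch with a hypothetical parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_disable_feature_x;
module_param_named(disable_feature_x, my_disable_feature_x, int, 0644);
MODULE_PARM_DESC(disable_feature_x,
	"Disable feature X (hypothetical example).");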
Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 21fbcf368af1..7bf6222c19a7 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -122,6 +122,18 @@ module_param_named(disable_device_id_wildcards, MODULE_PARM_DESC(disable_device_id_wildcards, "Disable device ID wildcards."); +static int pqi_disable_heartbeat; +module_param_named(disable_heartbeat, + pqi_disable_heartbeat, int, 0644); +MODULE_PARM_DESC(disable_heartbeat, + "Disable heartbeat."); + +static int pqi_disable_ctrl_shutdown; +module_param_named(disable_ctrl_shutdown, + pqi_disable_ctrl_shutdown, int, 0644); +MODULE_PARM_DESC(disable_ctrl_shutdown, + "Disable controller shutdown when controller locked up."); + static char *pqi_lockup_action_param; module_param_named(lockup_action, pqi_lockup_action_param, charp, 0644); @@ -5962,10 +5974,16 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) switch (get_unaligned_le16(§ion->section_id)) { case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: - ctrl_info->heartbeat_counter = table_iomem_addr + - section_offset + - offsetof(struct pqi_config_table_heartbeat, - heartbeat_counter); + if (pqi_disable_heartbeat) + dev_warn(&ctrl_info->pci_dev->dev, + "heartbeat disabled by module parameter\n"); + else + ctrl_info->heartbeat_counter = + table_iomem_addr + + section_offset + + offsetof( + struct pqi_config_table_heartbeat, + heartbeat_counter); break; } @@ -6550,7 +6568,8 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) ctrl_info->controller_online = false; ctrl_info->pqi_mode_enabled = false; pqi_ctrl_block_requests(ctrl_info); - sis_shutdown_ctrl(ctrl_info); + if (!pqi_disable_ctrl_shutdown) + sis_shutdown_ctrl(ctrl_info); pci_disable_device(ctrl_info->pci_dev); dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); schedule_work(&ctrl_info->ctrl_offline_work); -- cgit From ebaec8e3ee4acda20293db6ba41d3467de5262b5 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Wed, 3 May 2017 18:55:49 -0500 Subject: scsi: smartpqi: remove writeq/readq function definitions Instead of rewriting write/readq, use existing functions Reviewed-by: Scott Benesh Signed-off-by: Corentin Labbe Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. 
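The include added in the hunk below lost its angle-bracketed path during extraction; it is presumably one of the generic linux/io-64-nonatomic-*.h headers, which supply readq()/writeq() as two 32-bit accesses on platforms without native 64-bit MMIO. A sketch of the resulting usage, assuming the lo-hi variant:

/* Assumption: the lo-then-hi fallback header; on 64-bit targets
 * readq()/writeq() are native and this include is a no-op.
 */
#include <linux/io-64-nonatomic-lo-hi.h>

static u64 my_read_counter(void __iomem *regs)
{
	return readq(regs);	/* two readl()s on 32-bit platforms */
}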
Petersen --- drivers/scsi/smartpqi/smartpqi.h | 31 ++----------------------------- 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index e9f113a0f453..07ec8a8877de 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -16,6 +16,8 @@ * */ +#include + #if !defined(_SMARTPQI_H) #define _SMARTPQI_H @@ -1175,33 +1177,4 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd); extern struct sas_function_template pqi_sas_transport_functions; -#if !defined(readq) -#define readq readq -static inline u64 readq(const volatile void __iomem *addr) -{ - u32 lower32; - u32 upper32; - - lower32 = readl(addr); - upper32 = readl(addr + 4); - - return ((u64)upper32 << 32) | lower32; -} -#endif - -#if !defined(writeq) -#define writeq writeq -static inline void writeq(u64 value, volatile void __iomem *addr) -{ - u32 lower32; - u32 upper32; - - lower32 = lower_32_bits(value); - upper32 = upper_32_bits(value); - - writel(lower32, addr); - writel(upper32, addr + 4); -} -#endif - #endif /* _SMARTPQI_H */ -- cgit From 2d154f5ff338137a69f2f2a313520b6da2e1eb16 Mon Sep 17 00:00:00 2001 From: Kevin Barnett Date: Wed, 3 May 2017 18:55:55 -0500 Subject: scsi: smartpqi: bump driver version Reviewed-by: Scott Benesh Reviewed-by: Gerry Morong Signed-off-by: Kevin Barnett Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 7bf6222c19a7..0b11ae7e96dc 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -40,13 +40,14 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "0.9.13-370" -#define DRIVER_MAJOR 0 -#define DRIVER_MINOR 9 -#define DRIVER_RELEASE 13 -#define DRIVER_REVISION 370 - -#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")" +#define DRIVER_VERSION "1.0.4-100" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_RELEASE 4 +#define DRIVER_REVISION 100 + +#define DRIVER_NAME "Microsemi PQI Driver (v" \ + DRIVER_VERSION BUILD_TIMESTAMP ")" #define DRIVER_NAME_SHORT "smartpqi" #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) -- cgit From bfcc62ed7066268349e8e7955925bdaf4be0eec0 Mon Sep 17 00:00:00 2001 From: Kyle Fortin Date: Wed, 17 May 2017 16:21:54 -0400 Subject: scsi: libiscsi: use kvzalloc for iscsi_pool_init iscsiadm session login can fail with the following error: iscsiadm: Could not login to [iface: default, target: iqn.1986-03.com... iscsiadm: initiator reported error (9 - internal error) When /etc/iscsi/iscsid.conf sets node.session.cmds_max = 4096, it results in 64K-sized kmallocs per session. A system under fragmented slab pressure may not have any 64K objects available and fail iscsiadm session login. Even though memory objects of a smaller size are available, the large order allocation ends up failing. The kernel prints a warning and does dump_stack, like below: iscsid: page allocation failure: order:4, mode:0xc0d0 CPU: 0 PID: 2456 Comm: iscsid Not tainted 4.1.12-61.1.28.el6uek.x86_64 #2 Call Trace: [] dump_stack+0x63/0x83 [] warn_alloc_failed+0xea/0x140 [] __alloc_pages_slowpath+0x409/0x760 [] __alloc_pages_nodemask+0x2b1/0x2d0 [] ? dev_attr_host_ipaddress+0x20/0xffffffffffffc722 [] alloc_pages_current+0xaf/0x170 [] alloc_kmem_pages+0x31/0xd0 [] ? 
iscsi_transport_group+0x20/0xffffffffffffc7e2 [] kmalloc_order+0x18/0x50 [] kmalloc_order_trace+0x34/0xe0 [] ? transport_remove_classdev+0x70/0x70 [] __kmalloc+0x27d/0x2a0 [] ? complete_all+0x4d/0x60 [] iscsi_pool_init+0x69/0x160 [libiscsi] [] ? device_initialize+0xb0/0xd0 [] iscsi_session_setup+0x180/0x2f4 [libiscsi] [] ? iscsi_max_lun+0x20/0xfffffffffffffa9e [iscsi_tcp] [] iscsi_sw_tcp_session_create+0xcf/0x150 [iscsi_tcp] [] ? iscsi_max_lun+0x20/0xfffffffffffffa9e [iscsi_tcp] [] iscsi_if_create_session+0x33/0xd0 [] ? iscsi_max_lun+0x20/0xfffffffffffffa9e [iscsi_tcp] [] iscsi_if_recv_msg+0x508/0x8c0 [scsi_transport_iscsi] [] ? __alloc_pages_nodemask+0x19b/0x2d0 [] ? __kmalloc_node_track_caller+0x209/0x2c0 [] iscsi_if_rx+0x7c/0x200 [scsi_transport_iscsi] [] netlink_unicast+0x126/0x1c0 [] netlink_sendmsg+0x36c/0x400 [] sock_sendmsg+0x4d/0x60 [] ___sys_sendmsg+0x30a/0x330 [] ? handle_pte_fault+0x20c/0x230 [] ? __handle_mm_fault+0x1bc/0x330 [] ? handle_mm_fault+0xb2/0x1a0 [] __sys_sendmsg+0x49/0x90 [] SyS_sendmsg+0x19/0x20 [] system_call_fastpath+0x12/0x71 Use kvzalloc for iscsi_pool in iscsi_pool_init. Signed-off-by: Kyle Fortin Tested-by: Kyle Fortin Reviewed-by: Joseph Slember Reviewed-by: Lance Hartmann Acked-by: Lee Duncan Signed-off-by: Martin K. Petersen --- drivers/scsi/libiscsi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index dd6828f7f772..42381adf0769 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2556,7 +2556,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) * the array. */ if (items) num_arrays++; - q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL); + q->pool = kvzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL); if (q->pool == NULL) return -ENOMEM; @@ -2590,7 +2590,7 @@ void iscsi_pool_free(struct iscsi_pool *q) for (i = 0; i < q->max; i++) kfree(q->pool[i]); - kfree(q->pool); + kvfree(q->pool); } EXPORT_SYMBOL_GPL(iscsi_pool_free); -- cgit From 4bbd458eaa789959f23f4c998d30b972715e1013 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Thu, 18 May 2017 20:17:38 +0530 Subject: scsi: csiostor: add support for Chelsio T6 adapters Enable probe for T6 adapters, add code to flash T6 firmware and firmware config file, use T6 specific macros. Signed-off-by: Varun Prakash Signed-off-by: Martin K. 
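The T6 support below repeatedly branches on the chip family to pick firmware and config file names. A condensed sketch of that dispatch, using the device-ID mask and file names that appear in the patch (function name hypothetical):

static const char *my_fw_name(u16 device_id)
{
	if ((device_id & 0xF000) == 0x5000)	/* T5 family */
		return "cxgb4/t5fw.bin";
	return "cxgb4/t6fw.bin";		/* otherwise assume T6 */
}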
Petersen --- drivers/scsi/csiostor/csio_hw.c | 79 ++++++++++++++++++++++++++++++------ drivers/scsi/csiostor/csio_hw_chip.h | 14 +++++++ drivers/scsi/csiostor/csio_hw_t5.c | 29 +------------ drivers/scsi/csiostor/csio_init.c | 6 ++- drivers/scsi/csiostor/csio_wr.c | 4 +- 5 files changed, 88 insertions(+), 44 deletions(-) diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index dab195f04da7..c6e18140759c 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -794,18 +794,24 @@ csio_hw_dev_ready(struct csio_hw *hw) { uint32_t reg; int cnt = 6; + int src_pf; while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) && (--cnt != 0)) mdelay(100); - if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) || - (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) { + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + src_pf = SOURCEPF_G(reg); + else + src_pf = T6_SOURCEPF_G(reg); + + if ((cnt == 0) && (((int32_t)(src_pf) < 0) || + (src_pf >= CSIO_MAX_PFN))) { csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); return -EIO; } - hw->pfn = SOURCEPF_G(reg); + hw->pfn = src_pf; return 0; } @@ -1581,10 +1587,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) unsigned int mtype = 0, maddr = 0; uint32_t *cfg_data; int value_to_add = 0; + const char *fw_cfg_file; + + if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) + fw_cfg_file = FW_CFG_NAME_T5; + else + fw_cfg_file = FW_CFG_NAME_T6; - if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) { + if (request_firmware(&cf, fw_cfg_file, dev) < 0) { csio_err(hw, "could not find config file %s, err: %d\n", - FW_CFG_NAME_T5, ret); + fw_cfg_file, ret); return -ENOENT; } @@ -1623,9 +1635,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word); } if (ret == 0) { - csio_info(hw, "config file upgraded to %s\n", - FW_CFG_NAME_T5); - snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5); + csio_info(hw, "config file upgraded to %s\n", fw_cfg_file); + snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file); } leave: @@ -1886,6 +1897,19 @@ static struct fw_info fw_info_array[] = { .intfver_iscsi = FW_INTFVER(T5, ISCSI), .intfver_fcoe = FW_INTFVER(T5, FCOE), }, + }, { + .chip = CHELSIO_T6, + .fs_name = FW_CFG_NAME_T6, + .fw_mod_name = FW_FNAME_T6, + .fw_hdr = { + .chip = FW_HDR_CHIP_T6, + .fw_ver = __cpu_to_be32(FW_VERSION(T6)), + .intfver_nic = FW_INTFVER(T6, NIC), + .intfver_vnic = FW_INTFVER(T6, VNIC), + .intfver_ri = FW_INTFVER(T6, RI), + .intfver_iscsi = FW_INTFVER(T6, ISCSI), + .intfver_fcoe = FW_INTFVER(T6, FCOE), + }, } }; @@ -2002,6 +2026,7 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) struct device *dev = &pci_dev->dev ; const u8 *fw_data = NULL; unsigned int fw_size = 0; + const char *fw_bin_file; /* This is the firmware whose headers the driver was compiled * against @@ -2014,9 +2039,14 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) return -EINVAL; } - if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) { + if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) + fw_bin_file = FW_FNAME_T5; + else + fw_bin_file = FW_FNAME_T6; + + if (request_firmware(&fw, fw_bin_file, dev) < 0) { csio_err(hw, "could not find firmware image %s, err: %d\n", - FW_FNAME_T5, ret); + fw_bin_file, ret); } else { fw_data = fw->data; fw_size = fw->size; @@ -2241,9 +2271,14 @@ static void csio_hw_intr_enable(struct csio_hw *hw) { uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); - uint32_t 
pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + u32 pf = 0; uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A); + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + else + pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + /* * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up * by FW, so do nothing for INTX. @@ -2293,7 +2328,12 @@ csio_hw_intr_enable(struct csio_hw *hw) void csio_hw_intr_disable(struct csio_hw *hw) { - uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + u32 pf = 0; + + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + else + pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) return; @@ -2918,6 +2958,8 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw) */ static void csio_le_intr_handler(struct csio_hw *hw) { + enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); + static struct intr_info le_intr_info[] = { { LIPMISS_F, "LE LIP miss", -1, 0 }, { LIP0_F, "LE 0 LIP error", -1, 0 }, @@ -2927,7 +2969,18 @@ static void csio_le_intr_handler(struct csio_hw *hw) { 0, NULL, 0, 0 } }; - if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info)) + static struct intr_info t6_le_intr_info[] = { + { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, + { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, + { TCAMINTPERR_F, "LE parity error", -1, 1 }, + { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, + (chip == CHELSIO_T5) ? + le_intr_info : t6_le_intr_info)) csio_hw_fatal_err(hw); } diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h index b56a11d817be..aaabdbe11d88 100644 --- a/drivers/scsi/csiostor/csio_hw_chip.h +++ b/drivers/scsi/csiostor/csio_hw_chip.h @@ -39,11 +39,15 @@ /* Define MACRO values */ #define CSIO_HW_T5 0x5000 #define CSIO_T5_FCOE_ASIC 0x5600 +#define CSIO_HW_T6 0x6000 +#define CSIO_T6_FCOE_ASIC 0x6600 #define CSIO_HW_CHIP_MASK 0xF000 #define T5_REGMAP_SIZE (332 * 1024) #define FW_FNAME_T5 "cxgb4/t5fw.bin" #define FW_CFG_NAME_T5 "cxgb4/t5-config.txt" +#define FW_FNAME_T6 "cxgb4/t6fw.bin" +#define FW_CFG_NAME_T6 "cxgb4/t6-config.txt" #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) #define CHELSIO_CHIP_FPGA 0x100 @@ -51,12 +55,17 @@ #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) #define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 enum chip_type { T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), T5_FIRST_REV = T5_A0, T5_LAST_REV = T5_A1, + + T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0), + T6_FIRST_REV = T6_A0, + T6_LAST_REV = T6_A0, }; static inline int csio_is_t5(uint16_t chip) @@ -64,6 +73,11 @@ static inline int csio_is_t5(uint16_t chip) return (chip == CSIO_HW_T5); } +static inline int csio_is_t6(uint16_t chip) +{ + return (chip == CSIO_HW_T6); +} + /* Define MACRO DEFINITIONS */ #define CSIO_DEVICE(devid, idx) \ { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c index 3267f4f627c9..f24def6c6fd1 100644 --- a/drivers/scsi/csiostor/csio_hw_t5.c +++ b/drivers/scsi/csiostor/csio_hw_t5.c @@ -71,27 +71,6 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win) static void csio_t5_pcie_intr_handler(struct csio_hw *hw) { - static struct intr_info sysbus_intr_info[] = { - { 
RNPP_F, "RXNP array parity error", -1, 1 }, - { RPCP_F, "RXPC array parity error", -1, 1 }, - { RCIP_F, "RXCIF array parity error", -1, 1 }, - { RCCP_F, "Rx completions control array parity error", -1, 1 }, - { RFTP_F, "RXFT array parity error", -1, 1 }, - { 0, NULL, 0, 0 } - }; - static struct intr_info pcie_port_intr_info[] = { - { TPCP_F, "TXPC array parity error", -1, 1 }, - { TNPP_F, "TXNP array parity error", -1, 1 }, - { TFTP_F, "TXFT array parity error", -1, 1 }, - { TCAP_F, "TXCA array parity error", -1, 1 }, - { TCIP_F, "TXCIF array parity error", -1, 1 }, - { RCAP_F, "RXCA array parity error", -1, 1 }, - { OTDD_F, "outbound request TLP discarded", -1, 1 }, - { RDPE_F, "Rx data parity error", -1, 1 }, - { TDUE_F, "Tx uncorrectable data error", -1, 1 }, - { 0, NULL, 0, 0 } - }; - static struct intr_info pcie_intr_info[] = { { MSTGRPPERR_F, "Master Response Read Queue parity error", -1, 1 }, @@ -133,13 +112,7 @@ csio_t5_pcie_intr_handler(struct csio_hw *hw) }; int fat; - fat = csio_handle_intr_status(hw, - PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A, - sysbus_intr_info) + - csio_handle_intr_status(hw, - PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A, - pcie_port_intr_info) + - csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info); + fat = csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info); if (fat) csio_hw_fatal_err(hw); } diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index dbe416ff46c2..ea0c31086cc6 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -952,8 +952,9 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct csio_hw *hw; struct csio_lnode *ln; - /* probe only T5 cards */ - if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK))) + /* probe only T5 and T6 cards */ + if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) && + !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK))) return -ENODEV; rv = csio_pci_init(pdev, &bars); @@ -1253,3 +1254,4 @@ MODULE_LICENSE(CSIO_DRV_LICENSE); MODULE_DEVICE_TABLE(pci, csio_pci_tbl); MODULE_VERSION(CSIO_DRV_VERSION); MODULE_FIRMWARE(FW_FNAME_T5); +MODULE_FIRMWARE(FW_FNAME_T6); diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index e8f18174f2e9..c0a17789752f 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -480,12 +480,14 @@ csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx, flq_idx = csio_q_iq_flq_idx(hw, iq_idx); if (flq_idx != -1) { + enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); struct csio_q *flq = hw->wrm.q_arr[flq_idx]; iqp.fl0paden = 1; iqp.fl0packen = flq->un.fl.packen ? 1 : 0; iqp.fl0fbmin = X_FETCHBURSTMIN_64B; - iqp.fl0fbmax = X_FETCHBURSTMAX_512B; + iqp.fl0fbmax = ((chip == CHELSIO_T5) ? + X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B); iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ; iqp.fl0addr = csio_q_pstart(hw, flq_idx); } -- cgit From ddccd9528d89ca4db30dab5229f6a0cceb095781 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 18 May 2017 09:18:11 -0700 Subject: scsi: storvsc: use in place iterator function In 4.12-rc1, new functions were added to support iterating over elements in the vmbus event ring. This patch uses them to simplify the ring buffer handling in virtual SCSI driver as well. Signed-off-by: Stephen Hemminger Signed-off-by: Martin K. 
Petersen --- drivers/scsi/storvsc_drv.c | 44 +++++++++++++++----------------------------- 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ae966dc3bbc5..f8a1649e4c63 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1149,13 +1149,9 @@ static void storvsc_on_receive(struct storvsc_device *stor_device, static void storvsc_on_channel_callback(void *context) { struct vmbus_channel *channel = (struct vmbus_channel *)context; + const struct vmpacket_descriptor *desc; struct hv_device *device; struct storvsc_device *stor_device; - u32 bytes_recvd; - u64 request_id; - unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)]; - struct storvsc_cmd_request *request; - int ret; if (channel->primary_channel != NULL) device = channel->primary_channel->device_obj; @@ -1166,32 +1162,22 @@ static void storvsc_on_channel_callback(void *context) if (!stor_device) return; - do { - ret = vmbus_recvpacket(channel, packet, - ALIGN((sizeof(struct vstor_packet) - - vmscsi_size_delta), 8), - &bytes_recvd, &request_id); - if (ret == 0 && bytes_recvd > 0) { - - request = (struct storvsc_cmd_request *) - (unsigned long)request_id; - - if ((request == &stor_device->init_request) || - (request == &stor_device->reset_request)) { - - memcpy(&request->vstor_packet, packet, - (sizeof(struct vstor_packet) - - vmscsi_size_delta)); - complete(&request->wait_event); - } else { - storvsc_on_receive(stor_device, - (struct vstor_packet *)packet, - request); - } + foreach_vmbus_pkt(desc, channel) { + void *packet = hv_pkt_data(desc); + struct storvsc_cmd_request *request; + + request = (struct storvsc_cmd_request *) + ((unsigned long)desc->trans_id); + + if (request == &stor_device->init_request || + request == &stor_device->reset_request) { + memcpy(&request->vstor_packet, packet, + (sizeof(struct vstor_packet) - vmscsi_size_delta)); + complete(&request->wait_event); } else { - break; + storvsc_on_receive(stor_device, packet, request); } - } while (1); + } } static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size, -- cgit From 2371cd90abe3fa1b88e15111abf2cc0a26db6e52 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 18 May 2017 09:18:12 -0700 Subject: scsi: storvsc: remove unnecessary channel inbound lock In storvsc driver, inbound messages do not go through inbound lock. The only effect of this lock was is to provide a barrier for connect and remove logic. Signed-off-by: Stephen Hemminger Signed-off-by: Martin K. 
Petersen --- drivers/hv/channel_mgmt.c | 1 - drivers/scsi/storvsc_drv.c | 8 +++----- include/linux/hyperv.h | 1 - 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 735f9363f2e4..685572bae1f0 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -332,7 +332,6 @@ static struct vmbus_channel *alloc_channel(void) if (!channel) return NULL; - spin_lock_init(&channel->inbound_lock); spin_lock_init(&channel->lock); INIT_LIST_HEAD(&channel->sc_list); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index f8a1649e4c63..8d955db6424f 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1206,13 +1206,13 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size, static int storvsc_dev_remove(struct hv_device *device) { struct storvsc_device *stor_device; - unsigned long flags; stor_device = hv_get_drvdata(device); - spin_lock_irqsave(&device->channel->inbound_lock, flags); stor_device->destroy = true; - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); + + /* Make sure flag is set before waiting */ + wmb(); /* * At this point, all outbound traffic should be disable. We @@ -1229,9 +1229,7 @@ static int storvsc_dev_remove(struct hv_device *device) * we have drained - to drain the outgoing packets, we need to * allow incoming packets. */ - spin_lock_irqsave(&device->channel->inbound_lock, flags); hv_set_drvdata(device, NULL); - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); /* Close the channel */ vmbus_close(device->channel); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index e09fc8290c2f..b7d7bbec74e0 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -744,7 +744,6 @@ struct vmbus_channel { u32 ringbuffer_pagecount; struct hv_ring_buffer_info outbound; /* send to parent */ struct hv_ring_buffer_info inbound; /* receive from parent */ - spinlock_t inbound_lock; struct vmbus_close_msg close_msg; -- cgit From 42c335f7e67029d2e01711f2f2bc6252277c8993 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 9 May 2017 15:34:44 -0700 Subject: scsi: csiostor: Avoid content leaks and casts When copying attributes, the len argument was padded out and the resulting memcpy() would copy beyond the end of the source buffer. Avoid this, and use size_t for val_len to avoid all the casts. Similarly, avoid source buffer casts and use void *. Additionally enforces val_len can be represented by u16 and that the DMA buffer was not overflowed. Fixes the size of mfa, which is not FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN (but it will be padded up to 4). This was noticed by the future CONFIG_FORTIFY_SOURCE checks. Cc: Daniel Micay Signed-off-by: Kees Cook Acked-by: Varun Prakash Signed-off-by: Martin K. 
Petersen --- drivers/scsi/csiostor/csio_lnode.c | 43 +++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index c00b2ff72b55..be5ee2d37815 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -238,14 +238,23 @@ csio_osname(uint8_t *buf, size_t buf_len) } static inline void -csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len) +csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len) { + uint16_t len; struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; + + if (WARN_ON(val_len > U16_MAX)) + return; + + len = val_len; + ae->type = htons(type); len += 4; /* includes attribute type and length */ len = (len + 3) & ~3; /* should be multiple of 4 bytes */ ae->len = htons(len); - memcpy(ae->value, val, len); + memcpy(ae->value, val, val_len); + if (len > val_len) + memset(ae->value + val_len, 0, len - val_len); *ptr += len; } @@ -335,7 +344,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) numattrs++; val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, - (uint8_t *)&val, + &val, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); numattrs++; @@ -346,23 +355,22 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) else val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, - (uint8_t *)&val, - FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); + &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); numattrs++; mfs = ln->ln_sparm.csp.sp_bb_data; csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, - (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN); + &mfs, sizeof(mfs)); numattrs++; strcpy(buf, "csiostor"); csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, - (uint16_t)strlen(buf)); + strlen(buf)); numattrs++; if (!csio_hostname(buf, sizeof(buf))) { csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, - buf, (uint16_t)strlen(buf)); + buf, strlen(buf)); numattrs++; } attrib_blk->numattrs = htonl(numattrs); @@ -444,33 +452,32 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) strcpy(buf, "Chelsio Communications"); csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf, - (uint16_t)strlen(buf)); + strlen(buf)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER, - hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn)); + hw->vpd.sn, sizeof(hw->vpd.sn)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id, - (uint16_t)sizeof(hw->vpd.id)); + sizeof(hw->vpd.id)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION, - hw->model_desc, (uint16_t)strlen(hw->model_desc)); + hw->model_desc, strlen(hw->model_desc)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION, - hw->hw_ver, (uint16_t)sizeof(hw->hw_ver)); + hw->hw_ver, sizeof(hw->hw_ver)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION, - hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str)); + hw->fwrev_str, strlen(hw->fwrev_str)); numattrs++; if (!csio_osname(buf, sizeof(buf))) { csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION, - buf, (uint16_t)strlen(buf)); + buf, strlen(buf)); numattrs++; } csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD, - (uint8_t *)&maxpayload, - FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); + &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); len = (uint32_t)(pld - 
(uint8_t *)cmd); numattrs++; attrib_blk->numattrs = htonl(numattrs); @@ -1794,6 +1801,8 @@ csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); int rv; + BUG_ON(pld_len > pld->len); + io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ io_req->fw_handle = (uintptr_t) (io_req); io_req->eq_idx = mgmtm->eq_idx; -- cgit From 5c146686e32085e76ad9e2957f3dee9b28fe4f22 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 May 2017 10:32:18 +0200 Subject: scsi: smartpqi: mark PM functions as __maybe_unused The newly added suspend/resume support causes harmless warnings when CONFIG_PM is disabled: smartpqi/smartpqi_init.c:5147:12: error: 'pqi_ctrl_wait_for_pending_io' defined but not used [-Werror=unused-function] smartpqi/smartpqi_init.c:2019:13: error: 'pqi_wait_until_lun_reset_finished' defined but not used [-Werror=unused-function] smartpqi/smartpqi_init.c:2013:13: error: 'pqi_wait_until_scan_finished' defined but not used [-Werror=unused-function] We can avoid the warnings by removing the #ifdef around the handlers and instead marking them as __maybe_unused, which will let gcc drop the unused code silently. Fixes: f44d210312a6 ("scsi: smartpqi: add suspend and resume support") Signed-off-by: Arnd Bergmann Signed-off-by: Martin K. Petersen --- drivers/scsi/smartpqi/smartpqi_init.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 0b11ae7e96dc..cb8f886e705c 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -6213,8 +6213,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return 0; } -#if defined(CONFIG_PM) - static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) { unsigned int i; @@ -6321,8 +6319,6 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return 0; } -#endif /* CONFIG_PM */ - static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) { @@ -6696,9 +6692,7 @@ static void pqi_process_module_params(void) pqi_process_lockup_action_param(); } -#if defined(CONFIG_PM) - -static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) +static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct pqi_ctrl_info *ctrl_info; @@ -6728,7 +6722,7 @@ static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) return 0; } -static int pqi_resume(struct pci_dev *pci_dev) +static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) { int rc; struct pqi_ctrl_info *ctrl_info; @@ -6759,8 +6753,6 @@ static int pqi_resume(struct pci_dev *pci_dev) return pqi_ctrl_init_resume(ctrl_info); } -#endif /* CONFIG_PM */ - /* Define the PCI IDs for the controllers that we support. */ static const struct pci_device_id pqi_pci_id_table[] = { { -- cgit From eb045e046d5b2aab7710f82c2e5fb1403c69332b Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 22 May 2017 13:00:29 -0500 Subject: scsi: hisi_sas: add null check before indirect pointer dereference Add null check before indirectly dereferencing pointer task->lldd_task in statement u32 tag = slot->idx; Addresses-Coverity-ID: 1373843 Signed-off-by: Gustavo A. R. Silva Reviewed-by: John Garry Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index d622db502ec9..f720d3ced851 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -963,7 +963,7 @@ static int hisi_sas_abort_task(struct sas_task *task) HISI_SAS_INT_ABT_DEV, 0); rc = hisi_sas_softreset_ata_disk(device); } - } else if (task->task_proto & SAS_PROTOCOL_SMP) { + } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; -- cgit From 75c9e65f30965a41e570a9f5edaabed812c092ea Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 22 May 2017 13:10:53 -0700 Subject: scsi: qla2xxx: Remove an unused structure member qla_tgt_cmd.free_work is not used by the qla2xxx driver. Hence remove that member of struct qla_tgt_cmd. Signed-off-by: Bart Van Assche Acked-by: Himanshu Madhani Reviewed-by: Christoph Hellwig Cc: Quinn Tran Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Andy Grover Cc: David Disseldorp Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_target.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index d64420251194..dae278859554 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -862,7 +862,6 @@ struct qla_tgt_cmd { struct se_cmd se_cmd; struct fc_port *sess; int state; - struct work_struct free_work; struct work_struct work; /* Sense buffer that will be mapped into outgoing status */ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; -- cgit From 1ab6207210b437e8c154e0e93a4c3fa57fc9edd1 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 26 May 2017 11:11:37 +0100 Subject: scsi: lpfc: fix spelling mistake "entrys" -> "entries" Trivial fix to spelling mistake in debugfs message Signed-off-by: Colin Ian King Acked-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 4bcb92c844ca..efd8a17ac2e5 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -323,7 +323,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) raw_index = phba->hbq_get[i]; getidx = le32_to_cpu(raw_index); len += snprintf(buf+len, size-len, - "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", + "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx); -- cgit From 78cfe97ffa7581cd8ce6db0232862f12f3bb0e68 Mon Sep 17 00:00:00 2001 From: "Milan P. Gandhi" Date: Thu, 1 Jun 2017 17:35:49 +0530 Subject: scsi: fcoe: Fix few small typos in fcoe.c This patch does a cleanup and fixes few small typos in fcoe.c Signed-off-by: Milan P. Gandhi Acked-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/fcoe/fcoe.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 90939f66bc0d..7b960d3293b7 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -519,7 +519,7 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) * @skb: The receive skb * @netdev: The associated net device * @ptype: The packet_type structure which was used to register this handler - * @orig_dev: The original net_device the the skb was received on. + * @orig_dev: The original net_device the skb was received on. * (in case dev is a bond) * * Returns: 0 for success @@ -542,7 +542,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, * @skb: The receive skb * @netdev: The associated net device * @ptype: The packet_type structure which was used to register this handler - * @orig_dev: The original net_device the the skb was received on. + * @orig_dev: The original net_device the skb was received on. * (in case dev is a bond) * * Returns: 0 for success @@ -2590,7 +2590,7 @@ module_exit(fcoe_exit); * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler * @seq: active sequence in the FLOGI or FDISC exchange * @fp: response frame, or error encoded in a pointer (timeout) - * @arg: pointer the the fcoe_ctlr structure + * @arg: pointer to the fcoe_ctlr structure * * This handles MAC address management for FCoE, then passes control on to * the libfc FLOGI response handler. @@ -2619,7 +2619,7 @@ done: * fcoe_logo_resp() - FCoE specific LOGO response handler * @seq: active sequence in the LOGO exchange * @fp: response frame, or error encoded in a pointer (timeout) - * @arg: pointer the the fcoe_ctlr structure + * @arg: pointer to the fcoe_ctlr structure * * This handles MAC address management for FCoE, then passes control on to * the libfc LOGO response handler. -- cgit From 3011b4826891244a4c8a8c668251705a1268f650 Mon Sep 17 00:00:00 2001 From: "Milan P. Gandhi" Date: Thu, 1 Jun 2017 17:38:55 +0530 Subject: scsi: fcoe: Remove an extra out label in _fcoe_create function This patch removes an extra out label in _fcoe_create function where we return if creation of FCOE interface is failed. Signed-off-by: Milan P. Gandhi Acked-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/fcoe/fcoe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 7b960d3293b7..ea21e7bb7ef5 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2258,7 +2258,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode, fcoe_interface_cleanup(fcoe); mutex_unlock(&fcoe_config_mutex); fcoe_ctlr_device_delete(ctlr_dev); - goto out; + return rc; } /* Make this the "master" N_Port */ @@ -2299,7 +2299,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode, out_nodev: rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); -out: + return rc; } -- cgit From 96e6c633ec48ca487cc150d77810ac500bab35f8 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Thu, 1 Jun 2017 12:48:33 +0530 Subject: scsi: csiostor: add check for supported fw version T6 FCoE support is added in fw version 1.16.45.0 so return error if fw version < 1.16.45.0 for T6 adapters. Signed-off-by: Varun Prakash Signed-off-by: Martin K. 
Petersen --- drivers/scsi/csiostor/csio_hw.c | 19 +++++++++++++++++++ drivers/scsi/csiostor/csio_hw.h | 1 + 2 files changed, 20 insertions(+) diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index c6e18140759c..2029ad225121 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -2068,6 +2068,17 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) return ret; } +static int csio_hw_check_fwver(struct csio_hw *hw) +{ + if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) && + (hw->fwrev < CSIO_MIN_T6_FW)) { + csio_hw_print_fw_version(hw, "T6 unsupported fw"); + return -1; + } + + return 0; +} + /* * csio_hw_configure - Configure HW * @hw - HW module @@ -2135,6 +2146,10 @@ csio_hw_configure(struct csio_hw *hw) if (rv != 0) goto out; + rv = csio_hw_check_fwver(hw); + if (rv < 0) + goto out; + /* If the firmware doesn't support Configuration Files, * return an error. */ @@ -2162,6 +2177,10 @@ csio_hw_configure(struct csio_hw *hw) } } else { + rv = csio_hw_check_fwver(hw); + if (rv < 0) + goto out; + if (hw->fw_state == CSIO_DEV_STATE_INIT) { hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 62758e830d3b..9acb89538e29 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h @@ -71,6 +71,7 @@ #define CSIO_MAX_CMD_PER_LUN 32 #define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024) #define CSIO_MAX_SECTOR_SIZE 128 +#define CSIO_MIN_T6_FW 0x01102D00 /* FW 1.16.45.0 */ /* Interrupts */ #define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode -- cgit From 5185b32887d25699e5398d70f5ada945ea838d99 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:48 -0700 Subject: scsi: qedf: Enable basic FDMI information. For libfc to register FDMI attributes we need to do two things: - Set the appropriate fc_host attributes that libfc will use to form the FDMI registration commands - Set lport->fdmi_enabled to 1 Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 56 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a5c97342fd5d..7313bda304e0 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -22,6 +22,7 @@ #include #include #include "qedf.h" +#include const struct qed_fcoe_ops *qed_ops; @@ -1334,6 +1335,59 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); } +static void qedf_setup_fdmi(struct qedf_ctx *qedf) +{ + struct fc_lport *lport = qedf->lport; + struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host); + u8 buf[8]; + int i, pos; + + /* + * fdmi_enabled needs to be set for libfc to execute FDMI registration. + */ + lport->fdmi_enabled = 1; + + /* + * Setup the necessary fc_host attributes to that will be used to fill + * in the FDMI information. 
+ */ + + /* Get the PCI-e Device Serial Number Capability */ + pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + pos += 4; + for (i = 0; i < 8; i++) + pci_read_config_byte(qedf->pdev, pos + i, &buf[i]); + + snprintf(fc_host->serial_number, + sizeof(fc_host->serial_number), + "%02X%02X%02X%02X%02X%02X%02X%02X", + buf[7], buf[6], buf[5], buf[4], + buf[3], buf[2], buf[1], buf[0]); + } else + snprintf(fc_host->serial_number, + sizeof(fc_host->serial_number), "Unknown"); + + snprintf(fc_host->manufacturer, + sizeof(fc_host->manufacturer), "%s", "Cavium Inc."); + + snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000"); + + snprintf(fc_host->model_description, sizeof(fc_host->model_description), + "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller" + "(FCoE)"); + + snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version), + "Rev %d", qedf->pdev->revision); + + snprintf(fc_host->driver_version, sizeof(fc_host->driver_version), + "%s", QEDF_VERSION); + + snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version), + "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION, + FW_REVISION_VERSION, FW_ENGINEERING_VERSION); +} + static int qedf_lport_setup(struct qedf_ctx *qedf) { struct fc_lport *lport = qedf->lport; @@ -1377,6 +1431,8 @@ static int qedf_lport_setup(struct qedf_ctx *qedf) snprintf(fc_host_symbolic_name(lport->host), 256, "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION); + qedf_setup_fdmi(qedf); + return 0; } -- cgit From 12d0b12c57cb7bf0aebf48da79f277330c2552c3 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:49 -0700 Subject: scsi: qedf: Update copyright to 2017. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 2 +- drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 2 +- drivers/scsi/qedf/drv_scsi_fw_funcs.c | 2 +- drivers/scsi/qedf/drv_scsi_fw_funcs.h | 2 +- drivers/scsi/qedf/qedf.h | 2 +- drivers/scsi/qedf/qedf_attr.c | 2 +- drivers/scsi/qedf/qedf_dbg.h | 2 +- drivers/scsi/qedf/qedf_debugfs.c | 2 +- drivers/scsi/qedf/qedf_els.c | 2 +- drivers/scsi/qedf/qedf_fip.c | 2 +- drivers/scsi/qedf/qedf_hsi.h | 2 +- drivers/scsi/qedf/qedf_io.c | 2 +- drivers/scsi/qedf/qedf_main.c | 2 +- drivers/scsi/qedf/qedf_version.h | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c index 8c65e3b034dc..7d91e53562f8 100644 --- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c @@ -1,5 +1,5 @@ /* QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h index 617529b058f4..f9c50faa748e 100644 --- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h @@ -1,5 +1,5 @@ /* QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. 
* * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c index 11e0cc082ec0..5d5095e3d96d 100644 --- a/drivers/scsi/qedf/drv_scsi_fw_funcs.c +++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c @@ -1,5 +1,5 @@ /* QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h index 9cb45410bc45..8fbe6e4d0b4f 100644 --- a/drivers/scsi/qedf/drv_scsi_fw_funcs.h +++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h @@ -1,5 +1,5 @@ /* QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 07ee88200e91..4d038926a455 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c index 47720611ad2c..1349f8a66e29 100644 --- a/drivers/scsi/qedf/qedf_attr.c +++ b/drivers/scsi/qedf/qedf_attr.c @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h index 7d173f48a81e..50083cae84c3 100644 --- a/drivers/scsi/qedf/qedf_dbg.h +++ b/drivers/scsi/qedf/qedf_dbg.h @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index 00a1d6405ebe..2b1ef3075e93 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 QLogic Corporation + * Copyright (c) 2016-2017 QLogic Corporation * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index 90627033bde6..69a365da3891 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. 
* * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index e10b91cc3c62..2dfb8179ed0e 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h index dfd65dec2874..7faef80c5f7a 100644 --- a/drivers/scsi/qedf/qedf_hsi.h +++ b/drivers/scsi/qedf/qedf_hsi.h @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 1d7f90d0adc1..ca9097bb7308 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7313bda304e0..a3b040a18535 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h index 4ae5f537a440..d46c487a6e73 100644 --- a/drivers/scsi/qedf/qedf_version.h +++ b/drivers/scsi/qedf/qedf_version.h @@ -1,6 +1,6 @@ /* * QLogic FCoE Offload Driver - * Copyright (c) 2016 Cavium Inc. + * Copyright (c) 2016-2017 Cavium Inc. * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of -- cgit From 914fff102e00455bb867871e612b65524839d271 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:50 -0700 Subject: scsi: qedf: Honor qed_ops->common->set_fp_int() return code. We need to check the return code the set_fp_int() callback in case we were not allocated any fastpath interrupts or there was an error setting up the fastpath interrupts from the qed perspective. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qedf/qedf_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a3b040a18535..549598f75c63 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2035,6 +2035,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf) * Learn interrupt configuration */ rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); + if (rc <= 0) + return 0; rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); if (rc) -- cgit From 53c51adbe6f83eba94fd39000f98cf4235a89e4c Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:51 -0700 Subject: scsi: qedf: Look at all descriptors when processing a clear virtual link. If there are multiple descriptors for a particular type in a clear virtual link we receive, we will not process it correctly but rather take the last value. This can cause us not to not flap the virtual link as the value from the descriptors that we compare against the our stored FCF or fc_lport values may not match. Change this to do a comparison when processing the each descriptor instead of at the end and then set a bool if we need to do the reset. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_fip.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index 2dfb8179ed0e..64b04f245e15 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -156,10 +156,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) struct fip_wwn_desc *wp; struct fip_vn_desc *vp; size_t rlen, dlen; - uint32_t cvl_port_id; - __u8 cvl_mac[ETH_ALEN]; u16 op; u8 sub; + bool do_reset = false; eth_hdr = (struct ethhdr *)skb_mac_header(skb); fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); @@ -190,8 +189,6 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) return; } - cvl_port_id = 0; - memset(cvl_mac, 0, ETH_ALEN); /* * We need to loop through the CVL descriptors to determine * if we want to reset the fcoe link @@ -205,7 +202,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) mp = (struct fip_mac_desc *)desc; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "fd_mac=%pM\n", mp->fd_mac); - ether_addr_copy(cvl_mac, mp->fd_mac); + if (ether_addr_equal(mp->fd_mac, + qedf->ctlr.sel_fcf->fcf_mac)) + do_reset = true; break; case FIP_DT_NAME: wp = (struct fip_wwn_desc *)desc; @@ -217,7 +216,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) vp = (struct fip_vn_desc *)desc; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id)); - cvl_port_id = ntoh24(vp->fd_fc_id); + if (ntoh24(vp->fd_fc_id) == + qedf->lport->port_id) + do_reset = true; break; default: /* Ignore anything else */ @@ -228,11 +229,8 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, - "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id, - cvl_mac); - if (cvl_port_id == qedf->lport->port_id && - ether_addr_equal(cvl_mac, - qedf->ctlr.sel_fcf->fcf_mac)) { + "do_reset=%d.\n", do_reset); + if (do_reset) { fcoe_ctlr_link_down(&qedf->ctlr); qedf_wait_for_upload(qedf); fcoe_ctlr_link_up(&qedf->ctlr); -- cgit From ff34e8e84fbbd3e3f31a4d54cc0501aeaa155d43 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:52 -0700 Subject: scsi: qedf: Check that fcport is offloaded before dereferencing pointers in 
initiate_abts|cleanup. If an fcport is not offloaded then the members of the qedf_rport struct are undefined which may cause a system crash. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_io.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index ca9097bb7308..db160046f3e0 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1476,8 +1476,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) { struct fc_lport *lport; struct qedf_rport *fcport = io_req->fcport; - struct fc_rport_priv *rdata = fcport->rdata; - struct qedf_ctx *qedf = fcport->qedf; + struct fc_rport_priv *rdata; + struct qedf_ctx *qedf; u16 xid; u32 r_a_tov = 0; int rc = 0; @@ -1485,15 +1485,18 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) struct fcoe_wqe *sqe; u16 sqe_idx; - r_a_tov = rdata->r_a_tov; - lport = qedf->lport; - + /* Sanity check qedf_rport before dereferencing any pointers */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { - QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n"); + QEDF_ERR(NULL, "tgt not offloaded\n"); rc = 1; goto abts_err; } + rdata = fcport->rdata; + r_a_tov = rdata->r_a_tov; + qedf = fcport->qedf; + lport = qedf->lport; + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); rc = 1; @@ -1729,6 +1732,13 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req, return SUCCESS; } + /* Sanity check qedf_rport before dereferencing any pointers */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "tgt not offloaded\n"); + rc = 1; + return SUCCESS; + } + qedf = fcport->qedf; if (!qedf) { QEDF_ERR(NULL, "qedf is NULL.\n"); -- cgit From c07c54b0cbb740dbc2f7fba247049bd9e25ac08e Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:53 -0700 Subject: scsi: qedf: Add fka_period SCSI host attribute to show fip keep alive period. Expose this information for interested applications. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qedf/qedf_attr.c | 57 ++++++++++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c index 1349f8a66e29..fa6727685627 100644 --- a/drivers/scsi/qedf/qedf_attr.c +++ b/drivers/scsi/qedf/qedf_attr.c @@ -8,6 +8,25 @@ */ #include "qedf.h" +inline bool qedf_is_vport(struct qedf_ctx *qedf) +{ + return qedf->lport->vport != NULL; +} + +/* Get base qedf for physical port from vport */ +static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf) +{ + struct fc_lport *lport; + struct fc_lport *base_lport; + + if (!(qedf_is_vport(qedf))) + return NULL; + + lport = qedf->lport; + base_lport = shost_priv(vport_to_shost(lport->vport)); + return lport_priv(base_lport); +} + static ssize_t qedf_fcoe_mac_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -26,34 +45,34 @@ qedf_fcoe_mac_show(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac); } +static ssize_t +qedf_fka_period_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lport = shost_priv(class_to_shost(dev)); + struct qedf_ctx *qedf = lport_priv(lport); + int fka_period = -1; + + if (qedf_is_vport(qedf)) + qedf = qedf_get_base_qedf(qedf); + + if (qedf->ctlr.sel_fcf) + fka_period = qedf->ctlr.sel_fcf->fka_period; + + return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period); +} + static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL); +static DEVICE_ATTR(fka_period, S_IRUGO, qedf_fka_period_show, NULL); struct device_attribute *qedf_host_attrs[] = { &dev_attr_fcoe_mac, + &dev_attr_fka_period, NULL, }; extern const struct qed_fcoe_ops *qed_ops; -inline bool qedf_is_vport(struct qedf_ctx *qedf) -{ - return (!(qedf->lport->vport == NULL)); -} - -/* Get base qedf for physical port from vport */ -static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf) -{ - struct fc_lport *lport; - struct fc_lport *base_lport; - - if (!(qedf_is_vport(qedf))) - return NULL; - - lport = qedf->lport; - base_lport = shost_priv(vport_to_shost(lport->vport)); - return (struct qedf_ctx *)(lport_priv(base_lport)); -} - void qedf_capture_grc_dump(struct qedf_ctx *qedf) { struct qedf_ctx *base_qedf; -- cgit From 2b82a62f52e647944446a3b85a4131b279c078fb Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:54 -0700 Subject: scsi: qedf: Set qed logging level to QED_LEVEL_NOTICE. Reduce the logging level we set for qed messages pertaining to this PCI function so that unnecessary messages are not printed in the kernel message log. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qedf/qedf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 549598f75c63..7753fc5ab65b 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -95,7 +95,7 @@ module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO); MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed " "qed module during probe."); -static uint qedf_dp_level; +static uint qedf_dp_level = QED_LEVEL_NOTICE; module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO); MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " "during probe (0-3: 0 more verbose)."); -- cgit From 5cf446d2aa4b92a1c1f079416e17fa9edee76ef8 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:55 -0700 Subject: scsi: qedf: Use same logic for SCSI host reset and FC lip_reset. We should be using the same logic to do a soft reset of the FCoE function whether it is initiated via sg_reset or the fc_host issue_lip attribute. Refactor the host reset and fcoe reset handlers to use the preferred logic which is currently contained in qedf_eh_host_reset(). Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 39 +++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7753fc5ab65b..6813b9be994e 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -640,27 +640,17 @@ void qedf_wait_for_upload(struct qedf_ctx *qedf) } } -/* Reset the host by gracefully logging out and then logging back in */ -static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) +/* Performs soft reset of qedf_ctx by simulating a link down/up */ +static void qedf_ctx_soft_reset(struct fc_lport *lport) { - struct fc_lport *lport; struct qedf_ctx *qedf; - lport = shost_priv(sc_cmd->device->host); - if (lport->vport) { QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n"); - return SUCCESS; + return; } - qedf = (struct qedf_ctx *)lport_priv(lport); - - if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || - test_bit(QEDF_UNLOADING, &qedf->flags) || - test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) - return FAILED; - - QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); + qedf = lport_priv(lport); /* For host reset, essentially do a soft link up/down */ atomic_set(&qedf->link_state, QEDF_LINK_DOWN); @@ -672,6 +662,24 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) qedf->vlan_id = 0; queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); +} + +/* Reset the host by gracefully logging out and then logging back in */ +static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport; + struct qedf_ctx *qedf; + + lport = shost_priv(sc_cmd->device->host); + qedf = lport_priv(lport); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || + test_bit(QEDF_UNLOADING, &qedf->flags)) + return FAILED; + + QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); + + qedf_ctx_soft_reset(lport); return SUCCESS; } @@ -1669,8 +1677,7 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); - fc_fabric_logoff(lport); - fc_fabric_login(lport); + qedf_ctx_soft_reset(lport); return 0; } -- cgit From b09fdc3aac6acc20587713e306692b12d31a2008 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:56 -0700 Subject: scsi: qedf: Add bus_reset 
No-op. We need to add a bus reset no-op as without it some of the LUNs attached to a vport may go offline when the error handler escalates to host reset due to not having a bus reset handler in the driver. What happens is we escalate to host reset which does a soft link down/link up to reset the adapter. However with multiple vports attached it's been observed that if the vports do log back into the target within 5 seconds, the SCSI layer offlines the devices most likely due to a TUR timing out to verify that the device is online. Adding a bus reset handler will cause the TUR to be sent after the bus reset handler where the devices will still be online if the bus reset is initiated by sg_reset (which is the case in the test that was failing). The bus reset will succeed and not needlessly bring the device offline/online. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 6813b9be994e..ef0dc566d70f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -628,6 +628,16 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); } +static int qedf_eh_bus_reset(struct scsi_cmnd *sc_cmd) +{ + QEDF_ERR(NULL, "BUS RESET Issued...\n"); + /* + * Essentially a no-op but return SUCCESS to prevent + * unnecessary escalation to the host reset handler. + */ + return SUCCESS; +} + void qedf_wait_for_upload(struct qedf_ctx *qedf) { while (1) { @@ -705,6 +715,7 @@ static struct scsi_host_template qedf_host_template = { .eh_abort_handler = qedf_eh_abort, .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */ + .eh_bus_reset_handler = qedf_eh_bus_reset, .eh_host_reset_handler = qedf_eh_host_reset, .slave_configure = qedf_slave_configure, .dma_boundary = QED_HW_DMA_BOUNDARY, -- cgit From 384d5a9b412ea0fb60f999c13d39a1f007d87730 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:57 -0700 Subject: scsi: qedf: Add non-offload receive filters. Drop invalid or unexpected FCoE frames that come into the non-offload path since the FCoE firmware would not do the filtering for us. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index ef0dc566d70f..b7d17e177cf0 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2087,6 +2087,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, u8 *dest_mac = NULL; struct fcoe_hdr *hp; struct qedf_rport *fcport; + struct fc_lport *vn_port; + u32 f_ctl; lport = qedf->lport; if (lport == NULL || lport->state == LPORT_ST_DISABLED) { @@ -2123,6 +2125,10 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, fh = fc_frame_header_get(fp); + /* + * Invalid frame filters. + */ + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. 
We dont this in L2 path */ @@ -2148,6 +2154,43 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, return; } + if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { + QEDF_ERR(&(qedf->dbg_ctx), "FC frame d_id mismatch with MAC %pM.\n", + dest_mac); + return; + } + + if (qedf->ctlr.state) { + if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { + QEDF_ERR(&(qedf->dbg_ctx), "Wrong source address mac:%pM dest_addr:%pM.\n", + mac, qedf->ctlr.dest_addr); + kfree_skb(skb); + return; + } + } + + vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + + /* + * If the destination ID from the frame header does not match what we + * have on record for lport and the search for a NPIV port came up + * empty then this is not addressed to our port so simply drop it. + */ + if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { + QEDF_ERR(&(qedf->dbg_ctx), "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", + lport->port_id, ntoh24(fh->fh_d_id)); + kfree_skb(skb); + return; + } + + f_ctl = ntoh24(fh->fh_f_ctl); + if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && + (f_ctl & FC_FC_EX_CTX)) { + /* Drop incoming ABTS response that has both SEQ/EX CTX set */ + kfree_skb(skb); + return; + } + /* * If a connection is uploading, drop incoming FCoE frames as there * is a small window where we could try to return a frame while libfc -- cgit From 57a3548a0164299d548b534806e436e5544997ea Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:58 -0700 Subject: scsi: qedf: Fixup unnecessary parantheses around test_bit operations. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_els.c | 6 +++--- drivers/scsi/qedf/qedf_io.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index 69a365da3891..eb07f1de8afa 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -44,7 +44,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, goto els_err; } - if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op); rc = -EINVAL; goto els_err; @@ -225,7 +225,7 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req) fcport = aborted_io_req->fcport; /* Check that fcport is still offloaded */ - if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL; } @@ -550,7 +550,7 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl) fcport = orig_io_req->fcport; /* Check that fcport is still offloaded */ - if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL; } diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index db160046f3e0..ea37c78418d7 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1847,7 +1847,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, return FAILED; } - if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n"); rc = FAILED; return FAILED; -- cgit From f7e8d57be143523189543dfaf183af92b4c85204 Mon 
Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:33:59 -0700 Subject: scsi: qedf: Move some prints to a debug level so they do not print when no debugging is enabled. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index b7d17e177cf0..4135354acea8 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -442,7 +442,8 @@ static void qedf_link_update(void *dev, struct qed_link_output *link) qedf_update_link_speed(qedf, link); if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { - QEDF_ERR(&(qedf->dbg_ctx), "DCBx done.\n"); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "DCBx done.\n"); if (atomic_read(&qedf->link_down_tmo_valid) > 0) queue_delayed_work(qedf->link_update_wq, &qedf->link_recovery, 0); @@ -2155,14 +2156,15 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, } if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { - QEDF_ERR(&(qedf->dbg_ctx), "FC frame d_id mismatch with MAC %pM.\n", - dest_mac); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "FC frame d_id mismatch with MAC %pM.\n", dest_mac); return; } if (qedf->ctlr.state) { if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { - QEDF_ERR(&(qedf->dbg_ctx), "Wrong source address mac:%pM dest_addr:%pM.\n", + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Wrong source address: mac:%pM dest_addr:%pM.\n", mac, qedf->ctlr.dest_addr); kfree_skb(skb); return; @@ -2177,7 +2179,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, * empty then this is not addressed to our port so simply drop it. */ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { - QEDF_ERR(&(qedf->dbg_ctx), "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", lport->port_id, ntoh24(fh->fh_d_id)); kfree_skb(skb); return; -- cgit From a7746f1e01d05066135a3c3ac7e8a857cab68ba7 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:34:00 -0700 Subject: scsi: qedf: Change cmd_per_lun in scsi_host_template to 32 to increase performance. Increase the default number of commands that the driver tells the SCSI mid-layer it can do to increase the default performance of the driver. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 4135354acea8..ed4bca1231a2 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -708,7 +708,7 @@ static struct scsi_host_template qedf_host_template = { .module = THIS_MODULE, .name = QEDF_MODULE_NAME, .this_id = -1, - .cmd_per_lun = 3, + .cmd_per_lun = 32, .use_clustering = ENABLE_CLUSTERING, .max_sectors = 0xffff, .queuecommand = qedf_queuecommand, -- cgit From 6088cfaafba205139f57aaffd8480c2f8b141dca Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:34:01 -0700 Subject: scsi: qedf: Add change_queue_depth member to scsi_host_template(). Add the change_queue_depth member to our SCSI host template so the queue depth of devices attached to qedf can be changed dynamically. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qedf/qedf_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index ed4bca1231a2..5842676e3b71 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -722,6 +722,7 @@ static struct scsi_host_template qedf_host_template = { .dma_boundary = QED_HW_DMA_BOUNDARY, .sg_tablesize = QEDF_MAX_BDS_PER_CMD, .can_queue = FCOE_PARAMS_NUM_TASKS, + .change_queue_depth = scsi_change_queue_depth, }; static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen) -- cgit From e2e2c7da4b7b1c45479483e17bd8fbad628ffad1 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 31 May 2017 06:34:02 -0700 Subject: scsi: qedf: Update version number to 8.18.22.0. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_version.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h index d46c487a6e73..6fa442061c32 100644 --- a/drivers/scsi/qedf/qedf_version.h +++ b/drivers/scsi/qedf/qedf_version.h @@ -7,9 +7,9 @@ * this source tree. */ -#define QEDF_VERSION "8.10.7.0" +#define QEDF_VERSION "8.18.22.0" #define QEDF_DRIVER_MAJOR_VER 8 -#define QEDF_DRIVER_MINOR_VER 10 -#define QEDF_DRIVER_REV_VER 7 +#define QEDF_DRIVER_MINOR_VER 18 +#define QEDF_DRIVER_REV_VER 22 #define QEDF_DRIVER_ENG_VER 0 -- cgit From 3c4810ffdc8e4f34d387f59baf0abefcfa4ada6a Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:11:53 -0700 Subject: scsi: qla2xxx: Allow ABTS, PURX, RIDA on ATIOQ for ISP83XX/27XX Driver added mechanism to move ABTS/PUREX/RIDA mailbox to ATIO queue as part of commit id 41dc529a4602ac737020f423f84686a81de38e6d ("qla2xxx: Improve RSCN handling in driver"). This patch adds a check to only allow ABTS/PURX/RIDA to be moved to ATIO Queue for ISP83XX and ISP27XX. Cc: # 4.11 Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_init.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 0391fc317003..f6130e8b1ca1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2946,7 +2946,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) } /* Move PUREX, ABTS RX & RIDA to ATIOQ */ - if (ql2xmvasynctoatio) { + if (ql2xmvasynctoatio && + (IS_QLA83XX(ha) || IS_QLA27XX(ha))) { if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) ha->fw_options[2] |= BIT_11; @@ -2958,7 +2959,9 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", __func__, ha->fw_options[1], ha->fw_options[2], ha->fw_options[3], vha->host->active_mode); - qla2x00_set_fw_options(vha, ha->fw_options); + + if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) + qla2x00_set_fw_options(vha, ha->fw_options); /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) -- cgit From 8b631d87ba55f43dbd29b9e8b477301539c08815 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:11:54 -0700 Subject: scsi: qla2xxx: Replace usage of spin_lock with spin_lock_irqsave Convert usage of spin_lock to spin_lock_irqsave because qla2xxx driver can access all the data structures in an interrupt context. 
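As a generic illustration of the locking pattern being adopted here (hypothetical names, not code taken from qla2xxx): spin_lock() is only safe when the lock is never contended from interrupt context on the same CPU, while spin_lock_irqsave() saves the current interrupt state and disables local interrupts for the critical section, so the same lock can also be taken from an interrupt handler without risk of self-deadlock.

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration only */
	static LIST_HEAD(example_list);

	static void example_add_entry(struct list_head *entry)
	{
		unsigned long flags;

		/*
		 * Save the interrupt state and disable local interrupts while
		 * the lock is held; an interrupt handler contending for
		 * example_lock can then no longer deadlock against this path.
		 */
		spin_lock_irqsave(&example_lock, flags);
		list_add_tail(entry, &example_list);
		spin_unlock_irqrestore(&example_lock, flags);
	}

The flags value must be a local unsigned long in the same function that takes and releases the lock, which is why each converted function in the change below gains its own "unsigned long flags" declaration.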
Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_target.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e766d8412384..a2e17a5794ab 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1762,13 +1762,13 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) { struct qla_tgt_sess_op *op; struct qla_tgt_cmd *cmd; + unsigned long flags; - spin_lock(&vha->cmd_list_lock); - + spin_lock_irqsave(&vha->cmd_list_lock, flags); list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { if (tag == op->atio.u.isp24.exchange_addr) { op->aborted = true; - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); return 1; } } @@ -1776,7 +1776,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { if (tag == op->atio.u.isp24.exchange_addr) { op->aborted = true; - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); return 1; } } @@ -1784,12 +1784,12 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { if (tag == cmd->atio.u.isp24.exchange_addr) { cmd->aborted = 1; - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); return 1; } } + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); - spin_unlock(&vha->cmd_list_lock); return 0; } @@ -1804,9 +1804,10 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, struct qla_tgt_sess_op *op; struct qla_tgt_cmd *cmd; uint32_t key; + unsigned long flags; key = sid_to_key(s_id); - spin_lock(&vha->cmd_list_lock); + spin_lock_irqsave(&vha->cmd_list_lock, flags); list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { uint32_t op_key; uint32_t op_lun; @@ -1839,7 +1840,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, if (cmd_key == key && cmd_lun == lun) cmd->aborted = 1; } - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); } /* ha->hardware_lock supposed to be held on entry */ @@ -4216,9 +4217,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, memcpy(&op->atio, atio, sizeof(*atio)); op->vha = vha; - spin_lock(&vha->cmd_list_lock); + spin_lock_irqsave(&vha->cmd_list_lock, flags); list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list); - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); INIT_WORK(&op->work, qlt_create_sess_from_atio); queue_work(qla_tgt_wq, &op->work); @@ -4529,12 +4530,13 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) struct qla_tgt_cmd *cmd; uint32_t key; int count = 0; + unsigned long flags; key = (((u32)s_id->b.domain << 16) | ((u32)s_id->b.area << 8) | ((u32)s_id->b.al_pa)); - spin_lock(&vha->cmd_list_lock); + spin_lock_irqsave(&vha->cmd_list_lock, flags); list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); @@ -4559,7 +4561,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) count++; } } - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); return count; } -- cgit From 383a298b2065a6f39f4ab2c33d84620ccad578f9 Mon Sep 17 00:00:00 
2001 From: Joe Carnuccio Date: Fri, 2 Jun 2017 09:11:55 -0700 Subject: scsi: qla2xxx: Retain loop test for fwdump length exceeding buffer length Signed-off-by: Joe Carnuccio Signed-off-by: Himanshu Madhani Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_init.c | 8 ++++---- drivers/scsi/qla2xxx/qla_tmpl.c | 16 +++++++++------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f6130e8b1ca1..e4876f4220e4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -6356,8 +6356,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, "-> template size %x bytes\n", dlen); if (dlen > risc_size * sizeof(*dcode)) { ql_log(ql_log_warn, vha, 0x0167, - "Failed fwdump template exceeds array by %x bytes\n", - (uint32_t)(dlen - risc_size * sizeof(*dcode))); + "Failed fwdump template exceeds array by %lx bytes\n", + (size_t)(dlen - risc_size * sizeof(*dcode))); goto default_template; } ha->fw_dump_template_len = dlen; @@ -6658,8 +6658,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) "-> template size %x bytes\n", dlen); if (dlen > risc_size * sizeof(*fwcode)) { ql_log(ql_log_warn, vha, 0x0177, - "Failed fwdump template exceeds array by %x bytes\n", - (uint32_t)(dlen - risc_size * sizeof(*fwcode))); + "Failed fwdump template exceeds array by %lx bytes\n", + (size_t)(dlen - risc_size * sizeof(*fwcode))); goto default_template; } ha->fw_dump_template_len = dlen; diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index c197972a3e2d..33142610882f 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -219,8 +219,6 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) { if (buf) ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; - ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011, - "Skipping entry %d\n", ent->hdr.entry_type); } static int @@ -818,6 +816,8 @@ qla27xx_walk_template(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd01a, "%s: entry count %lx\n", __func__, count); while (count--) { + if (buf && *len >= vha->hw->fw_dump_len) + break; if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len)) break; ent = qla27xx_next_entry(ent); @@ -825,18 +825,20 @@ qla27xx_walk_template(struct scsi_qla_host *vha, if (count) ql_dbg(ql_dbg_misc, vha, 0xd018, - "%s: residual count (%lx)\n", __func__, count); + "%s: entry residual count (%lx)\n", __func__, count); if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END) ql_dbg(ql_dbg_misc, vha, 0xd019, - "%s: missing end (%lx)\n", __func__, count); + "%s: missing end entry (%lx)\n", __func__, count); - ql_dbg(ql_dbg_misc, vha, 0xd01b, - "%s: len=%lx\n", __func__, *len); + if (buf && *len != vha->hw->fw_dump_len) + ql_dbg(ql_dbg_misc, vha, 0xd01b, + "%s: length=%#lx residual=%+ld\n", + __func__, *len, vha->hw->fw_dump_len - *len); if (buf) { ql_log(ql_log_warn, vha, 0xd015, - "Firmware dump saved to temp buffer (%ld/%p)\n", + "Firmware dump saved to temp buffer (%lu/%p)\n", vha->host_no, vha->hw->fw_dump); qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } -- cgit From 66ee0fece9a0b91f59a7eb8dd39808a0ef5db02b Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:11:56 -0700 Subject: scsi: qla2xxx: Fix path recovery If the port is moved/changed, current code would trigger a deletion. If the port is already deleted, then do relogin. 
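
A minimal sketch of the state-driven recovery decision described above, with a simplified state enum and hypothetical helpers standing in for the driver's relogin and session-deletion paths (the real code keys off fcport->disc_state, as the diff below shows):

/* Sketch only: the enum values and helpers are illustrative stand-ins. */
enum example_disc_state {
	EX_DSC_DELETED,
	EX_DSC_DELETE_PEND,
	EX_DSC_LOGIN_COMPLETE,
};

static void example_relogin(void) { }		/* hypothetical */
static void example_schedule_delete(void) { }	/* hypothetical */

static void example_handle_replugged_port(enum example_disc_state state)
{
	switch (state) {
	case EX_DSC_DELETED:
		/* Session already torn down: go straight to relogin. */
		example_relogin();
		break;
	case EX_DSC_DELETE_PEND:
		/* Deletion already in progress: let it finish first. */
		break;
	default:
		/* Port moved while a live session exists: delete it; a
		 * fresh login follows once the session is gone. */
		example_schedule_delete();
		break;
	}
}
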
Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_gs.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 9bc9aa9e164a..5acebaf57796 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3118,16 +3118,27 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) if (fcport) { /* cable moved. just plugged in */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post del sess\n", - __func__, __LINE__, fcport->port_name); - fcport->rscn_gen++; fcport->d_id = ea->id; fcport->scan_state = QLA_FCPORT_FOUND; fcport->flags |= FCF_FABRIC_DEVICE; - qlt_schedule_sess_for_deletion_lock(fcport); + switch (fcport->disc_state) { + case DSC_DELETED: + ql_dbg(ql_dbg_disc, vha, 0x210d, + "%s %d %8phC login\n", __func__, __LINE__, + fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + break; + case DSC_DELETE_PEND: + break; + default: + ql_dbg(ql_dbg_disc, vha, 0x2064, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + break; + } } else { /* create new fcport */ ql_dbg(ql_dbg_disc, vha, 0xffff, -- cgit From 694833ee00c4c56b3fc902515b7685a4186fe502 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:11:57 -0700 Subject: scsi: tcm_qla2xxx: Do not allow aborted cmd to advance. In case of hardware queue full, commands can loop between TCM stack and tcm_qla2xx shim layers for retry. While command is waiting for retry, task mgmt can get ahead and abort the cmmand that encountered queue full condition. Fix this by dropping the command, if task mgmt has already started the command free process. Acked-by: Nicholas Bellinger Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7443e4efa3ae..1131fe8e2dd2 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -686,6 +686,19 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) struct qla_tgt_cmd, se_cmd); int xmit_type = QLA_TGT_XMIT_STATUS; + if (cmd->aborted) { + /* + * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * already kick start the free. + */ + pr_debug( + "queue_data_in aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd, kref_read(&cmd->se_cmd.cmd_kref), + cmd->se_cmd.transport_state, cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + return 0; + } cmd->bufflen = se_cmd->data_length; cmd->sg = NULL; cmd->sg_cnt = 0; -- cgit From ba1758919906b1f04f239532e03b9fc5b49f3aa1 Mon Sep 17 00:00:00 2001 From: Sawan Chandak Date: Fri, 2 Jun 2017 09:11:58 -0700 Subject: scsi: qla2xxx: Use flag PFLG_DISCONNECTED. There is already flag defined PFLG_DISCONNECTED, which is set for PCI or register disconnect error condition. There is no need to have flag PCI_ERR, which has same purpose. Remove use of PCI_ERR flag and use PFLG_DISCONNECTED flag during error condition. Signed-off-by: Sawan Chandak Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 1 - drivers/scsi/qla2xxx/qla_mbx.c | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index eddbc1218a39..4127f35b669c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4017,7 +4017,6 @@ typedef struct scsi_qla_host { #define PFLG_DISCONNECTED 0 /* PCI device removed */ #define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */ #define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */ -#define PCI_ERR 30 uint32_t device_flags; #define SWITCH_FOUND BIT_0 diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index cba1fc5e8be9..fffa1f7cd8d2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -124,7 +124,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) } /* if PCI error, then avoid mbx processing.*/ - if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { + if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) && + test_bit(UNLOADING, &base_vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x1191, "PCI error, exiting.\n"); return QLA_FUNCTION_TIMEOUT; @@ -384,8 +385,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) * then only PCI ERR flag would be set. * we will do premature exit for above case. */ - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - set_bit(PCI_ERR, &base_vha->dpc_flags); ha->flags.mbox_busy = 0; rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; -- cgit From f775bd14e44d23b1761ecdac637164654680111e Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:11:59 -0700 Subject: scsi: qla2xxx: Convert 32-bit LUN usage to 64-bit Acked-by: Nicholas Bellinger Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_target.c | 32 +++++++++++++++----------------- drivers/scsi/qla2xxx/qla_target.h | 4 ++-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2 +- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a2e17a5794ab..a1f33b06019d 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1799,7 +1799,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) * for the same lun) */ static void abort_cmds_for_lun(struct scsi_qla_host *vha, - uint32_t lun, uint8_t *s_id) + u64 lun, uint8_t *s_id) { struct qla_tgt_sess_op *op; struct qla_tgt_cmd *cmd; @@ -1810,7 +1810,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, spin_lock_irqsave(&vha->cmd_list_lock, flags); list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { uint32_t op_key; - uint32_t op_lun; + u64 op_lun; op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); op_lun = scsilun_to_int( @@ -1832,7 +1832,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key; - uint32_t cmd_lun; + u64 cmd_lun; cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); cmd_lun = scsilun_to_int( @@ -1850,18 +1850,15 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; struct se_session *se_sess = sess->se_sess; struct qla_tgt_mgmt_cmd *mcmd; + struct qla_tgt_cmd *cmd; struct se_cmd *se_cmd; - u32 lun = 0; int rc; bool found_lun = false; unsigned long flags; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { - struct qla_tgt_cmd *cmd = - container_of(se_cmd, struct qla_tgt_cmd, se_cmd); if (se_cmd->tag == abts->exchange_addr_to_abort) { - lun = cmd->unpacked_lun; found_lun = true; break; } @@ -1895,12 +1892,13 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, } memset(mcmd, 0, sizeof(*mcmd)); + cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); mcmd->reset_count = vha->hw->chip_reset; mcmd->tmr_func = QLA_TGT_ABTS; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func, abts->exchange_addr_to_abort); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, @@ -4334,13 +4332,12 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt; struct fc_port *sess; - uint32_t lun, unpacked_lun; + u64 unpacked_lun; int fn; unsigned long flags; tgt = vha->vha_tgt.qla_tgt; - lun = a->u.isp24.fcp_cmnd.lun; fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; spin_lock_irqsave(&ha->tgt.sess_lock, flags); @@ -4348,7 +4345,8 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) a->u.isp24.fcp_hdr.s_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + unpacked_lun = + scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); if (!sess) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, @@ -4371,7 +4369,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; - uint32_t lun, unpacked_lun; + u64 unpacked_lun; int rc; mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); @@ -4387,8 
+4385,8 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, sizeof(mcmd->orig_iocb.imm_ntfy)); - lun = a->u.isp24.fcp_cmnd.lun; - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + unpacked_lun = + scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); mcmd->reset_count = vha->hw->chip_reset; mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; @@ -5877,7 +5875,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, unsigned long flags; uint8_t *s_id = NULL; /* to hide compiler warnings */ int rc; - uint32_t lun, unpacked_lun; + u64 unpacked_lun; int fn; void *iocb; @@ -5913,9 +5911,9 @@ static void qlt_tmr_work(struct qla_tgt *tgt, } iocb = a; - lun = a->u.isp24.fcp_cmnd.lun; fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + unpacked_lun = + scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); ha->tgt.tgt_ops->put_sess(sess); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index dae278859554..25ea90b8f6c9 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -675,7 +675,7 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); - int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, + int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); void (*free_mcmd)(struct qla_tgt_mgmt_cmd *); @@ -884,7 +884,7 @@ struct qla_tgt_cmd { int sg_cnt; /* SG segments count */ int bufflen; /* cmd buffer length */ int offset; - uint32_t unpacked_lun; + u64 unpacked_lun; enum dma_data_direction dma_data_direction; uint32_t reset_count; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 1131fe8e2dd2..238abad4b481 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -595,7 +595,7 @@ static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, /* * Called from qla_target.c:qlt_issue_task_mgmt() */ -static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, +static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, uint16_t tmr_func, uint32_t tag) { struct fc_port *sess = mcmd->sess; -- cgit From b98ae0d748dbc80016c5cc2e926f33648d83353d Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:00 -0700 Subject: scsi: qla2xxx: Fix name server relogin Name server login is normally handle by FW. In some rare case where one of the switches is being updated, name server login could get affected. Trigger relogin to name server when driver detects this condition. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_gs.c | 20 ++++++++++++++++++++ drivers/scsi/qla2xxx/qla_init.c | 38 +++++++++++++++++++++++++++++++++++++- drivers/scsi/qla2xxx/qla_isr.c | 17 +++++++++++++++++ 4 files changed, 76 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 4127f35b669c..51b262b236b4 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -252,6 +252,8 @@ #define NPH_F_PORT 0x7fe /* FFFFFE */ #define NPH_IP_BROADCAST 0x7ff /* FFFFFF */ +#define NPH_SNS_LID(ha) (IS_FWI2_CAPABLE(ha) ? 
NPH_SNS : SIMPLE_NAME_SERVER) + #define MAX_CMDSZ 16 /* SCSI maximum CDB size. */ #include "qla_fw.h" diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 5acebaf57796..ef8e8891d54f 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -124,6 +124,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, int rval; uint16_t comp_status; struct qla_hw_data *ha = vha->hw; + bool lid_is_sns = false; rval = QLA_FUNCTION_FAILED; if (ms_pkt->entry_status != 0) { @@ -155,6 +156,25 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, } else rval = QLA_SUCCESS; break; + case CS_PORT_LOGGED_OUT: + if (IS_FWI2_CAPABLE(ha)) { + if (le16_to_cpu(ms_pkt->loop_id.extended) == + NPH_SNS) + lid_is_sns = true; + } else { + if (le16_to_cpu(ms_pkt->loop_id.extended) == + SIMPLE_NAME_SERVER) + lid_is_sns = true; + } + if (lid_is_sns) { + ql_dbg(ql_dbg_async, vha, 0x502b, + "%s failed, Name server has logged out", + routine); + rval = QLA_NOT_LOGGED_IN; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + } + break; default: ql_dbg(ql_dbg_disc, vha, 0x2033, "%s failed, completion status (%x) on port_id: " diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e4876f4220e4..4ea4aa5bddaa 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1039,6 +1039,20 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) uint32_t id = 0, mask, rid; int rc; + switch (ea->event) { + case FCME_RELOGIN: + case FCME_RSCN: + case FCME_GIDPN_DONE: + case FCME_GPSC_DONE: + case FCME_GPNID_DONE: + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || + test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) + return; + break; + default: + break; + } + switch (ea->event) { case FCME_RELOGIN: if (test_bit(UNLOADING, &vha->dpc_flags)) @@ -4451,20 +4465,31 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x2045, "Register FC-4 TYPE failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, + &vha->dpc_flags)) + break; } if (qla2x00_rff_id(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x2049, "Register FC-4 Features failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, + &vha->dpc_flags)) + break; } if (qla2x00_rnn_id(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x204f, "Register Node Name failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, + &vha->dpc_flags)) + break; } else if (qla2x00_rsnn_nn(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x2053, "Register Symobilic Node Name failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; } } @@ -4536,17 +4561,28 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; } /* If other queries succeeded probe for FC-4 type */ - if (swl) + if (swl) { qla2x00_gff_id(vha, swl); + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; + } } swl_idx = 0; diff --git 
a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 2572121b765b..1eb46eb45005 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -973,6 +973,23 @@ skip_rio: if (mb[1] == 0xffff) goto global_port_update; + if (mb[1] == NPH_SNS_LID(ha)) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + break; + } + + /* use handle_cnt for loop id/nport handle */ + if (IS_FWI2_CAPABLE(ha)) + handle_cnt = NPH_SNS; + else + handle_cnt = SIMPLE_NAME_SERVER; + if (mb[1] == handle_cnt) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + break; + } + /* Port logout */ fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); if (!fcport) -- cgit From 83548fe2fcbb78a233e8156feff4e167f1d0831e Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:01 -0700 Subject: scsi: qla2xxx: Cleanup debug message IDs Assign unique id to all traces and logs for debug purpose. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_attr.c | 2 +- drivers/scsi/qla2xxx/qla_bsg.c | 2 +- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_dfs.c | 8 +- drivers/scsi/qla2xxx/qla_gs.c | 100 +++++++------ drivers/scsi/qla2xxx/qla_init.c | 290 +++++++++++++++++++------------------- drivers/scsi/qla2xxx/qla_isr.c | 18 +-- drivers/scsi/qla2xxx/qla_mbx.c | 52 +++---- drivers/scsi/qla2xxx/qla_os.c | 30 ++-- drivers/scsi/qla2xxx/qla_target.c | 217 ++++++++++++++-------------- 10 files changed, 352 insertions(+), 369 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 7c8d6c54ab70..a93eb42718e5 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -769,7 +769,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, did.b.area = (type & 0x0000ff00) >> 8; did.b.al_pa = (type & 0x000000ff); - ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n", + ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n", did.b.domain, did.b.area, did.b.al_pa); ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index ca3420de5a01..e093795a0371 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -2135,7 +2135,7 @@ qla8044_serdes_op(struct bsg_job *bsg_job) bsg_reply->reply_payload_rcv_len = sizeof(sr); break; default: - ql_dbg(ql_dbg_user, vha, 0x70cf, + ql_dbg(ql_dbg_user, vha, 0x7020, "Unknown serdes cmd %x.\n", sr.cmd); rval = -EINVAL; break; diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 88748a6ab73f..11e097e123bd 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -62,7 +62,7 @@ * | Misc | 0xd301 | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | - * | Target Mode | 0xe080 | | + * | Target Mode | 0xe081 | | * | Target Mode Management | 0xf09b | 0xf002 | * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000d | | diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 989e17b0758c..391c50be2297 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -70,7 +70,7 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) qla2x00_gid_list_size(ha), &gid_list_dma, GFP_KERNEL); if (!gid_list) { - ql_dbg(ql_dbg_user, vha, 0x705c, + ql_dbg(ql_dbg_user, vha, 0x7018, 
"DMA allocation failed for %u\n", qla2x00_gid_list_size(ha)); return 0; @@ -370,7 +370,7 @@ create_nodes: ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); if (!ha->tgt.dfs_tgt_port_database) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0xd03f, "Unable to create debugFS tgt_port_database node.\n"); goto out; } @@ -386,8 +386,8 @@ create_nodes: ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess", S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops); if (!ha->tgt.dfs_tgt_sess) { - ql_log(ql_log_warn, vha, 0xffff, - "Unable to create debugFS tgt_sess node.\n"); + ql_log(ql_log_warn, vha, 0xd040, + "Unable to create debugFS tgt_sess node.\n"); goto out; } diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index ef8e8891d54f..540fec524ccb 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -2024,7 +2024,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - ql_dbg(ql_dbg_disc, vha, 0x20b1, + ql_dbg(ql_dbg_disc, vha, 0x201b, "Vendor Identifier = %s.\n", eiter->a.vendor_identifier); /* Update MS request size. */ @@ -2236,7 +2236,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) } size += 4 + 4; - ql_dbg(ql_dbg_disc, vha, 0x20bc, + ql_dbg(ql_dbg_disc, vha, 0x2017, "Current_Speed = %x.\n", eiter->a.cur_speed); /* Max frame size. */ @@ -2281,7 +2281,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - ql_dbg(ql_dbg_disc, vha, 0x203d, + ql_dbg(ql_dbg_disc, vha, 0x201a, "HostName=%s.\n", eiter->a.host_name); /* Node Name */ @@ -2388,13 +2388,13 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + 4); size += 4 + 4; - ql_dbg(ql_dbg_disc, vha, 0x20c8, + ql_dbg(ql_dbg_disc, vha, 0x201c, "Port Id = %x.\n", eiter->a.port_id); /* Update MS request size. 
*/ qla2x00_update_ms_fdmi_iocb(vha, size + 16); - ql_dbg(ql_dbg_disc, vha, 0x203e, + ql_dbg(ql_dbg_disc, vha, 0x2018, "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca, entries, size); @@ -2767,13 +2767,13 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport = ea->fcport; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %8phC login state %d \n", - __func__, fcport->port_name, fcport->fw_login_state); + ql_dbg(ql_dbg_disc, vha, 0x201d, + "%s %8phC login state %d\n", + __func__, fcport->port_name, fcport->fw_login_state); if (ea->sp->gen2 != fcport->login_gen) { /* PLOGI/PRLI/LOGO came in while cmd was out.*/ - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x201e, "%s %8phC generation changed rscn %d|%d login %d|%d \n", __func__, fcport->port_name, fcport->last_rscn_gen, fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen); @@ -2797,7 +2797,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) if (atomic_read(&fcport->state) == FCS_ONLINE) break; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x201f, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); qla24xx_post_gnl_work(vha, fcport); @@ -2806,14 +2806,14 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) } else { /* fcport->d_id.b24 != ea->id.b24 */ fcport->d_id.b24 = ea->id.b24; if (fcport->deleted == QLA_SESS_DELETED) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2021, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion_lock(fcport); } } } else { /* ea->sp->gen1 != fcport->rscn_gen */ - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2022, "%s %d %8phC post gidpn\n", __func__, __LINE__, fcport->port_name); /* rscn came in while cmd was out */ @@ -2823,18 +2823,18 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) /* cable pulled */ if (ea->sp->gen1 == fcport->rscn_gen) { if (ea->sp->gen2 == fcport->login_gen) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2042, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion_lock(fcport); } else { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2045, "%s %d %8phC login\n", __func__, __LINE__, fcport->port_name); qla24xx_fcport_handle_login(vha, fcport); } } else { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2049, "%s %d %8phC post gidpn\n", __func__, __LINE__, fcport->port_name); qla24xx_post_gidpn_work(vha, fcport); @@ -2861,7 +2861,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res) ea.rc = res; ea.event = FCME_GIDPN_DONE; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x204f, "Async done-%s res %x, WWPN %8phC ID %3phC \n", sp->name, res, fcport->port_name, id); @@ -2917,11 +2917,11 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport) if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0x206f, - "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n", - sp->name, fcport->port_name, - sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + ql_dbg(ql_dbg_disc, vha, 0x20a4, + "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n", + sp->name, fcport->port_name, + sp->handle, fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); 
return rval; done_free_sp: @@ -2972,7 +2972,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2053, "Async done-%s res %x, WWPN %8phC \n", sp->name, res, fcport->port_name); @@ -2985,10 +2985,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) if ((ct_rsp->header.reason_code == CT_REASON_INVALID_COMMAND_CODE) || (ct_rsp->header.reason_code == - CT_REASON_COMMAND_UNSUPPORTED)) { - ql_dbg(ql_dbg_disc, vha, 0x205a, - "GPSC command unsupported, disabling " - "query.\n"); + CT_REASON_COMMAND_UNSUPPORTED)) { + ql_dbg(ql_dbg_disc, vha, 0x2019, + "GPSC command unsupported, disabling query.\n"); ha->flags.gpsc_supported = 0; res = QLA_SUCCESS; } @@ -3017,12 +3016,11 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) break; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", - sp->name, - fcport->fabric_port_name, - be16_to_cpu(ct_rsp->rsp.gpsc.speeds), - be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + ql_dbg(ql_dbg_disc, vha, 0x2054, + "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", + sp->name, fcport->fabric_port_name, + be16_to_cpu(ct_rsp->rsp.gpsc.speeds), + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } done: memset(&ea, 0, sizeof(ea)); @@ -3078,11 +3076,11 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", - sp->name, fcport->port_name, sp->handle, - fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + ql_dbg(ql_dbg_disc, vha, 0x205e, + "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", + sp->name, fcport->port_name, sp->handle, + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); return rval; done_free_sp: @@ -3161,9 +3159,9 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) } } else { /* create new fcport */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post new sess\n", - __func__, __LINE__, ea->port_name); + ql_dbg(ql_dbg_disc, vha, 0x2065, + "%s %d %8phC post new sess\n", + __func__, __LINE__, ea->port_name); qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL); } @@ -3180,10 +3178,10 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) struct event_arg ea; struct qla_work_evt *e; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async done-%s res %x ID %3phC. %8phC\n", - sp->name, res, ct_req->req.port_id.port_id, - ct_rsp->rsp.gpn_id.port_name); + ql_dbg(ql_dbg_disc, vha, 0x2066, + "Async done-%s res %x ID %3phC. 
%8phC\n", + sp->name, res, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); memset(&ea, 0, sizeof(ea)); memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); @@ -3245,8 +3243,8 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); if (!sp->u.iocb_cmd.u.ctarg.req) { - ql_log(ql_log_warn, vha, 0xffff, - "Failed to allocate ct_sns request.\n"); + ql_log(ql_log_warn, vha, 0xd041, + "Failed to allocate ct_sns request.\n"); goto done_free_sp; } @@ -3254,8 +3252,8 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); if (!sp->u.iocb_cmd.u.ctarg.rsp) { - ql_log(ql_log_warn, vha, 0xffff, - "Failed to allocate ct_sns request.\n"); + ql_log(ql_log_warn, vha, 0xd042, + "Failed to allocate ct_sns request.\n"); goto done_free_sp; } @@ -3282,9 +3280,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s hdl=%x ID %3phC.\n", sp->name, - sp->handle, ct_req->req.port_id.port_id); + ql_dbg(ql_dbg_disc, vha, 0x2067, + "Async-%s hdl=%x ID %3phC.\n", sp->name, + sp->handle, ct_req->req.port_id.port_id); return rval; done_free_sp: diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4ea4aa5bddaa..c425d061cd80 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -141,7 +141,7 @@ qla2x00_async_login_sp_done(void *ptr, int res) struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20dd, "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); sp->fcport->flags &= ~FCF_ASYNC_SENT; @@ -334,31 +334,31 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, if (ea->rc) { /* rval */ if (fcport->login_retry == 0) { fcport->login_retry = vha->hw->login_retry_count; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "GNL failed Port login retry %8phN, retry cnt=%d.\n", - fcport->port_name, fcport->login_retry); + ql_dbg(ql_dbg_disc, vha, 0x20de, + "GNL failed Port login retry %8phN, retry cnt=%d.\n", + fcport->port_name, fcport->login_retry); } return; } if (fcport->last_rscn_gen != fcport->rscn_gen) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20df, "%s %8phC rscn gen changed rscn %d|%d \n", __func__, fcport->port_name, fcport->last_rscn_gen, fcport->rscn_gen); qla24xx_post_gidpn_work(vha, fcport); return; } else if (fcport->last_login_gen != fcport->login_gen) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %8phC login gen changed login %d|%d \n", - __func__, fcport->port_name, - fcport->last_login_gen, fcport->login_gen); + ql_dbg(ql_dbg_disc, vha, 0x20e0, + "%s %8phC login gen changed login %d|%d\n", + __func__, fcport->port_name, + fcport->last_login_gen, fcport->login_gen); return; } n = ea->data[0] / sizeof(struct get_name_list_extended); - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20e1, "%s %d %8phC n %d %02x%02x%02x lid %d \n", __func__, __LINE__, fcport->port_name, n, fcport->d_id.b.domain, fcport->d_id.b.area, @@ -380,20 +380,20 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, loop_id = le16_to_cpu(e->nport_handle); loop_id = (loop_id & 0x7fff); - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", - __func__, fcport->port_name, - e->current_login_state, fcport->fw_login_state, - 
id.b.domain, id.b.area, id.b.al_pa, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, loop_id, fcport->loop_id); + ql_dbg(ql_dbg_disc, vha, 0x20e2, + "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", + __func__, fcport->port_name, + e->current_login_state, fcport->fw_login_state, + id.b.domain, id.b.area, id.b.al_pa, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, loop_id, fcport->loop_id); if ((id.b24 != fcport->d_id.b24) || ((fcport->loop_id != FC_NO_LOOP_ID) && (fcport->loop_id != loop_id))) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post del sess\n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20e3, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion(fcport, 1); return; } @@ -416,9 +416,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, switch (e->current_login_state) { case DSC_LS_PRLI_COMP: - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post gpdb\n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20e4, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); opt = PDO_FORCE_ADISC; qla24xx_post_gpdb_work(vha, fcport, opt); break; @@ -429,9 +429,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, qla2x00_find_new_loop_id(vha, fcport); fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC \n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20e5, + "%s %d %8phC\n", + __func__, __LINE__, fcport->port_name); qla24xx_fcport_handle_login(vha, fcport); break; } @@ -456,7 +456,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, qla2x00_find_fcport_by_wwpn(vha, e->port_name, 0); - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20e6, "%s %d %8phC post del sess\n", __func__, __LINE__, conflict_fcport->port_name); @@ -487,7 +487,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) u64 wwn; struct list_head h; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20e7, "Async done-%s res %x mb[1]=%x mb[2]=%x \n", sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], sp->u.iocb_cmd.u.mbx.in_mb[2]); @@ -512,7 +512,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) set_bit(loop_id, vha->hw->loop_id_map); wwn = wwn_to_u64(e->port_name); - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", __func__, (void *)&wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, @@ -551,7 +551,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) if (!vha->flags.online) goto done; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20d9, "Async-gnlist WWPN %8phC \n", fcport->port_name); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); @@ -598,9 +598,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s - OUT WWPN %8phC hndl %x\n", - sp->name, fcport->port_name, sp->handle); + ql_dbg(ql_dbg_disc, vha, 0x20da, + "Async-%s - OUT WWPN %8phC hndl %x\n", + sp->name, fcport->port_name, sp->handle); return rval; @@ -635,7 +635,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) int rval = QLA_SUCCESS; struct event_arg ea; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20db, "Async done-%s res %x, WWPN %8phC 
mb[1]=%x mb[2]=%x \n", sp->name, res, fcport->port_name, mb[1], mb[2]); @@ -701,8 +701,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { - ql_log(ql_log_warn, vha, 0xffff, - "Failed to allocate port database structure.\n"); + ql_log(ql_log_warn, vha, 0xd043, + "Failed to allocate port database structure.\n"); goto done_free_sp; } memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); @@ -734,9 +734,9 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s %8phC hndl %x opt %x\n", - sp->name, fcport->port_name, sp->handle, opt); + ql_dbg(ql_dbg_disc, vha, 0x20dc, + "Async-%s %8phC hndl %x opt %x\n", + sp->name, fcport->port_name, sp->handle, opt); return rval; @@ -760,27 +760,27 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) fcport->flags &= ~FCF_ASYNC_SENT; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20d2, "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, rval); if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. */ - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC generation changed rscn %d|%d login %d|%d \n", __func__, fcport->port_name, fcport->last_rscn_gen, fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen); return; } else if (ea->sp->gen1 != fcport->rscn_gen) { - ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n", __func__, __LINE__, fcport->port_name); qla24xx_post_gidpn_work(vha, fcport); return; } if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n", + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion_lock(fcport); return; @@ -797,14 +797,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20d6, "%s %d %8phC post upd_fcport fcp_cnt %d\n", __func__, __LINE__, fcport->port_name, vha->fcport_count); qla24xx_post_upd_fcport_work(vha, fcport); } else { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20d7, "%s %d %8phC post gpsc fcp_cnt %d\n", __func__, __LINE__, fcport->port_name, vha->fcport_count); @@ -823,7 +823,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) if (fcport->scan_state != QLA_FCPORT_FOUND) return 0; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20d8, "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, fcport->login_pause, fcport->flags, @@ -854,14 +854,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) switch (fcport->disc_state) { case DSC_DELETED: if (fcport->loop_id == FC_NO_LOOP_ID) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post gnl\n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20bd, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); qla24xx_async_gnl(vha, fcport); } else { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post login\n", - __func__, 
__LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20bf, + "%s %d %8phC post login\n", + __func__, __LINE__, fcport->port_name); fcport->disc_state = DSC_LOGIN_PEND; qla2x00_post_async_login_work(vha, fcport, NULL); } @@ -878,16 +878,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) if (fcport->flags & FCF_FCP2_DEVICE) { u8 opt = PDO_FORCE_ADISC; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post gpdb\n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20c9, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); fcport->disc_state = DSC_GPDB; qla24xx_post_gpdb_work(vha, fcport, opt); } else { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post login \n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20cf, + "%s %d %8phC post login\n", + __func__, __LINE__, fcport->port_name); fcport->disc_state = DSC_LOGIN_PEND; qla2x00_post_async_login_work(vha, fcport, NULL); } @@ -895,18 +895,18 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) break; case DSC_LOGIN_FAILED: - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post gidpn \n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20d0, + "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); qla24xx_post_gidpn_work(vha, fcport); break; case DSC_LOGIN_COMPLETE: /* recheck login state */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post gpdb \n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20d1, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC); break; @@ -923,10 +923,10 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea) { fcport->rscn_gen++; - ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, - "%s %8phC DS %d LS %d\n", - __func__, fcport->port_name, fcport->disc_state, - fcport->fw_login_state); + ql_dbg(ql_dbg_disc, fcport->vha, 0x210c, + "%s %8phC DS %d LS %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state); if (fcport->flags & FCF_ASYNC_SENT) return; @@ -993,14 +993,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, return; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", - __func__, fcport->port_name, fcport->disc_state, - fcport->fw_login_state, fcport->login_pause, - fcport->deleted, fcport->conflict, - fcport->last_rscn_gen, fcport->rscn_gen, - fcport->last_login_gen, fcport->login_gen, - fcport->flags); + ql_dbg(ql_dbg_disc, vha, 0x2102, + "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, + fcport->deleted, fcport->conflict, + fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, + fcport->flags); if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || (fcport->fw_login_state == DSC_LS_PRLI_PEND)) @@ -1023,7 +1023,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, } if (fcport->last_rscn_gen != fcport->rscn_gen) { - ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", __func__, __LINE__, fcport->port_name); qla24xx_async_gidpn(vha, fcport); @@ -1070,10 +1070,10 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) /* cable moved */ rc = qla24xx_post_gpnid_work(vha, &ea->id); if 
(rc) { - ql_log(ql_log_warn, vha, 0xffff, - "RSCN GPNID work failed %02x%02x%02x\n", - ea->id.b.domain, ea->id.b.area, - ea->id.b.al_pa); + ql_log(ql_log_warn, vha, 0xd044, + "RSCN GPNID work failed %02x%02x%02x\n", + ea->id.b.domain, ea->id.b.area, + ea->id.b.al_pa); } } else { ea->fcport = fcport; @@ -1084,14 +1084,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) case RSCN_DOM_ADDR: if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) { mask = 0xffff00; - ql_log(ql_dbg_async, vha, 0xffff, - "RSCN: Area 0x%06x was affected\n", - ea->id.b24); + ql_dbg(ql_dbg_async, vha, 0x5044, + "RSCN: Area 0x%06x was affected\n", + ea->id.b24); } else { mask = 0xff0000; - ql_log(ql_dbg_async, vha, 0xffff, - "RSCN: Domain 0x%06x was affected\n", - ea->id.b24); + ql_dbg(ql_dbg_async, vha, 0x507a, + "RSCN: Domain 0x%06x was affected\n", + ea->id.b24); } rid = ea->id.b24 & mask; @@ -1106,9 +1106,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) break; case RSCN_FAB_ADDR: default: - ql_log(ql_log_warn, vha, 0xffff, - "RSCN: Fabric was affected. Addr format %d\n", - ea->id.b.rsvd_1); + ql_log(ql_log_warn, vha, 0xd045, + "RSCN: Fabric was affected. Addr format %d\n", + ea->id.b.rsvd_1); qla2x00_mark_all_devices_lost(vha, 1); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); @@ -1319,15 +1319,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post gpdb\n", - __func__, __LINE__, ea->fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20ea, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, ea->fcport->port_name); ea->fcport->chip_reset = vha->hw->chip_reset; ea->fcport->logout_on_delete = 1; qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; case MBS_COMMAND_ERROR: - ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n", + ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", __func__, __LINE__, ea->fcport->port_name, ea->data[1]); ea->fcport->flags &= ~FCF_ASYNC_SENT; @@ -1344,10 +1344,10 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) cid.b.al_pa = ea->iop[1] & 0xff; cid.b.rsvd_1 = 0; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC LoopID 0x%x in use post gnl\n", - __func__, __LINE__, ea->fcport->port_name, - ea->fcport->loop_id); + ql_dbg(ql_dbg_disc, vha, 0x20ec, + "%s %d %8phC LoopID 0x%x in use post gnl\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->loop_id); if (IS_SW_RESV_ADDR(cid)) { set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); @@ -1358,11 +1358,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) qla24xx_post_gnl_work(vha, ea->fcport); break; case MBS_PORT_ID_USED: - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", - __func__, __LINE__, ea->fcport->port_name, - ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, - ea->fcport->d_id.b.al_pa); + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, + ea->fcport->d_id.b.al_pa); qla2x00_clear_loop_id(ea->fcport); qla24xx_post_gidpn_work(vha, ea->fcport); @@ -2969,10 +2969,10 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) ha->fw_options[2] &= ~BIT_11; } - ql_dbg(ql_dbg_init, vha, 0xffff, - "%s, add FW options 1-3 = 0x%04x 
0x%04x 0x%04x mode %x\n", - __func__, ha->fw_options[1], ha->fw_options[2], - ha->fw_options[3], vha->host->active_mode); + ql_dbg(ql_dbg_init, vha, 0x00e8, + "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) qla2x00_set_fw_options(vha, ha->fw_options); @@ -3053,7 +3053,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) icb->rid = cpu_to_le16(rid); if (ha->flags.msix_enabled) { msix = &ha->msix_entries[1]; - ql_dbg(ql_dbg_init, vha, 0x00fd, + ql_dbg(ql_dbg_init, vha, 0x0019, "Registering vector 0x%x for base que.\n", msix->entry); icb->msix = cpu_to_le16(msix->entry); @@ -3183,7 +3183,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) /* FA-WWPN Status */ ha->flags.fawwpn_enabled = (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0; - ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n", + ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); } @@ -3857,10 +3857,10 @@ qla2x00_rport_del(void *data) fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); if (rport) { - ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, - "%s %8phN. rport %p roles %x \n", - __func__, fcport->port_name, rport, - rport->roles); + ql_dbg(ql_dbg_disc, fcport->vha, 0x210b, + "%s %8phN. rport %p roles %x\n", + __func__, fcport->port_name, rport, + rport->roles); fc_remote_port_delete(rport); } @@ -3900,7 +3900,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) fcport->logout_on_delete = 1; if (!fcport->ct_desc.ct_sns) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0xd049, "Failed to allocate ct_sns request.\n"); kfree(fcport); fcport = NULL; @@ -4002,7 +4002,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { if (LOOP_TRANSITION(vha)) { - ql_dbg(ql_dbg_disc, vha, 0x201e, + ql_dbg(ql_dbg_disc, vha, 0x2099, "Needs RSCN update and loop transition.\n"); rval = QLA_FUNCTION_FAILED; } @@ -4102,7 +4102,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) goto cleanup_allocation; - ql_dbg(ql_dbg_disc, vha, 0x2017, + ql_dbg(ql_dbg_disc, vha, 0x2011, "Entries in ID list (%d).\n", entries); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075, (uint8_t *)ha->gid_list, @@ -4111,7 +4111,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) /* Allocate temporary fcport for any new fcports discovered. 
*/ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { - ql_log(ql_log_warn, vha, 0x2018, + ql_log(ql_log_warn, vha, 0x2012, "Memory allocation failed for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto cleanup_allocation; @@ -4126,7 +4126,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) fcport->port_type != FCT_BROADCAST && (fcport->flags & FCF_FABRIC_DEVICE) == 0) { - ql_dbg(ql_dbg_disc, vha, 0x2019, + ql_dbg(ql_dbg_disc, vha, 0x2096, "Marking port lost loop_id=0x%04x.\n", fcport->loop_id); @@ -4171,11 +4171,11 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != QLA_SUCCESS) { - ql_dbg(ql_dbg_disc, vha, 0x201a, + ql_dbg(ql_dbg_disc, vha, 0x2097, "Failed to retrieve fcport information " "-- get_port_database=%x, loop_id=0x%04x.\n", rval2, new_fcport->loop_id); - ql_dbg(ql_dbg_disc, vha, 0x201b, + ql_dbg(ql_dbg_disc, vha, 0x2105, "Scheduling resync.\n"); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); continue; @@ -4224,7 +4224,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { - ql_log(ql_log_warn, vha, 0x201c, + ql_log(ql_log_warn, vha, 0xd031, "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto cleanup_allocation; @@ -4247,7 +4247,7 @@ cleanup_allocation: kfree(new_fcport); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_disc, vha, 0x201d, + ql_dbg(ql_dbg_disc, vha, 0x2098, "Configure local loop error exit: rval=%x.\n", rval); } @@ -4317,10 +4317,10 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) if (fcport->port_type == FCT_TARGET) rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %8phN. rport %p is %s mode \n", - __func__, fcport->port_name, rport, - (fcport->port_type == FCT_TARGET) ? "tgt" : "ini"); + ql_dbg(ql_dbg_disc, vha, 0x20ee, + "%s %8phN. rport %p is %s mode\n", + __func__, fcport->port_name, rport, + (fcport->port_type == FCT_TARGET) ? 
"tgt" : "ini"); fc_remote_port_rolechg(rport, rport_ids.roles); } @@ -4348,7 +4348,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) if (IS_SW_RESV_ADDR(fcport->d_id)) return; - ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n", + ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", __func__, fcport->port_name); if (IS_QLAFX00(vha->hw)) { @@ -4415,7 +4415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) loop_id = SNS_FL_PORT; rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_disc, vha, 0x201f, + ql_dbg(ql_dbg_disc, vha, 0x20a0, "MBX_GET_PORT_NAME failed, No FL Port.\n"); vha->device_flags &= ~SWITCH_FOUND; @@ -4453,7 +4453,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) return rval; } if (mb[0] != MBS_COMMAND_COMPLETE) { - ql_dbg(ql_dbg_disc, vha, 0x2042, + ql_dbg(ql_dbg_disc, vha, 0x20a1, "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]); @@ -4463,7 +4463,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { if (qla2x00_rft_id(vha)) { /* EMPTY */ - ql_dbg(ql_dbg_disc, vha, 0x2045, + ql_dbg(ql_dbg_disc, vha, 0x20a2, "Register FC-4 TYPE failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) @@ -4471,7 +4471,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } if (qla2x00_rff_id(vha)) { /* EMPTY */ - ql_dbg(ql_dbg_disc, vha, 0x2049, + ql_dbg(ql_dbg_disc, vha, 0x209a, "Register FC-4 Features failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) @@ -4479,14 +4479,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } if (qla2x00_rnn_id(vha)) { /* EMPTY */ - ql_dbg(ql_dbg_disc, vha, 0x204f, + ql_dbg(ql_dbg_disc, vha, 0x2104, "Register Node Name failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; } else if (qla2x00_rsnn_nn(vha)) { /* EMPTY */ - ql_dbg(ql_dbg_disc, vha, 0x2053, + ql_dbg(ql_dbg_disc, vha, 0x209b, "Register Symobilic Node Name failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; @@ -4555,7 +4555,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) swl = ha->swl; if (!swl) { /*EMPTY*/ - ql_dbg(ql_dbg_disc, vha, 0x2054, + ql_dbg(ql_dbg_disc, vha, 0x209c, "GID_PT allocations failed, fallback on GA_NXT.\n"); } else { memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); @@ -4589,7 +4589,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) /* Allocate temporary fcport for any new fcports discovered. 
*/ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { - ql_log(ql_log_warn, vha, 0x205e, + ql_log(ql_log_warn, vha, 0x209d, "Failed to allocate memory for fcport.\n"); return (QLA_MEMORY_ALLOC_FAILED); } @@ -4636,7 +4636,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) /* Send GA_NXT to the switch */ rval = qla2x00_ga_nxt(vha, new_fcport); if (rval != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x2064, + ql_log(ql_log_warn, vha, 0x209e, "SNS scan failed -- assuming " "zero-entry result.\n"); rval = QLA_SUCCESS; @@ -4649,7 +4649,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) wrap.b24 = new_fcport->d_id.b24; first_dev = 0; } else if (new_fcport->d_id.b24 == wrap.b24) { - ql_dbg(ql_dbg_disc, vha, 0x2065, + ql_dbg(ql_dbg_disc, vha, 0x209f, "Device wrap (%02x%02x%02x).\n", new_fcport->d_id.b.domain, new_fcport->d_id.b.area, @@ -4761,7 +4761,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) nxt_d_id.b24 = new_fcport->d_id.b24; new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { - ql_log(ql_log_warn, vha, 0x2066, + ql_log(ql_log_warn, vha, 0xd032, "Memory allocation failed for fcport.\n"); return (QLA_MEMORY_ALLOC_FAILED); } @@ -4792,7 +4792,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_BROADCAST) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); @@ -7367,10 +7367,10 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) ha->fw_options[2] |= BIT_9; } - ql_dbg(ql_dbg_init, vha, 0xffff, - "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", - __func__, ha->fw_options[1], ha->fw_options[2], - ha->fw_options[3], vha->host->active_mode); + ql_dbg(ql_dbg_init, vha, 0x00e9, + "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); qla2x00_set_fw_options(vha, ha->fw_options); } @@ -7599,7 +7599,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v if (msix->in_use) continue; qpair->msix = msix; - ql_log(ql_dbg_multiq, vha, 0xc00f, + ql_dbg(ql_dbg_multiq, vha, 0xc00f, "Vector %x selected for qpair\n", msix->vector); break; } @@ -7642,7 +7642,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); if (!qpair->srb_mempool) { - ql_log(ql_log_warn, vha, 0x0191, + ql_log(ql_log_warn, vha, 0xd036, "Failed to create srb mempool for qpair %d\n", qpair->id); goto fail_mempool; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 1eb46eb45005..2984abcc29e7 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -829,7 +829,7 @@ skip_rio: fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); ql_dbg(ql_dbg_init + ql_dbg_verbose, - vha, 0x0144, "LOOP DOWN detected," + vha, 0x00d8, "LOOP DOWN detected," "restore WWPN %016llx\n", wwn_to_u64(vha->port_name)); } @@ -1718,7 +1718,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_NOXCB: vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0xd046, "Exchange starvation. 
Resetting RISC\n"); vha->hw->exch_starvation = 0; @@ -2391,8 +2391,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { ql_dbg(ql_dbg_io, fcport->vha, 0x301a, - "Mid-layer underflow " - "detected (0x%x of 0x%x bytes).\n", + "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16; @@ -2425,8 +2424,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (scsi_status & SS_RESIDUAL_UNDER) { if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { ql_dbg(ql_dbg_io, fcport->vha, 0x301d, - "Dropped frame(s) detected " - "(0x%x of 0x%x bytes).\n", + "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16 | lscsi_status; @@ -2437,8 +2435,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { ql_dbg(ql_dbg_io, fcport->vha, 0x301e, - "Mid-layer underflow " - "detected (0x%x of 0x%x bytes).\n", + "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16; @@ -2452,9 +2449,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) */ ql_dbg(ql_dbg_io, fcport->vha, 0x301f, - "Dropped frame(s) detected (0x%x " - "of 0x%x bytes).\n", resid, - scsi_bufflen(cp)); + "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); res = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index fffa1f7cd8d2..5e74600b99c2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -126,7 +126,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* if PCI error, then avoid mbx processing.*/ if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) && test_bit(UNLOADING, &base_vha->dpc_flags)) { - ql_log(ql_log_warn, vha, 0x1191, + ql_log(ql_log_warn, vha, 0xd04e, "PCI error, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } @@ -170,7 +170,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) */ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { /* Timeout occurred. Return error. 
*/ - ql_log(ql_log_warn, vha, 0x1005, + ql_log(ql_log_warn, vha, 0xd035, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); return QLA_FUNCTION_TIMEOUT; @@ -318,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) mcp->mb[0] = MBS_LINK_DOWN_ERROR; ha->mcp = NULL; rval = QLA_FUNCTION_FAILED; - ql_log(ql_log_warn, vha, 0x1015, + ql_log(ql_log_warn, vha, 0xd048, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); goto premature_exit; } @@ -360,7 +360,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) host_status = RD_REG_DWORD(®->isp24.host_status); hccr = RD_REG_DWORD(®->isp24.hccr); - ql_log(ql_log_warn, vha, 0x1119, + ql_log(ql_log_warn, vha, 0xd04c, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], @@ -3212,7 +3212,7 @@ qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) if (!IS_QLA8044(vha->hw)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; @@ -3228,7 +3228,7 @@ qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x1187, + ql_dbg(ql_dbg_mbx, vha, 0x11a1, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, @@ -3712,12 +3712,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else if (rptid_entry->format == 2) { - ql_dbg(ql_dbg_async, vha, 0xffff, + ql_dbg(ql_dbg_async, vha, 0x505f, "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); - ql_dbg(ql_dbg_async, vha, 0xffff, + ql_dbg(ql_dbg_async, vha, 0x5075, "N2N: Remote WWPN %8phC.\n", rptid_entry->u.f2.port_name); @@ -5789,7 +5789,7 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, "Entered %s.\n", __func__); dd_dma = dma_map_single(&vha->hw->pdev->dev, @@ -5871,13 +5871,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0xffff, + ql_dbg(ql_dbg_mbx, vha, 0x1018, "%s: %s Failed submission. %x.\n", __func__, sp->name, rval); goto done_free_sp; } - ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n", + ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n", sp->name, sp->handle); wait_for_completion(&c->u.mbx.comp); @@ -5886,16 +5886,16 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) rval = c->u.mbx.rc; switch (rval) { case QLA_FUNCTION_TIMEOUT: - ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n", + ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n", __func__, sp->name, rval); break; case QLA_SUCCESS: - ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n", + ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", __func__, sp->name); sp->free(sp); break; default: - ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n", + ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. 
%x.\n", __func__, sp->name, rval); sp->free(sp); break; @@ -5926,8 +5926,8 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { - ql_log(ql_log_warn, vha, 0xffff, - "Failed to allocate port database structure.\n"); + ql_log(ql_log_warn, vha, 0xd047, + "Failed to allocate port database structure.\n"); goto done_free_sp; } memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); @@ -5944,14 +5944,14 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) rval = qla24xx_send_mb_cmd(vha, &mc); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0xffff, + ql_dbg(ql_dbg_mbx, vha, 0x1193, "%s: %8phC fail\n", __func__, fcport->port_name); goto done_free_sp; } rval = __qla24xx_parse_gpdb(vha, fcport, pd); - ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n", + ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", __func__, fcport->port_name); done_free_sp: @@ -5970,10 +5970,10 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, /* Check for logged in state. */ if (pd->current_login_state != PDS_PRLI_COMPLETE && pd->last_login_state != PDS_PRLI_COMPLETE) { - ql_dbg(ql_dbg_mbx, vha, 0xffff, - "Unable to verify login-state (%x/%x) for " - "loop_id %x.\n", pd->current_login_state, - pd->last_login_state, fcport->loop_id); + ql_dbg(ql_dbg_mbx, vha, 0x119a, + "Unable to verify login-state (%x/%x) for loop_id %x.\n", + pd->current_login_state, pd->last_login_state, + fcport->loop_id); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } @@ -6039,12 +6039,12 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha, rval = qla24xx_send_mb_cmd(vha, &mc); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0xffff, - "%s: fail\n", __func__); + ql_dbg(ql_dbg_mbx, vha, 0x119b, + "%s: fail\n", __func__); } else { *entries = mc.mb[1]; - ql_dbg(ql_dbg_mbx, vha, 0xffff, - "%s: done\n", __func__); + ql_dbg(ql_dbg_mbx, vha, 0x119c, + "%s: done\n", __func__); } done: return rval; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 79f050256c55..cca6acaed087 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -377,7 +377,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, } ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); if (ha->base_qpair == NULL) { - ql_log(ql_log_warn, vha, 0x0182, + ql_log(ql_log_warn, vha, 0x00e0, "Failed to allocate base queue pair memory.\n"); goto fail_base_qpair; } @@ -1062,9 +1062,9 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) int res; spin_lock_irqsave(&ha->tgt.sess_lock, flags); - ql_dbg(ql_dbg_init, vha, 0xffff, - "tgt %p, fcport_count=%d\n", - vha, vha->fcport_count); + ql_dbg(ql_dbg_init, vha, 0x00ec, + "tgt %p, fcport_count=%d\n", + vha, vha->fcport_count); res = (vha->fcport_count == 0); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); @@ -1975,7 +1975,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha) /* Queue pairs is the max value minus * the base queue pair */ ha->max_qpairs = ha->max_req_queues - 1; - ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3, "Max no of queues pairs: %d.\n", ha->max_qpairs); } ql_log_pci(ql_log_info, ha->pdev, 0x011c, @@ -3601,10 +3601,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, } else { int now; if (rport) { - ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, - "%s %8phN. 
rport %p roles %x \n", - __func__, fcport->port_name, rport, - rport->roles); + ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, + "%s %8phN. rport %p roles %x\n", + __func__, fcport->port_name, rport, + rport->roles); fc_remote_port_delete(rport); } qlt_do_generation_tick(vha, &now); @@ -3649,7 +3649,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, if (fcport->login_retry == 0) { fcport->login_retry = vha->hw->login_retry_count; - ql_dbg(ql_dbg_disc, vha, 0x2067, + ql_dbg(ql_dbg_disc, vha, 0x20a3, "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", fcport->port_name, fcport->loop_id, fcport->login_retry); } @@ -3673,8 +3673,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) { fc_port_t *fcport; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Mark all dev lost\n"); + ql_dbg(ql_dbg_disc, vha, 0x20f1, + "Mark all dev lost\n"); list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = 0; @@ -4016,7 +4016,7 @@ qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) /* Now configure the dma buffer */ rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); if (rval) { - ql_log(ql_log_fatal, vha, 0x00cf, + ql_log(ql_log_fatal, vha, 0xd033, "Setup extended login buffer ****FAILED****.\n"); qla2x00_free_exlogin_buffer(ha); } @@ -4303,7 +4303,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); if (!vha->gnl.l) { - ql_log(ql_log_fatal, vha, 0xffff, + ql_log(ql_log_fatal, vha, 0xd04a, "Alloc failed for name list.\n"); scsi_remove_host(vha->host); return NULL; @@ -4620,7 +4620,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { fcport->login_retry--; if (fcport->flags & FCF_FABRIC_DEVICE) { - ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + ql_dbg(ql_dbg_disc, fcport->vha, 0x2108, "%s %8phC DS %d LS %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a1f33b06019d..4ad09584d4a8 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -200,8 +200,8 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, host = btree_lookup32(&vha->hw->tgt.host_map, key); if (!host) - ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, - "Unable to find host %06x\n", key); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, + "Unable to find host %06x\n", key); return host; } @@ -252,19 +252,15 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, unsigned long flags; if (tgt->tgt_stop) { - ql_dbg(ql_dbg_async, vha, 0xffff, - "qla_target(%d): dropping unknown ATIO_TYPE7, " - "because tgt is being stopped", vha->vp_idx); + ql_dbg(ql_dbg_async, vha, 0x502c, + "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped", + vha->vp_idx); goto out_term; } u = kzalloc(sizeof(*u), GFP_ATOMIC); - if (u == NULL) { - ql_dbg(ql_dbg_async, vha, 0xffff, - "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u)); - /* It should be harmless and on the next retry should work well */ + if (u == NULL) goto out_term; - } u->vha = vha; memcpy(&u->atio, atio, sizeof(*atio)); @@ -295,8 +291,8 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { if (u->aborted) { - ql_dbg(ql_dbg_async, vha, 0xffff, - "Freeing unknown %s %p, because of Abort", + ql_dbg(ql_dbg_async, vha, 0x502e, + 
"Freeing unknown %s %p, because of Abort\n", "ATIO_TYPE7", u); qlt_send_term_exchange(vha, NULL, &u->atio, ha_locked, 0); @@ -305,19 +301,18 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); if (host != NULL) { - ql_dbg(ql_dbg_async, vha, 0xffff, - "Requeuing unknown ATIO_TYPE7 %p", u); + ql_dbg(ql_dbg_async, vha, 0x502f, + "Requeuing unknown ATIO_TYPE7 %p\n", u); qlt_24xx_atio_pkt(host, &u->atio, ha_locked); } else if (tgt->tgt_stop) { - ql_dbg(ql_dbg_async, vha, 0xffff, - "Freeing unknown %s %p, because tgt is being stopped", - "ATIO_TYPE7", u); + ql_dbg(ql_dbg_async, vha, 0x503a, + "Freeing unknown %s %p, because tgt is being stopped\n", + "ATIO_TYPE7", u); qlt_send_term_exchange(vha, NULL, &u->atio, ha_locked, 0); } else { - ql_dbg(ql_dbg_async, vha, 0xffff, - "u %p, vha %p, host %p, sched again..", u, - vha, host); + ql_dbg(ql_dbg_async, vha, 0x503d, + "Reschedule u %p, vha %p, host %p\n", u, vha, host); if (!queued) { queued = 1; schedule_delayed_work(&vha->unknown_atio_work, @@ -411,7 +406,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, unsigned long flags; if (unlikely(!host)) { - ql_dbg(ql_dbg_tgt, vha, 0xffff, + ql_dbg(ql_dbg_tgt, vha, 0xe00a, "qla_target(%d): Response pkt (ABTS_RECV_24XX) " "received, with unknown vp_index %d\n", vha->vp_idx, entry->vp_index); @@ -565,9 +560,9 @@ void qla2x00_async_nack_sp_done(void *s, int res) struct scsi_qla_host *vha = sp->vha; unsigned long flags; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async done-%s res %x %8phC type %d\n", - sp->name, res, sp->fcport->port_name, sp->type); + ql_dbg(ql_dbg_disc, vha, 0x20f2, + "Async done-%s res %x %8phC type %d\n", + sp->name, res, sp->fcport->port_name, sp->type); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); sp->fcport->flags &= ~FCF_ASYNC_SENT; @@ -593,19 +588,19 @@ void qla2x00_async_nack_sp_done(void *s, int res) if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post upd_fcport fcp_cnt %d\n", - __func__, __LINE__, - sp->fcport->port_name, - vha->fcport_count); + ql_dbg(ql_dbg_disc, vha, 0x20f3, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, + sp->fcport->port_name, + vha->fcport_count); qla24xx_post_upd_fcport_work(vha, sp->fcport); } else { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post gpsc fcp_cnt %d\n", - __func__, __LINE__, - sp->fcport->port_name, - vha->fcport_count); + ql_dbg(ql_dbg_disc, vha, 0x20f5, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, + sp->fcport->port_name, + vha->fcport_count); qla24xx_post_gpsc_work(vha, sp->fcport); } @@ -664,9 +659,9 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s %8phC hndl %x %s\n", - sp->name, fcport->port_name, sp->handle, c); + ql_dbg(ql_dbg_disc, vha, 0x20f4, + "Async-%s %8phC hndl %x %s\n", + sp->name, fcport->port_name, sp->handle, c); return rval; @@ -688,7 +683,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) t = qlt_create_sess(vha, e->u.nack.fcport, 0); mutex_unlock(&vha->vha_tgt.tgt_mutex); if (t) { - ql_log(ql_log_info, vha, 0xffff, + ql_log(ql_log_info, vha, 0xd034, "%s create sess success %p", __func__, t); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* create sess has an extra kref */ @@ -757,7 +752,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t 
*fcport) } if (!kref_get_unless_zero(&sess->sess_kref)) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2107, "%s: kref_get fail sess %8phC \n", __func__, sess->port_name); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); @@ -1098,7 +1093,7 @@ void qlt_unreg_sess(struct fc_port *sess) { struct scsi_qla_host *vha = sess->vha; - ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + ql_dbg(ql_dbg_disc, sess->vha, 0x210a, "%s sess %p for deletion %8phC\n", __func__, sess, sess->port_name); @@ -1288,7 +1283,7 @@ static struct fc_port *qlt_create_sess( if (fcport->se_sess) { if (!kref_get_unless_zero(&sess->sess_kref)) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20f6, "%s: kref_get_unless_zero failed for %8phC\n", __func__, sess->port_name); return NULL; @@ -1310,7 +1305,7 @@ static struct fc_port *qlt_create_sess( if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, &fcport->port_name[0], sess) < 0) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015, "(%d) %8phC check_initiator_node_acl failed\n", vha->vp_idx, fcport->port_name); return NULL; @@ -1321,7 +1316,7 @@ static struct fc_port *qlt_create_sess( * fc_port access across ->tgt.sess_lock reaquire. */ if (!kref_get_unless_zero(&sess->sess_kref)) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20f7, "%s: kref_get_unless_zero failed for %8phC\n", __func__, sess->port_name); return NULL; @@ -2139,7 +2134,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) ELS_PRLO || mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == ELS_TPRLO) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x2106, "TM response logo %phC status %#x state %#x", mcmd->sess->port_name, mcmd->fc_tm_rsp, mcmd->flags); @@ -2497,35 +2492,35 @@ static void qlt_print_dif_err(struct qla_tgt_prm *prm) /* ASCQ */ switch (prm->sense_buffer[13]) { case 1: - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; case 2: - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; case 3: - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; default: - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, "BE detected Dif ERR: lba[%llx|%lld] len[%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; } - ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16); + ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); } } @@ -3285,15 +3280,12 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, /* check appl tag */ if (cmd->e_app_tag != cmd->a_app_tag) { - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, - "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " - "Ref[%x|%x], App[%x|%x], " - "Guard [%x|%x] cmd=%p ox_id[%04x]", - cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, - cmd->a_ref_tag, cmd->e_ref_tag, - cmd->a_app_tag, cmd->e_app_tag, - cmd->a_guard, cmd->e_guard, - cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + ql_dbg(ql_dbg_tgt_dif, vha, 
0xe00d, + "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, + cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, + cmd->atio.u.isp24.fcp_hdr.ox_id); cmd->dif_err_code = DIF_ERR_APP; scsi_status = SAM_STAT_CHECK_CONDITION; @@ -3304,15 +3296,12 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, /* check ref tag */ if (cmd->e_ref_tag != cmd->a_ref_tag) { - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, - "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " - "Ref[%x|%x], App[%x|%x], " - "Guard[%x|%x] cmd=%p ox_id[%04x] ", - cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, - cmd->a_ref_tag, cmd->e_ref_tag, - cmd->a_app_tag, cmd->e_app_tag, - cmd->a_guard, cmd->e_guard, - cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, + "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, + cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, + cmd->atio.u.isp24.fcp_hdr.ox_id); cmd->dif_err_code = DIF_ERR_REF; scsi_status = SAM_STAT_CHECK_CONDITION; @@ -3324,15 +3313,13 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, /* check guard */ if (cmd->e_guard != cmd->a_guard) { - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, - "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " - "Ref[%x|%x], App[%x|%x], " - "Guard [%x|%x] cmd=%p ox_id[%04x]", - cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, - cmd->a_ref_tag, cmd->e_ref_tag, - cmd->a_app_tag, cmd->e_app_tag, - cmd->a_guard, cmd->e_guard, - cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, + "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, + cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, + cmd->atio.u.isp24.fcp_hdr.ox_id); + cmd->dif_err_code = DIF_ERR_GRD; scsi_status = SAM_STAT_CHECK_CONDITION; sense_key = ABORTED_COMMAND; @@ -3450,8 +3437,10 @@ done: spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); } -/* If hardware_lock held on entry, might drop it, then reaquire */ -/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ +/* + * If hardware_lock held on entry, might drop it, then reaquire + * This function sends the appropriate CTIO to ISP 2xxx or 24xx + */ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio) @@ -3462,7 +3451,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, int ret = 0; uint16_t temp; - ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); + ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); if (pkt == NULL) { @@ -3615,10 +3604,10 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) * 1) XFER Rdy completion + CMD_T_ABORT * 2) TCM TMR - drain_state_list */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, - "multiple abort. %p transport_state %x, t_state %x," - " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state, - cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, + "multiple abort. 
%p transport_state %x, t_state %x, " + "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); return EIO; } cmd->aborted = 1; @@ -3670,7 +3659,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, int term = 0; if (cmd->se_cmd.prot_op) - ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x] op %#x/%s", cmd->lba, cmd->lba, @@ -3884,7 +3873,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ cmd->sess->logout_on_delete = 0; cmd->sess->send_els_logo = 1; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20f8, "%s %d %8phC post del sess\n", __func__, __LINE__, cmd->sess->port_name); @@ -4227,7 +4216,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* Another WWN used to have our s_id. Our PLOGI scheduled its * session deletion, but it's still in sess_del_work wq */ if (sess->deleted) { - ql_dbg(ql_dbg_io, vha, 0x3061, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, "New command while old session %p is being deleted\n", sess); return -EFAULT; @@ -4237,7 +4226,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. */ if (!kref_get_unless_zero(&sess->sess_kref)) { - ql_dbg(ql_dbg_tgt, vha, 0xffff, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, "%s: kref_get fail, %8phC oxid %x \n", __func__, sess->port_name, be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); @@ -4492,7 +4481,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ - ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4672,9 +4661,9 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, sess->keep_nport_handle = ((sess->loop_id == loop_id) && (sess->d_id.b24 == port_id.b24)); - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post del sess\n", - __func__, __LINE__, sess->port_name); + ql_dbg(ql_dbg_disc, vha, 0x20f9, + "%s %d %8phC post del sess\n", + __func__, __LINE__, sess->port_name); qlt_schedule_sess_for_deletion_lock(sess); @@ -4744,7 +4733,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, /* Make session global (not used in fabric mode) */ if (ha->current_topology != ISP_CFG_F) { if (sess) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20fa, "%s %d %8phC post nack\n", __func__, __LINE__, sess->port_name); qla24xx_post_nack_work(vha, sess, iocb, @@ -4757,7 +4746,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } } else { if (sess) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20fb, "%s %d %8phC post nack\n", __func__, __LINE__, sess->port_name); qla24xx_post_nack_work(vha, sess, iocb, @@ -4791,7 +4780,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20fc, "%s: logo %llx res %d sess %p ", __func__, wwn, res, sess); if (res == 0) { @@ -4823,7 +4812,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, sess = qla2x00_find_fcport_by_wwpn(vha, iocb->u.isp24.port_name, 1); if (sess) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20fd, "sess %p lid %d|%d DS %d LS %d\n", sess, sess->loop_id, loop_id, 
sess->disc_state, sess->fw_login_state); @@ -5597,17 +5586,17 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, break; case MBA_REJECTED_FCP_CMD: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, - "qla_target(%d): Async event LS_REJECT occurred " - "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, - le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), - le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, + "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", + vha->vp_idx, + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); if (le16_to_cpu(mailbox[3]) == 1) { /* exchange starvation. */ vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0xd03a, "Exchange starvation-. Resetting RISC\n"); vha->hw->exch_starvation = 0; @@ -5707,12 +5696,12 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, case MODE_DUAL: if (newfcport) { if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20fe, "%s %d %8phC post upd_fcport fcp_cnt %d\n", __func__, __LINE__, fcport->port_name, vha->fcport_count); qla24xx_post_upd_fcport_work(vha, fcport); } else { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc, vha, 0x20ff, "%s %d %8phC post gpsc fcp_cnt %d\n", __func__, __LINE__, fcport->port_name, vha->fcport_count); qla24xx_post_gpsc_work(vha, fcport); @@ -5838,7 +5827,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, } if (!kref_get_unless_zero(&sess->sess_kref)) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, "%s: kref_get fail %8phC \n", __func__, sess->port_name); sess = NULL; @@ -5902,7 +5891,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, } if (!kref_get_unless_zero(&sess->sess_kref)) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020, "%s: kref_get fail %8phC\n", __func__, sess->port_name); sess = NULL; @@ -6251,7 +6240,7 @@ qlt_enable_vha(struct scsi_qla_host *vha) qla24xx_enable_vp(vha); } else { if (ha->msix_entries) { - ql_dbg(ql_dbg_tgt, vha, 0xffff, + ql_dbg(ql_dbg_tgt, vha, 0xe081, "%s: host%ld : vector %d cpu %d\n", __func__, vha->host_no, ha->msix_entries[rspq_ent].vector, @@ -6387,7 +6376,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) * can not be trusted. There is no point in passing * it further up. 
*/ - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0xd03c, "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", pkt->u.isp24.fcp_hdr.s_id, be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), @@ -6744,7 +6733,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) rc = btree_init32(&ha->tgt.host_map); if (rc) - ql_log(ql_log_info, base_vha, 0xffff, + ql_log(ql_log_info, base_vha, 0xd03d, "Unable to initialize ha->host_map btree\n"); qlt_update_vp_map(base_vha, SET_VP_IDX); @@ -6872,25 +6861,25 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) case SET_AL_PA: slot = btree_lookup32(&vha->hw->tgt.host_map, key); if (!slot) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018, "Save vha in host_map %p %06x\n", vha, key); rc = btree_insert32(&vha->hw->tgt.host_map, key, vha, GFP_ATOMIC); if (rc) - ql_log(ql_log_info, vha, 0xffff, + ql_log(ql_log_info, vha, 0xd03e, "Unable to insert s_id into host_map: %06x\n", key); return; } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, - "replace existing vha in host_map %p %06x\n", vha, key); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, + "replace existing vha in host_map %p %06x\n", vha, key); btree_update32(&vha->hw->tgt.host_map, key, vha); break; case RESET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; break; case RESET_AL_PA: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, "clear vha in host_map %p %06x\n", vha, key); slot = btree_lookup32(&vha->hw->tgt.host_map, key); if (slot) @@ -6952,7 +6941,7 @@ int __init qlt_init(void) sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL); if (!qla_tgt_mgmt_cmd_cachep) { - ql_log(ql_log_fatal, NULL, 0xe06d, + ql_log(ql_log_fatal, NULL, 0xd04b, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); return -ENOMEM; } -- cgit From f7e761f56c711907ccf342532120f826d5ccff3f Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:02 -0700 Subject: scsi: qla2xxx: Turn on FW option for exchange check Tell FW to track exchange/cmd state to prevent driver from using stale exchange or exchange that is not meant for this command. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_init.c | 18 ++++++++++++++++++ drivers/scsi/qla2xxx/qla_target.c | 26 +++++++++++++++----------- drivers/scsi/qla2xxx/qla_target.h | 2 +- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index c425d061cd80..f92e74639bb1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2969,6 +2969,18 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) ha->fw_options[2] &= ~BIT_11; } + if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + /* + * Tell FW to track each exchange to prevent + * driver from using stale exchange. + */ + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_4; + else + ha->fw_options[2] &= ~BIT_4; + } + ql_dbg(ql_dbg_init, vha, 0x00e8, "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", __func__, ha->fw_options[1], ha->fw_options[2], @@ -7361,6 +7373,12 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) ha->fw_options[2] &= ~BIT_11; } + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_4; + else + ha->fw_options[2] &= ~BIT_4; + if (ql2xetsenable) { /* Enable ETS Burst. 
*/ memset(ha->fw_options, 0, sizeof(ha->fw_options)); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 4ad09584d4a8..3cdffdd90279 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2016,8 +2016,9 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio->exchange_addr = atio->u.isp24.exchange_addr; - ctio->u.status1.flags = (atio->u.isp24.attr << 9) | - cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); + temp = (atio->u.isp24.attr << 9)| + CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; + ctio->u.status1.flags = cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = @@ -2070,8 +2071,9 @@ void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio->exchange_addr = atio->u.isp24.exchange_addr; - ctio->u.status1.flags = (atio->u.isp24.attr << 9) | - cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); + temp = (atio->u.isp24.attr << 9) | + CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; + ctio->u.status1.flags = cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = @@ -2359,7 +2361,8 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; pkt->exchange_addr = atio->u.isp24.exchange_addr; - pkt->u.status0.flags |= (atio->u.isp24.attr << 9); + temp = atio->u.isp24.attr << 9; + pkt->u.status0.flags |= cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); @@ -3484,9 +3487,9 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; - ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | - cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | - CTIO7_FLAGS_TERMINATE); + temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | + CTIO7_FLAGS_TERMINATE; + ctio24->u.status1.flags = cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.ox_id = cpu_to_le16(temp); @@ -4978,6 +4981,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, request_t *pkt; struct fc_port *sess = NULL; unsigned long flags; + u16 temp; spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, @@ -5010,10 +5014,10 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; - ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | - cpu_to_le16( + temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | - CTIO7_FLAGS_DONT_RET_CTIO); + CTIO7_FLAGS_DONT_RET_CTIO; + ctio24->u.status1.flags = cpu_to_le16(temp); /* * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, * if the explicit conformation is used. 
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 25ea90b8f6c9..07ff565485b7 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -426,7 +426,7 @@ struct ctio7_to_24xx { } status0; struct { uint16_t sense_length; - uint16_t flags; + __le16 flags; uint32_t residual; __le16 ox_id; uint16_t scsi_status; -- cgit From 99e1b683c4be3fee5cff824af18411cf8cc568d0 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:03 -0700 Subject: scsi: qla2xxx: Add ql2xiniexchg parameter Previously, the ql2xexchoffld module parameter was used to control the max number of exchanges to be offload onto host memory. Module parameter ql_dm_tgt_ex_pct was used to control the percentage of exchanges allocated to the Target side. With this patch, module parameter ql_dm_tgt_ex_pct is no longer used to control exchanges for the driver. New module parameter ql2xiniexchg is added to control exchanges between target mode and initiator mode. With the updated module parameters, users can control the exact number of exchanges for either Initiator or Target. The exchange offload feature will be automatically enabled when the total number of exchanges exceeds 2048 limit. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 6 +- drivers/scsi/qla2xxx/qla_gbl.h | 3 +- drivers/scsi/qla2xxx/qla_init.c | 2 +- drivers/scsi/qla2xxx/qla_inline.h | 16 +++++ drivers/scsi/qla2xxx/qla_mbx.c | 14 ++-- drivers/scsi/qla2xxx/qla_os.c | 136 +++++++++++++++++++++++++++----------- drivers/scsi/qla2xxx/qla_target.c | 48 +++----------- 7 files changed, 140 insertions(+), 85 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 51b262b236b4..ddf93efe3986 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -286,7 +286,7 @@ struct name_list_extended { #define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ #define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. 
*/ #define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ -#define EXTENDED_EXCH_ENTRY_CNT 32768 /* Entries for offload case */ +#define FW_DEF_EXCHANGES_CNT 2048 struct req_que; struct qla_tgt_sess; @@ -3593,6 +3593,10 @@ struct qla_hw_data { #define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) #define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_EXCHG_OFFLD_CAPABLE(ha) \ + (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \ + (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) /* HBA serial number */ uint8_t serial0; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 5b2451745e9f..f8540f5c9e5d 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -137,6 +137,7 @@ extern int ql2xmdcapmask; extern int ql2xmdenable; extern int ql2xexlogins; extern int ql2xexchoffld; +extern int ql2xiniexchg; extern int ql2xfwholdabts; extern int ql2xmvasynctoatio; @@ -839,7 +840,7 @@ extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *, uint16_t *); extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr); extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *); -extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t); +extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *); extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *); int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f92e74639bb1..08000aebe8d4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2723,7 +2723,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) if (ql2xexlogins) ha->flags.exlogins_enabled = 1; - if (ql2xexchoffld) + if (qla_is_exch_offld_enabled(vha)) ha->flags.exchoffld_enabled = 1; rval = qla2x00_execute_fw(vha, srisc_address); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index c61a6a871c8e..9996ec0daab1 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -307,3 +307,19 @@ qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay) fcport->retry_delay_timestamp = jiffies + (retry_delay * HZ / 10); } + +static inline bool +qla_is_exch_offld_enabled(struct scsi_qla_host *vha) +{ + if (qla_ini_mode_enabled(vha) && + (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)) + return true; + else if (qla_tgt_mode_enabled(vha) && + (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)) + return true; + else if (qla_dual_mode_enabled(vha) && + ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT)) + return true; + else + return false; +} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 5e74600b99c2..bebac42d9e9e 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -821,7 +821,7 @@ qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz, */ #define CONFIG_XCHOFFLD_MEM 0x3 int -qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) +qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; @@ -834,12 +834,12 @@ qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; mcp->mb[1] = CONFIG_XCHOFFLD_MEM; - mcp->mb[2] = MSW(phys_addr); - mcp->mb[3] = LSW(phys_addr); 
- mcp->mb[6] = MSW(MSD(phys_addr)); - mcp->mb[7] = LSW(MSD(phys_addr)); - mcp->mb[8] = MSW(ha->exlogin_size); - mcp->mb[9] = LSW(ha->exlogin_size); + mcp->mb[2] = MSW(ha->exchoffld_buf_dma); + mcp->mb[3] = LSW(ha->exchoffld_buf_dma); + mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma)); + mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma)); + mcp->mb[8] = MSW(ha->exchoffld_size); + mcp->mb[9] = LSW(ha->exchoffld_size); mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_11|MBX_0; mcp->tov = MBX_TOV_SECONDS; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index cca6acaed087..dcf50aa61e9d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -224,11 +224,15 @@ MODULE_PARM_DESC(ql2xexlogins, "Number of extended Logins. " "0 (Default)- Disabled."); -int ql2xexchoffld = 0; -module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR); +int ql2xexchoffld = 1024; +module_param(ql2xexchoffld, uint, 0644); MODULE_PARM_DESC(ql2xexchoffld, - "Number of exchanges to offload. " - "0 (Default)- Disabled."); + "Number of target exchanges."); + +int ql2xiniexchg = 1024; +module_param(ql2xiniexchg, uint, 0644); +MODULE_PARM_DESC(ql2xiniexchg, + "Number of initiator exchanges."); int ql2xfwholdabts = 0; module_param(ql2xfwholdabts, int, S_IRUGO); @@ -3986,6 +3990,9 @@ qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) if (!ql2xexlogins) return QLA_SUCCESS; + if (!IS_EXLOGIN_OFFLD_CAPABLE(ha)) + return QLA_SUCCESS; + ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); max_cnt = 0; rval = qla_get_exlogin_status(vha, &size, &max_cnt); @@ -3996,21 +4003,27 @@ qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) } temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins; - ha->exlogin_size = (size * temp); - ql_log(ql_log_info, vha, 0xd024, - "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", - max_cnt, size, temp); - - ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n", - ha->exlogin_size); - - /* Get consistent memory for extended logins */ - ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, - ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); - if (!ha->exlogin_buf) { - ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, + temp *= size; + + if (temp != ha->exlogin_size) { + qla2x00_free_exlogin_buffer(ha); + ha->exlogin_size = temp; + + ql_log(ql_log_info, vha, 0xd024, + "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", + max_cnt, size, temp); + + ql_log(ql_log_info, vha, 0xd025, + "EXLOGIN: requested size=0x%x\n", ha->exlogin_size); + + /* Get consistent memory for extended logins */ + ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, + ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); + if (!ha->exlogin_buf) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, "Failed to allocate memory for exlogin_buf_dma.\n"); - return -ENOMEM; + return -ENOMEM; + } } /* Now configure the dma buffer */ @@ -4041,15 +4054,49 @@ qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) } } +static void +qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) +{ + u32 temp; + *ret_cnt = FW_DEF_EXCHANGES_CNT; + + if (qla_ini_mode_enabled(vha)) { + if (ql2xiniexchg > max_cnt) + ql2xiniexchg = max_cnt; + + if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT) + *ret_cnt = ql2xiniexchg; + } else if (qla_tgt_mode_enabled(vha)) { + if (ql2xexchoffld > max_cnt) + ql2xexchoffld = max_cnt; + + if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT) + *ret_cnt = ql2xexchoffld; + } else if (qla_dual_mode_enabled(vha)) { + temp = ql2xiniexchg + ql2xexchoffld; + if 
(temp > max_cnt) { + ql2xiniexchg -= (temp - max_cnt)/2; + ql2xexchoffld -= (((temp - max_cnt)/2) + 1); + temp = max_cnt; + } + + if (temp > FW_DEF_EXCHANGES_CNT) + *ret_cnt = temp; + } +} + int qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) { int rval; - uint16_t size, max_cnt, temp; + u16 size, max_cnt; + u32 temp; struct qla_hw_data *ha = vha->hw; - /* Return if we don't need to alloacate any extended logins */ - if (!ql2xexchoffld) + if (!ha->flags.exchoffld_enabled) + return QLA_SUCCESS; + + if (!IS_EXCHG_OFFLD_CAPABLE(ha)) return QLA_SUCCESS; ql_log(ql_log_info, vha, 0xd014, @@ -4063,30 +4110,45 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) return rval; } - temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld; - ha->exchoffld_size = (size * temp); - ql_log(ql_log_info, vha, 0xd016, - "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n", - max_cnt, size, temp); + qla2x00_number_of_exch(vha, &temp, max_cnt); + temp *= size; - ql_log(ql_log_info, vha, 0xd017, - "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size); - - /* Get consistent memory for extended logins */ - ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, - ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); - if (!ha->exchoffld_buf) { - ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, - "Failed to allocate memory for exchoffld_buf_dma.\n"); - return -ENOMEM; + if (temp != ha->exchoffld_size) { + qla2x00_free_exchoffld_buffer(ha); + ha->exchoffld_size = temp; + + ql_log(ql_log_info, vha, 0xd016, + "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n", + max_cnt, size, temp); + + ql_log(ql_log_info, vha, 0xd017, + "Exchange Buffers requested size = 0x%x\n", + ha->exchoffld_size); + + /* Get consistent memory for extended logins */ + ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, + ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); + if (!ha->exchoffld_buf) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, + "Failed to allocate memory for exchoffld_buf_dma.\n"); + return -ENOMEM; + } } /* Now configure the dma buffer */ - rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma); + rval = qla_set_exchoffld_mem_cfg(vha); if (rval) { ql_log(ql_log_fatal, vha, 0xd02e, "Setup exchange offload buffer ****FAILED****.\n"); qla2x00_free_exchoffld_buffer(ha); + } else { + /* re-adjust number of target exchange */ + struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; + + if (qla_ini_mode_enabled(vha)) + icb->exchange_count = 0; + else + icb->exchange_count = cpu_to_le16(ql2xexchoffld); } return rval; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 3cdffdd90279..324048476d9e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -59,7 +59,7 @@ MODULE_PARM_DESC(qlini_mode, "when ready " "\"enabled\" (default) - initiator mode will always stay enabled."); -static int ql_dm_tgt_ex_pct = 50; +static int ql_dm_tgt_ex_pct = 0; module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql_dm_tgt_ex_pct, "For Dual Mode (qlini_mode=dual), this parameter determines " @@ -6438,8 +6438,9 @@ void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) { struct qla_hw_data *ha = vha->hw; - u32 tmp; - u16 t; + + if (!QLA_TGT_MODE_ENABLED()) + return; if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { @@ -6454,24 +6455,10 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) ha->tgt.saved_set = 1; } - if 
(qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha)) nv->exchange_count = cpu_to_le16(0xFFFF); - } else { /* dual */ - if (ql_dm_tgt_ex_pct > 100) { - ql_dm_tgt_ex_pct = 50; - } else if (ql_dm_tgt_ex_pct == 100) { - /* leave some for FW */ - ql_dm_tgt_ex_pct = 95; - } - - tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; - tmp = tmp/100; - if (tmp > 0xffff) - tmp = 0xffff; - - t = tmp & 0xffff; - nv->exchange_count = cpu_to_le16(t); - } + else /* dual */ + nv->exchange_count = cpu_to_le16(ql2xexchoffld); /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); @@ -6556,8 +6543,6 @@ void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) { struct qla_hw_data *ha = vha->hw; - u32 tmp; - u16 t; if (!QLA_TGT_MODE_ENABLED()) return; @@ -6575,23 +6560,10 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) ha->tgt.saved_set = 1; } - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha)) nv->exchange_count = cpu_to_le16(0xFFFF); - } else { /* dual */ - if (ql_dm_tgt_ex_pct > 100) { - ql_dm_tgt_ex_pct = 50; - } else if (ql_dm_tgt_ex_pct == 100) { - /* leave some for FW */ - ql_dm_tgt_ex_pct = 95; - } - - tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; - tmp = tmp/100; - if (tmp > 0xffff) - tmp = 0xffff; - t = tmp & 0xffff; - nv->exchange_count = cpu_to_le16(t); - } + else /* dual */ + nv->exchange_count = cpu_to_le16(ql2xexchoffld); /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); -- cgit From 3a33dc95b00be33f150bc357ab67331cdba7fc88 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:04 -0700 Subject: scsi: qla2xxx: Remove redundant wait when target is stopped. Current code already destroy all target sessions when target Mode is stopped. Target core would waits for all commands that belong to each session to purge. The extra wait for interrupts to settle down is not relevant. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_target.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 324048476d9e..f9ccf845d084 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1427,6 +1427,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt) if (npiv_vports) { mutex_unlock(&qla_tgt_mutex); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, + "NPIV is in use. Can not stop target\n"); return -EPERM; } } @@ -1437,7 +1439,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt) return -EPERM; } - ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", + ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", vha->host_no, vha); /* * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. 
@@ -1480,9 +1482,7 @@ EXPORT_SYMBOL(qlt_stop_phase1); /* Called by tcm_qla2xxx configfs code */ void qlt_stop_phase2(struct qla_tgt *tgt) { - struct qla_hw_data *ha = tgt->ha; - scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - unsigned long flags; + scsi_qla_host_t *vha = tgt->vha; if (tgt->tgt_stopped) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, @@ -1490,24 +1490,19 @@ void qlt_stop_phase2(struct qla_tgt *tgt) dump_stack(); return; } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, - "Waiting for %d IRQ commands to complete (tgt %p)", - tgt->irq_cmd_count, tgt); + if (!tgt->tgt_stop) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, + "%s: phase1 stop is not completed\n", __func__); + dump_stack(); + return; + } mutex_lock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->hardware_lock, flags); - while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); - udelay(2); - spin_lock_irqsave(&ha->hardware_lock, flags); - } tgt->tgt_stop = 0; tgt->tgt_stopped = 1; - spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_unlock(&vha->vha_tgt.tgt_mutex); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished", + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", tgt); } EXPORT_SYMBOL(qlt_stop_phase2); @@ -1517,6 +1512,10 @@ static void qlt_release(struct qla_tgt *tgt) { scsi_qla_host_t *vha = tgt->vha; + if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop && + !tgt->tgt_stopped) + qlt_stop_phase1(tgt); + if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) qlt_stop_phase2(tgt); @@ -5531,7 +5530,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int login_code; - if (!ha->tgt.tgt_ops) + if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) return; if (unlikely(tgt == NULL)) { -- cgit From 2da52737521a2a65eb9b2323a0748047a454e86c Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:05 -0700 Subject: scsi: qla2xxx: Accelerate SCSI BUSY status generation in target mode Accelerate generation of SCSI busy to let initiators slow down when target is running low in resources. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_init.c | 13 +++++++++++-- drivers/scsi/qla2xxx/qla_mbx.c | 2 ++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 08000aebe8d4..436968ad4484 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -7374,10 +7374,19 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) } if (qla_tgt_mode_enabled(vha) || - qla_dual_mode_enabled(vha)) + qla_dual_mode_enabled(vha)) { + /* FW auto send SCSI status during */ + ha->fw_options[1] |= BIT_8; + ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8; + + /* FW perform Exchange validation */ ha->fw_options[2] |= BIT_4; - else + } else { + ha->fw_options[1] &= ~BIT_8; + ha->fw_options[10] &= 0x00ff; + ha->fw_options[2] &= ~BIT_4; + } if (ql2xetsenable) { /* Enable ETS Burst. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index bebac42d9e9e..f02a2baffb5b 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1048,6 +1048,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) mcp->in_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->in_mb |= MBX_1; + mcp->mb[10] = fwopts[10]; + mcp->out_mb |= MBX_10; } else { mcp->mb[10] = fwopts[10]; mcp->mb[11] = fwopts[11]; -- cgit From ba68a63561deb328e1ad0940cea0932e8962ef0e Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:06 -0700 Subject: scsi: qla2xxx: Remove unused irq_cmd_count field. When driver is unloaded, all sessions are torn down, all commmands are flushed, chip is reset to ensure there is no knowledge of target mode in ISP. The irq_cmd_count field was used to make sure all commands are processed on top of that. The irq_cmd_count is now redundant and not needed. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_target.c | 9 +-------- drivers/scsi/qla2xxx/qla_target.h | 1 - 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f9ccf845d084..a8e57072019b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -5355,8 +5355,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) * Otherwise, some commands can stuck. */ - tgt->irq_cmd_count++; - switch (pkt->entry_type) { case CTIO_CRC2: case CTIO_TYPE7: @@ -5382,10 +5380,8 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) } rc = qlt_chk_qfull_thresh_hold(vha, atio, true); - if (rc != 0) { - tgt->irq_cmd_count--; + if (rc != 0) return; - } rc = qlt_handle_cmd_for_atio(vha, atio); if (unlikely(rc != 0)) { @@ -5517,7 +5513,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) break; } - tgt->irq_cmd_count--; } /* @@ -5547,7 +5542,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, * Otherwise, some commands can stuck. */ - tgt->irq_cmd_count++; switch (code) { case MBA_RESET: /* Reset */ @@ -5635,7 +5629,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, break; } - tgt->irq_cmd_count--; } static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 07ff565485b7..c328a267c4c3 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -790,7 +790,6 @@ struct qla_tgt { * because req_pkt() can drop/reaquire HW lock inside. Protected by * HW lock. */ - int irq_cmd_count; int atio_irq_cmd_count; int datasegs_per_cmd, datasegs_per_cont, sg_tablesize; -- cgit From d63b328f0904a1e04368de5cf1d27c0f2d0bc554 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 2 Jun 2017 09:12:07 -0700 Subject: scsi: qla2xxx: Remove extra register read Remove extra register read in each interrupt processing to improve performance. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_iocb.c | 4 +++- drivers/scsi/qla2xxx/qla_target.c | 11 ++--------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index ea027f6a7fd4..8404f17f3c6c 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -464,7 +464,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) req->ring_ptr++; /* Set chip new ring index. */ - if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (ha->mqenable || IS_QLA27XX(ha)) { + WRT_REG_DWORD(req->req_q_in, req->ring_index); + } else if (IS_QLA83XX(ha)) { WRT_REG_DWORD(req->req_q_in, req->ring_index); RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); } else if (IS_QLAFX00(ha)) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a8e57072019b..88eea4d34487 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2252,11 +2252,10 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, uint32_t req_cnt) { - uint32_t cnt, cnt_in; + uint32_t cnt; if (vha->req->cnt < (req_cnt + 2)) { cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out); - cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in); if (vha->req->ring_index < cnt) vha->req->cnt = cnt - vha->req->ring_index; @@ -2264,14 +2263,8 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, vha->req->cnt = vha->req->length - (vha->req->ring_index - cnt); - if (unlikely(vha->req->cnt < (req_cnt + 2))) { - ql_dbg(ql_dbg_io, vha, 0x305a, - "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", - vha->vp_idx, vha->req->ring_index, - vha->req->cnt, req_cnt, cnt, cnt_in, - vha->req->length); + if (unlikely(vha->req->cnt < (req_cnt + 2))) return -EAGAIN; - } } vha->req->cnt -= req_cnt; -- cgit From 16a611154dc11d73dfa99ae2089a1f1ef45db7f8 Mon Sep 17 00:00:00 2001 From: "Dupuis, Chad" Date: Fri, 2 Jun 2017 12:02:05 -0700 Subject: scsi: qedf: Check if sense buffer has been allocated during completion sc_cmd->sense_buffer is not guaranteed to be allocated so we need to sc_cmd->check if the pointer is NULL before trying to copy anything into it. Fixes the crash: [ 143.793176] [0000:00:00.0]:[qedf_eh_device_reset:626]: LUN RESET Issued... 
[ 143.802996] BUG: unable to handle kernel NULL pointer dereference at (null) [ 143.803063] IP: qedf_parse_fcp_rsp+0xe2/0x290 [qedf] [ 143.803077] PGD 0 [ 143.803078] P4D 0 [ 143.803103] Oops: 0002 [#1] SMP [ 143.803115] Modules linked in: msr(E) ebtable_filter(E) ebtables(E) ip6table_filter(E) ip6_tables(E) iptable_filter(E) ip_tables(E) x_tables(E) raw(E) scsi_transport_iscsi(E) br_netfilter(E) bridge(E) iscsi_ibft(E) iscsi_boot_sysfs(E) intel_rapl(E) sb_edac(E) x86_pkg_temp_thermal(E) intel_powerclamp(E) coretemp(E) kvm_intel(E) kvm(E) irqbypass(E) crct10dif_pclmul(E) crc32_pclmul(E) xfs(E) ghash_clmulni_intel(E) pcbc(E) aesni_intel(E) aes_x86_64(E) crypto_simd(E) ipmi_ssif(E) glue_helper(E) iTCO_wdt(E) iTCO_vendor_support(E) lpc_ich(E) ipmi_si(E) pcspkr(E) hpilo(E) ioatdma(E) cryptd(E) ipmi_devintf(E) hpwdt(E) mfd_core(E) shpchp(E) dca(E) thermal(E) pcc_cpufreq(E) ipmi_msghandler(E) acpi_cpufreq(E) af_packet(E) btrfs(E) xor(E) raid6_pq(E) sr_mod(E) cdrom(E) ata_generic(E) sd_mod(E) 8021q(E) garp(E) [ 143.803302] stp(E) llc(E) mrp(E) bnx2fc(E) cnic(E) uio(E) mgag200(E) ata_piix(E) i2c_algo_bit(E) drm_kms_helper(E) syscopyarea(E) sysfillrect(E) sysimgblt(E) ahci(E) fb_sys_fops(E) bnx2x(E) qedf(E) serio_raw(E) libahci(E) ttm(E) uhci_hcd(E) ehci_pci(E) qed(E) mdio(E) libcrc32c(E) ehci_hcd(E) crc32c_intel(E) drm(E) libata(E) usbcore(E) tg3(E) ptp(E) hpsa(E) pps_core(E) scsi_transport_sas(E) libphy(E) wmi(E) button(E) fcoe(E) libfcoe(E) libfc(E) scsi_transport_fc(E) sg(E) dm_multipath(E) dm_mod(E) scsi_dh_rdac(E) scsi_dh_emc(E) scsi_dh_alua(E) scsi_mod(E) autofs4(E) [ 143.803438] CPU: 31 PID: 494 Comm: kworker/31:2 Tainted: G E 4.12.0-rc1-69-default+ #1 [ 143.803461] Hardware name: HP ProLiant DL380p Gen8, BIOS P70 08/20/2012 [ 143.803480] Workqueue: qedf_io_wq qedf_fp_io_handler [qedf] [ 143.803496] task: ffff8804181a0000 task.stack: ffffc90003b64000 [ 143.803514] RIP: 0010:qedf_parse_fcp_rsp+0xe2/0x290 [qedf] [ 143.803529] RSP: 0018:ffffc90003b67dc8 EFLAGS: 00010246 [ 143.803544] RAX: 0000000000000000 RBX: ffff880401abdd48 RCX: 000000000000000c [ 143.803563] RDX: 0000000000000060 RSI: ffffffffa039c740 RDI: 0000000000000000 [ 143.803581] RBP: ffffc90003b67df0 R08: ffffffffa039dba8 R09: 0000000000000000 [ 143.803600] R10: 0000000000000000 R11: 0000000000000018 R12: 0000000000000000 [ 143.803619] R13: ffff88040ac80bc8 R14: 0000000000000008 R15: ffff880407c14008 [ 143.803638] FS: 0000000000000000(0000) GS:ffff88043f7c0000(0000) knlGS:0000000000000000 [ 143.804360] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 143.805065] CR2: 0000000000000000 CR3: 0000000001c09000 CR4: 00000000000406e0 [ 143.805753] Call Trace: [ 143.806436] qedf_process_tmf_compl+0x19/0x30 [qedf] [ 143.807124] qedf_process_cqe+0x265/0x280 [qedf] [ 143.807800] qedf_fp_io_handler+0x26/0x60 [qedf] [ 143.808469] process_one_work+0x138/0x370 [ 143.809133] worker_thread+0x4d/0x3b0 [ 143.809797] kthread+0x109/0x140 [ 143.810451] ? rescuer_thread+0x320/0x320 [ 143.811100] ? kthread_park+0x60/0x60 [ 143.811743] ret_from_fork+0x2c/0x40 Signed-off-by: Chad Dupuis Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qedf/qedf_io.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index ea37c78418d7..ded386036c27 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1041,10 +1041,13 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req, fcp_sns_len = SCSI_SENSE_BUFFERSIZE; } - memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - if (fcp_sns_len) - memcpy(sc_cmd->sense_buffer, sense_data, - fcp_sns_len); + /* The sense buffer can be NULL for TMF commands */ + if (sc_cmd->sense_buffer) { + memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (fcp_sns_len) + memcpy(sc_cmd->sense_buffer, sense_data, + fcp_sns_len); + } } static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req) -- cgit From 52eacd619c5ab611db3b193cb213e0e892d5984f Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Fri, 2 Jun 2017 13:32:25 +0200 Subject: scsi: qla2xxx: remove writeq/readq function definitions Instead of rewriting write/readq, use linux/io-64-nonatomic-lo-hi.h which already have them. Signed-off-by: Corentin Labbe Reviewed-by: Bart Van Assche Acked-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_nx.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 77624eac95a4..71a41093530e 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -7,6 +7,8 @@ #ifndef __QLA_NX_H #define __QLA_NX_H +#include + /* * Following are the states of the Phantom. Phantom will set them and * Host will read to check if the fields are correct. @@ -819,21 +821,6 @@ struct qla82xx_uri_data_desc{ #define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) #define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - return readl(addr) | (((u64) readl(addr + 4)) << 32LL); -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel(((u32) (val)), (addr)); - writel(((u32) (val >> 32)), (addr + 4)); -} -#endif - /* Request and response queue size */ #define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */ #define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/ -- cgit From 6ff8a3b2a1e9ad5f0b559e7ef3b991918ef1caf9 Mon Sep 17 00:00:00 2001 From: Michael Schmitz Date: Mon, 5 Jun 2017 19:37:59 +1200 Subject: scsi: atari_scsi: Use m68k_realnum_memory for FastRAM test m68k_num_memory is unsuitable to test for the presence of FastRAM on CT60 if the kernel is located in FastRAM: in arch/m68k/mm/motorola.c the ST-RAM chunk is skipped and m68k_num_memory is decremented in this case. m68k_realnum_memory still contains the actual number of RAM chunks so use that. Signed-off-by: Michael Schmitz Tested-by: Christian T. Steigies Signed-off-by: Martin K. Petersen --- drivers/scsi/atari_scsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index f792420c533e..a75feebe6ad6 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -776,7 +776,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev) * from/to alternative Ram. 
*/ if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) && - m68k_num_memory > 1) { + m68k_realnum_memory > 1) { atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); if (!atari_dma_buffer) { pr_err(PFX "can't allocate ST-RAM double buffer\n"); -- cgit From 566ec9ad315b46e7056472f4cd1d1b79bbad62da Mon Sep 17 00:00:00 2001 From: Szymon Mielczarek Date: Mon, 5 Jun 2017 11:36:54 +0300 Subject: scsi: ufs: Tidy clocks list head usage Move the initialization of clocks list head to ufshcd_alloc_host() so that every driver doesn't have to do it. Remove checks for the list head being NULL because that is not possible. Signed-off-by: Szymon Mielczarek Signed-off-by: Adrian Hunter Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/tc-dwc-g210-pci.c | 2 -- drivers/scsi/ufs/ufshcd-pci.c | 2 -- drivers/scsi/ufs/ufshcd-pltfrm.c | 2 -- drivers/scsi/ufs/ufshcd.c | 12 +++++++----- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c index c09a0fef0fe6..325d5e14fc0d 100644 --- a/drivers/scsi/ufs/tc-dwc-g210-pci.c +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -130,8 +130,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - INIT_LIST_HEAD(&hba->clk_list_head); - hba->vops = &tc_dwc_g210_pci_hba_vops; err = ufshcd_init(hba, mmio_base, pdev->irq); diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 52b546fb509b..5dd4122cbd85 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -143,8 +143,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - INIT_LIST_HEAD(&hba->clk_list_head); - err = ufshcd_init(hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 8e5e6c04c035..e82bde077296 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -58,8 +58,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) if (!np) goto out; - INIT_LIST_HEAD(&hba->clk_list_head); - cnt = of_property_count_strings(np, "clock-names"); if (!cnt || (cnt == -EINVAL)) { dev_info(dev, "%s: Unable to find clocks, assuming enabled\n", diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index ffe8d8608818..88ccd63f83c1 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -314,7 +314,7 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba) struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head; - if (!head || list_empty(head)) + if (list_empty(head)) return; list_for_each_entry(clki, head, list) { @@ -869,7 +869,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) ktime_t start = ktime_get(); bool clk_state_changed = false; - if (!head || list_empty(head)) + if (list_empty(head)) goto out; ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); @@ -943,7 +943,7 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head; - if (!head || list_empty(head)) + if (list_empty(head)) return false; list_for_each_entry(clki, head, list) { @@ -6752,7 +6752,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, ktime_t start = ktime_get(); bool clk_state_changed = false; - if (!head || list_empty(head)) + if (list_empty(head)) goto out; ret = 
ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); @@ -6818,7 +6818,7 @@ static int ufshcd_init_clocks(struct ufs_hba *hba) struct device *dev = hba->dev; struct list_head *head = &hba->clk_list_head; - if (!head || list_empty(head)) + if (list_empty(head)) goto out; list_for_each_entry(clki, head, list) { @@ -7811,6 +7811,8 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) hba->dev = dev; *hba_handle = hba; + INIT_LIST_HEAD(&hba->clk_list_head); + out_error: return err; } -- cgit From 896f6966fc815abe71f85fb26f0193875df8a035 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 31 May 2017 10:56:56 +0800 Subject: scsi: megaraid: Fix a sleep-in-atomic bug The driver may sleep under a spin lock, and the function call path is: mraid_mm_attach_buf (acquire the lock by spin_lock_irqsave) pci_pool_alloc(GFP_KERNEL) --> may sleep To fix it, the "GFP_KERNEL" is replaced with "GFP_ATOMIC". [mkp: fixed whitespace] Signed-off-by: Jia-Ju Bai Acked-by: Sumit Saxena Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_mm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 4cf9ed96414f..544d6f7e6138 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -574,7 +574,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen) kioc->pool_index = right_pool; kioc->free_buf = 1; - kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, + kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_ATOMIC, &kioc->buf_paddr); spin_unlock_irqrestore(&pool->lock, flags); -- cgit From 8e6882545d8c06f99e9e117741cc87f3338b0bef Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:52 -0700 Subject: scsi: Avoid that scsi_exit_rq() triggers a use-after-free Dereferencing shost from scsi_exit_rq() is not safe because the SCSI host may already have been freed when scsi_exit_rq() is called. Increasing the shost reference count in scsi_init_rq() and dropping that reference in scsi_exit_rq() is nontrivial since scsi_host_dev_release() may sleep and since scsi_exit_rq() may be called from interrupt context. Since scsi_exit_rq() only needs a single bit from shost, copy that bit into struct scsi_cmnd. Reported-by: Scott Bauer Fixes: e9c787e65c0c ("scsi: allocate scsi_cmnd structures as part of struct request") Signed-off-by: Bart Van Assche Reviewed-by: Christoph Hellwig Cc: Hannes Reinecke Cc: Scott Bauer Cc: Jan Kara Cc: Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_lib.c | 47 +++++++++++++++++++++++++++++------------------ include/scsi/scsi_cmnd.h | 1 + 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 99e16ac479e3..a95eb022fb89 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -45,23 +45,23 @@ static struct kmem_cache *scsi_sense_isadma_cache; static DEFINE_MUTEX(scsi_sense_cache_mutex); static inline struct kmem_cache * -scsi_select_sense_cache(struct Scsi_Host *shost) +scsi_select_sense_cache(bool unchecked_isa_dma) { - return shost->unchecked_isa_dma ? - scsi_sense_isadma_cache : scsi_sense_cache; + return unchecked_isa_dma ? 
scsi_sense_isadma_cache : scsi_sense_cache; } -static void scsi_free_sense_buffer(struct Scsi_Host *shost, - unsigned char *sense_buffer) +static void scsi_free_sense_buffer(bool unchecked_isa_dma, + unsigned char *sense_buffer) { - kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer); + kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma), + sense_buffer); } -static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost, +static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma, gfp_t gfp_mask, int numa_node) { - return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask, - numa_node); + return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma), + gfp_mask, numa_node); } int scsi_init_sense_cache(struct Scsi_Host *shost) @@ -69,7 +69,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost) struct kmem_cache *cache; int ret = 0; - cache = scsi_select_sense_cache(shost); + cache = scsi_select_sense_cache(shost->unchecked_isa_dma); if (cache) return 0; @@ -1138,6 +1138,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) { void *buf = cmd->sense_buffer; void *prot = cmd->prot_sdb; + unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA; unsigned long flags; /* zero out the cmd, except for the embedded scsi_request */ @@ -1147,6 +1148,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) cmd->device = dev; cmd->sense_buffer = buf; cmd->prot_sdb = prot; + cmd->flags = unchecked_isa_dma; INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); cmd->jiffies_at_alloc = jiffies; @@ -1847,6 +1849,7 @@ static int scsi_mq_prep_fn(struct request *req) struct scsi_device *sdev = req->q->queuedata; struct Scsi_Host *shost = sdev->host; unsigned char *sense_buf = cmd->sense_buffer; + unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA; struct scatterlist *sg; /* zero out the cmd, except for the embedded scsi_request */ @@ -1858,6 +1861,7 @@ static int scsi_mq_prep_fn(struct request *req) cmd->request = req; cmd->device = sdev; cmd->sense_buffer = sense_buf; + cmd->flags = unchecked_isa_dma; cmd->tag = req->tag; @@ -2004,10 +2008,13 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { struct Scsi_Host *shost = set->driver_data; + const bool unchecked_isa_dma = shost->unchecked_isa_dma; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); - cmd->sense_buffer = - scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node); + if (unchecked_isa_dma) + cmd->flags |= SCMD_UNCHECKED_ISA_DMA; + cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, + GFP_KERNEL, numa_node); if (!cmd->sense_buffer) return -ENOMEM; cmd->req.sense = cmd->sense_buffer; @@ -2017,10 +2024,10 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx) { - struct Scsi_Host *shost = set->driver_data; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); - scsi_free_sense_buffer(shost, cmd->sense_buffer); + scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA, + cmd->sense_buffer); } static int scsi_map_queues(struct blk_mq_tag_set *set) @@ -2093,11 +2100,15 @@ EXPORT_SYMBOL_GPL(__scsi_init_queue); static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) { struct Scsi_Host *shost = q->rq_alloc_data; + const bool unchecked_isa_dma = shost->unchecked_isa_dma; struct scsi_cmnd *cmd = 
blk_mq_rq_to_pdu(rq); memset(cmd, 0, sizeof(*cmd)); - cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE); + if (unchecked_isa_dma) + cmd->flags |= SCMD_UNCHECKED_ISA_DMA; + cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp, + NUMA_NO_NODE); if (!cmd->sense_buffer) goto fail; cmd->req.sense = cmd->sense_buffer; @@ -2111,19 +2122,19 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) return 0; fail_free_sense: - scsi_free_sense_buffer(shost, cmd->sense_buffer); + scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer); fail: return -ENOMEM; } static void scsi_exit_rq(struct request_queue *q, struct request *rq) { - struct Scsi_Host *shost = q->rq_alloc_data; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); if (cmd->prot_sdb) kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); - scsi_free_sense_buffer(shost, cmd->sense_buffer); + scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA, + cmd->sense_buffer); } struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index b379f93a2c48..16351de31243 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -56,6 +56,7 @@ struct scsi_pointer { /* for scmd->flags */ #define SCMD_TAGGED (1 << 0) +#define SCMD_UNCHECKED_ISA_DMA (1 << 1) struct scsi_cmnd { struct scsi_request req; -- cgit From 551eb598e5ea52996eb821f43740496a78a97b68 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:53 -0700 Subject: scsi: Split scsi_internal_device_block() Instead of passing a "wait" argument to scsi_internal_device_block(), split this function into a function that waits and a function that doesn't wait. This will make it easier to serialize SCSI device state changes through a mutex. Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Christoph Hellwig Cc: Sreekanth Reddy Signed-off-by: Martin K. 
Petersen --- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 +- drivers/scsi/scsi_lib.c | 73 +++++++++++++++++++++++------------- include/scsi/scsi_device.h | 2 +- 3 files changed, 50 insertions(+), 29 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index a5d872664257..c63bc5ccce37 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 1; - r = scsi_internal_device_block(sdev, false); + r = scsi_internal_device_block_nowait(sdev); if (r == -EINVAL) sdev_printk(KERN_WARNING, sdev, "device_block failed with return(%d) for handle(0x%04x)\n", @@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev, "performing a block followed by an unblock\n", r, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 1; - r = scsi_internal_device_block(sdev, false); + r = scsi_internal_device_block_nowait(sdev); if (r) sdev_printk(KERN_WARNING, sdev, "retried device_block " "failed with return(%d) for handle(0x%04x)\n", diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a95eb022fb89..fc8f394eb854 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2944,28 +2944,20 @@ scsi_target_resume(struct scsi_target *starget) EXPORT_SYMBOL(scsi_target_resume); /** - * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state - * @sdev: device to block - * @wait: Whether or not to wait until ongoing .queuecommand() / - * .queue_rq() calls have finished. + * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state + * @sdev: device to block * - * Block request made by scsi lld's to temporarily stop all - * scsi commands on the specified device. May sleep. + * Pause SCSI command processing on the specified device. Does not sleep. * - * Returns zero if successful or error if not + * Returns zero if successful or a negative error code upon failure. * - * Notes: - * This routine transitions the device to the SDEV_BLOCK state - * (which must be a legal transition). When the device is in this - * state, all commands are deferred until the scsi lld reenables - * the device with scsi_device_unblock or device_block_tmo fires. - * - * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after - * scsi_internal_device_block() has blocked a SCSI device and also - * remove the rport mutex lock and unlock calls from srp_queuecommand(). + * Notes: + * This routine transitions the device to the SDEV_BLOCK state (which must be + * a legal transition). When the device is in this state, command processing + * is paused until the device leaves the SDEV_BLOCK state. See also + * scsi_internal_device_unblock_nowait(). */ -int -scsi_internal_device_block(struct scsi_device *sdev, bool wait) +int scsi_internal_device_block_nowait(struct scsi_device *sdev) { struct request_queue *q = sdev->request_queue; unsigned long flags; @@ -2985,21 +2977,50 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait) * request queue. 
*/ if (q->mq_ops) { - if (wait) - blk_mq_quiesce_queue(q); - else - blk_mq_stop_hw_queues(q); + blk_mq_stop_hw_queues(q); } else { spin_lock_irqsave(q->queue_lock, flags); blk_stop_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); - if (wait) - scsi_wait_for_queuecommand(sdev); } return 0; } -EXPORT_SYMBOL_GPL(scsi_internal_device_block); +EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); + +/** + * scsi_internal_device_block - try to transition to the SDEV_BLOCK state + * @sdev: device to block + * + * Pause SCSI command processing on the specified device and wait until all + * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep. + * + * Returns zero if successful or a negative error code upon failure. + * + * Note: + * This routine transitions the device to the SDEV_BLOCK state (which must be + * a legal transition). When the device is in this state, command processing + * is paused until the device leaves the SDEV_BLOCK state. See also + * scsi_internal_device_unblock(). + * + * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after + * scsi_internal_device_block() has blocked a SCSI device and also + * remove the rport mutex lock and unlock calls from srp_queuecommand(). + */ +static int scsi_internal_device_block(struct scsi_device *sdev) +{ + struct request_queue *q = sdev->request_queue; + int err; + + err = scsi_internal_device_block_nowait(sdev); + if (err == 0) { + if (q->mq_ops) + blk_mq_quiesce_queue(q); + else + scsi_wait_for_queuecommand(sdev); + } + return err; +} /** * scsi_internal_device_unblock - resume a device after a block request @@ -3056,7 +3077,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); static void device_block(struct scsi_device *sdev, void *data) { - scsi_internal_device_block(sdev, true); + scsi_internal_device_block(sdev); } static int diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 05641aebd181..6ce6888f3c69 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -472,7 +472,7 @@ static inline int scsi_device_created(struct scsi_device *sdev) sdev->sdev_state == SDEV_CREATED_BLOCK; } -int scsi_internal_device_block(struct scsi_device *sdev, bool wait); +int scsi_internal_device_block_nowait(struct scsi_device *sdev); int scsi_internal_device_unblock(struct scsi_device *sdev, enum scsi_device_state new_state); -- cgit From 43f7571be077ee4673466cbcba115427d68440e1 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:54 -0700 Subject: scsi: Create two versions of scsi_internal_device_unblock() This will make it easier to serialize SCSI device state changes through a mutex. Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Christoph Hellwig Cc: Sreekanth Reddy Signed-off-by: Martin K. 
Petersen --- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 ++-- drivers/scsi/scsi_lib.c | 46 +++++++++++++++++++++++++----------- include/scsi/scsi_device.h | 4 ++-- 3 files changed, 36 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index c63bc5ccce37..22998cbd538f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2883,7 +2883,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev, sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, " "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 0; - r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); if (r == -EINVAL) { /* The device has been set to SDEV_RUNNING by SD layer during * device addition but the request queue is still stopped by @@ -2902,7 +2902,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev, r, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 0; - r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); if (r) sdev_printk(KERN_WARNING, sdev, "retried device_unblock" " failed with return(%d) for handle(0x%04x)\n", diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index fc8f394eb854..f0eb55744513 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -3023,24 +3023,22 @@ static int scsi_internal_device_block(struct scsi_device *sdev) } /** - * scsi_internal_device_unblock - resume a device after a block request + * scsi_internal_device_unblock_nowait - resume a device after a block request * @sdev: device to resume - * @new_state: state to set devices to after unblocking + * @new_state: state to set the device to after unblocking * - * Called by scsi lld's or the midlayer to restart the device queue - * for the previously suspended scsi device. Called from interrupt or - * normal process context. + * Restart the device queue for a previously suspended SCSI device. Does not + * sleep. * - * Returns zero if successful or error if not. + * Returns zero if successful or a negative error code upon failure. * - * Notes: - * This routine transitions the device to the SDEV_RUNNING state - * or to one of the offline states (which must be a legal transition) - * allowing the midlayer to goose the queue for this device. + * Notes: + * This routine transitions the device to the SDEV_RUNNING state or to one of + * the offline states (which must be a legal transition) allowing the midlayer + * to goose the queue for this device. */ -int -scsi_internal_device_unblock(struct scsi_device *sdev, - enum scsi_device_state new_state) +int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, + enum scsi_device_state new_state) { struct request_queue *q = sdev->request_queue; unsigned long flags; @@ -3072,7 +3070,27 @@ scsi_internal_device_unblock(struct scsi_device *sdev, return 0; } -EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); +EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait); + +/** + * scsi_internal_device_unblock - resume a device after a block request + * @sdev: device to resume + * @new_state: state to set the device to after unblocking + * + * Restart the device queue for a previously suspended SCSI device. May sleep. + * + * Returns zero if successful or a negative error code upon failure. 
+ * + * Notes: + * This routine transitions the device to the SDEV_RUNNING state or to one of + * the offline states (which must be a legal transition) allowing the midlayer + * to goose the queue for this device. + */ +static int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state) +{ + return scsi_internal_device_unblock_nowait(sdev, new_state); +} static void device_block(struct scsi_device *sdev, void *data) diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 6ce6888f3c69..5f24dae2a8e1 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -473,8 +473,8 @@ static inline int scsi_device_created(struct scsi_device *sdev) } int scsi_internal_device_block_nowait(struct scsi_device *sdev); -int scsi_internal_device_unblock(struct scsi_device *sdev, - enum scsi_device_state new_state); +int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, + enum scsi_device_state new_state); /* accessor functions for the SCSI parameters */ static inline int scsi_device_sync(struct scsi_device *sdev) -- cgit From 0db6ca8a5e1ea585795db3643ec7d50fc8cb1aff Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:55 -0700 Subject: scsi: Protect SCSI device state changes with a mutex Serializing SCSI device state changes avoids that two state changes can occur concurrently, e.g. the state changes in scsi_target_block() and __scsi_remove_device(). This serialization is essential to make patch "Make __scsi_remove_device go straight from BLOCKED to DEL" work reliably. Enable this mechanism for all scsi_target_*block() callers but not for the scsi_internal_device_unblock() calls from the mpt3sas driver because that driver can call scsi_internal_device_unblock() from atomic context. Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/scsi_error.c | 8 +++++++- drivers/scsi/scsi_lib.c | 27 +++++++++++++++++++++------ drivers/scsi/scsi_scan.c | 16 +++++++++------- drivers/scsi/scsi_sysfs.c | 24 +++++++++++++++++++----- drivers/scsi/scsi_transport_srp.c | 7 ++++--- drivers/scsi/sd.c | 7 +++++-- include/scsi/scsi_device.h | 1 + 7 files changed, 66 insertions(+), 24 deletions(-) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index ecc07dab893d..ac3196420435 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1628,11 +1628,17 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q, struct list_head *done_q) { struct scsi_cmnd *scmd, *next; + struct scsi_device *sdev; list_for_each_entry_safe(scmd, next, work_q, eh_entry) { sdev_printk(KERN_INFO, scmd->device, "Device offlined - " "not ready after error recovery\n"); - scsi_device_set_state(scmd->device, SDEV_OFFLINE); + sdev = scmd->device; + + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_OFFLINE); + mutex_unlock(&sdev->state_mutex); + scsi_eh_finish_cmd(scmd, done_q); } return; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f0eb55744513..4b0bac3ac6ab 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2882,7 +2882,12 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev) int scsi_device_quiesce(struct scsi_device *sdev) { - int err = scsi_device_set_state(sdev, SDEV_QUIESCE); + int err; + + mutex_lock(&sdev->state_mutex); + err = scsi_device_set_state(sdev, SDEV_QUIESCE); + mutex_unlock(&sdev->state_mutex); + if (err) return err; @@ -2910,10 +2915,11 @@ void scsi_device_resume(struct scsi_device *sdev) * so assume the state is being managed elsewhere (for example * device deleted during suspend) */ - if (sdev->sdev_state != SDEV_QUIESCE || - scsi_device_set_state(sdev, SDEV_RUNNING)) - return; - scsi_run_queue(sdev->request_queue); + mutex_lock(&sdev->state_mutex); + if (sdev->sdev_state == SDEV_QUIESCE && + scsi_device_set_state(sdev, SDEV_RUNNING) == 0) + scsi_run_queue(sdev->request_queue); + mutex_unlock(&sdev->state_mutex); } EXPORT_SYMBOL(scsi_device_resume); @@ -3012,6 +3018,7 @@ static int scsi_internal_device_block(struct scsi_device *sdev) struct request_queue *q = sdev->request_queue; int err; + mutex_lock(&sdev->state_mutex); err = scsi_internal_device_block_nowait(sdev); if (err == 0) { if (q->mq_ops) @@ -3019,6 +3026,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev) else scsi_wait_for_queuecommand(sdev); } + mutex_unlock(&sdev->state_mutex); + return err; } @@ -3089,7 +3098,13 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait); static int scsi_internal_device_unblock(struct scsi_device *sdev, enum scsi_device_state new_state) { - return scsi_internal_device_unblock_nowait(sdev, new_state); + int ret; + + mutex_lock(&sdev->state_mutex); + ret = scsi_internal_device_unblock_nowait(sdev, new_state); + mutex_unlock(&sdev->state_mutex); + + return ret; } static void diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 6f7128f49c30..e6de4eee97a3 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -231,6 +231,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, sdev->id = starget->id; sdev->lun = lun; sdev->channel = starget->channel; + mutex_init(&sdev->state_mutex); sdev->sdev_state = SDEV_CREATED; INIT_LIST_HEAD(&sdev->siblings); INIT_LIST_HEAD(&sdev->same_target_siblings); @@ -943,16 +944,17 @@ static int scsi_add_lun(struct 
scsi_device *sdev, unsigned char *inq_result, /* set the device running here so that slave configure * may do I/O */ + mutex_lock(&sdev->state_mutex); ret = scsi_device_set_state(sdev, SDEV_RUNNING); - if (ret) { + if (ret) ret = scsi_device_set_state(sdev, SDEV_BLOCK); + mutex_unlock(&sdev->state_mutex); - if (ret) { - sdev_printk(KERN_ERR, sdev, - "in wrong state %s to complete scan\n", - scsi_device_state_name(sdev->sdev_state)); - return SCSI_SCAN_NO_RESPONSE; - } + if (ret) { + sdev_printk(KERN_ERR, sdev, + "in wrong state %s to complete scan\n", + scsi_device_state_name(sdev->sdev_state)); + return SCSI_SCAN_NO_RESPONSE; } if (*bflags & BLIST_MS_192_BYTES_FOR_3F) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 82dfe07b1d47..a91537a3abbf 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -719,7 +719,7 @@ static ssize_t store_state_field(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int i; + int i, ret; struct scsi_device *sdev = to_scsi_device(dev); enum scsi_device_state state = 0; @@ -734,9 +734,11 @@ store_state_field(struct device *dev, struct device_attribute *attr, if (!state) return -EINVAL; - if (scsi_device_set_state(sdev, state)) - return -EINVAL; - return count; + mutex_lock(&sdev->state_mutex); + ret = scsi_device_set_state(sdev, state); + mutex_unlock(&sdev->state_mutex); + + return ret == 0 ? count : -EINVAL; } static ssize_t @@ -1272,6 +1274,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) void __scsi_remove_device(struct scsi_device *sdev) { struct device *dev = &sdev->sdev_gendev; + int res; /* * This cleanup path is not reentrant and while it is impossible @@ -1282,7 +1285,15 @@ void __scsi_remove_device(struct scsi_device *sdev) return; if (sdev->is_visible) { - if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) + /* + * If scsi_internal_target_block() is running concurrently, + * wait until it has finished before changing the device state. + */ + mutex_lock(&sdev->state_mutex); + res = scsi_device_set_state(sdev, SDEV_CANCEL); + mutex_unlock(&sdev->state_mutex); + + if (res != 0) return; bsg_unregister_queue(sdev->request_queue); @@ -1298,7 +1309,10 @@ void __scsi_remove_device(struct scsi_device *sdev) * scsi_run_queue() invocations have finished before tearing down the * device. */ + mutex_lock(&sdev->state_mutex); scsi_device_set_state(sdev, SDEV_DEL); + mutex_unlock(&sdev->state_mutex); + blk_cleanup_queue(sdev->request_queue); cancel_work_sync(&sdev->requeue_work); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 3c5d89852e9f..f617021c94f7 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -554,11 +554,12 @@ int srp_reconnect_rport(struct srp_rport *rport) * invoking scsi_target_unblock() won't change the state of * these devices into running so do that explicitly. 
*/ - spin_lock_irq(shost->host_lock); - __shost_for_each_device(sdev, shost) + shost_for_each_device(sdev, shost) { + mutex_lock(&sdev->state_mutex); if (sdev->sdev_state == SDEV_OFFLINE) sdev->sdev_state = SDEV_RUNNING; - spin_unlock_irq(shost->host_lock); + mutex_unlock(&sdev->state_mutex); + } } else if (rport->state == SRP_RPORT_RUNNING) { /* * srp_reconnect_rport() has been invoked with fast_io_fail diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b6bb4e0ce0e3..53b6d72c214d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1818,8 +1818,9 @@ static void sd_eh_reset(struct scsi_cmnd *scmd) static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) { struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk); + struct scsi_device *sdev = scmd->device; - if (!scsi_device_online(scmd->device) || + if (!scsi_device_online(sdev) || !scsi_medium_access_command(scmd) || host_byte(scmd->result) != DID_TIME_OUT || eh_disp != SUCCESS) @@ -1845,7 +1846,9 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) { scmd_printk(KERN_ERR, scmd, "Medium access timeout failure. Offlining disk!\n"); - scsi_device_set_state(scmd->device, SDEV_OFFLINE); + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_OFFLINE); + mutex_unlock(&sdev->state_mutex); return SUCCESS; } diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 5f24dae2a8e1..d13bc80825b1 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -207,6 +207,7 @@ struct scsi_device { void *handler_data; unsigned char access_state; + struct mutex state_mutex; enum scsi_device_state sdev_state; unsigned long sdev_data[0]; } __attribute__((aligned(sizeof(unsigned long)))); -- cgit From 66483a4a9f34427e3d6ec87d8e583f5d2a7cbb76 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:56 -0700 Subject: scsi: Introduce scsi_start_queue() This patch does not change any functionality. Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Cc: Israel Rukshin Cc: Max Gurtovoy Cc: Benjamin Block Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_lib.c | 25 +++++++++++++++---------- drivers/scsi/scsi_priv.h | 1 + 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 4b0bac3ac6ab..ed744668b984 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -3031,6 +3031,20 @@ static int scsi_internal_device_block(struct scsi_device *sdev) return err; } +void scsi_start_queue(struct scsi_device *sdev) +{ + struct request_queue *q = sdev->request_queue; + unsigned long flags; + + if (q->mq_ops) { + blk_mq_start_stopped_hw_queues(q, false); + } else { + spin_lock_irqsave(q->queue_lock, flags); + blk_start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + } +} + /** * scsi_internal_device_unblock_nowait - resume a device after a block request * @sdev: device to resume @@ -3049,9 +3063,6 @@ static int scsi_internal_device_block(struct scsi_device *sdev) int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, enum scsi_device_state new_state) { - struct request_queue *q = sdev->request_queue; - unsigned long flags; - /* * Try to transition the scsi device to SDEV_RUNNING or one of the * offlined states and goose the device queue if successful. 
@@ -3069,13 +3080,7 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, sdev->sdev_state != SDEV_OFFLINE) return -EINVAL; - if (q->mq_ops) { - blk_mq_start_stopped_hw_queues(q, false); - } else { - spin_lock_irqsave(q->queue_lock, flags); - blk_start_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); - } + scsi_start_queue(sdev); return 0; } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 59ebc1795bb3..f86057842f9a 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -88,6 +88,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost); extern void scsi_requeue_run_queue(struct work_struct *work); extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev); +extern void scsi_start_queue(struct scsi_device *sdev); extern int scsi_mq_setup_tags(struct Scsi_Host *shost); extern void scsi_mq_destroy_tags(struct Scsi_Host *shost); extern int scsi_init_queue(void); -- cgit From 255ee9320e5dc46173bb94dbcd68e32f11fc10a9 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:57 -0700 Subject: scsi: Make __scsi_remove_device go straight from BLOCKED to DEL If a device is blocked, make __scsi_remove_device() cause it to transition to the DEL state. This means that all the commands issued in .shutdown() will error in the mid-layer, thus making the removal proceed without being stopped. This patch is a slightly modified version of a patch from James Bottomley. This patch avoids that the following lockup occurs: Call Trace: schedule+0x35/0x80 schedule_timeout+0x237/0x2d0 io_schedule_timeout+0xa6/0x110 wait_for_completion_io+0xa3/0x110 blk_execute_rq+0xdf/0x120 scsi_execute+0xce/0x150 [scsi_mod] scsi_execute_req_flags+0x8f/0xf0 [scsi_mod] sd_sync_cache+0xa9/0x190 [sd_mod] sd_shutdown+0x6a/0x100 [sd_mod] sd_remove+0x64/0xc0 [sd_mod] __device_release_driver+0x8d/0x120 device_release_driver+0x1e/0x30 bus_remove_device+0xf9/0x170 device_del+0x127/0x240 __scsi_remove_device+0xc1/0xd0 [scsi_mod] scsi_forget_host+0x57/0x60 [scsi_mod] scsi_remove_host+0x72/0x110 [scsi_mod] srp_remove_work+0x8b/0x200 [ib_srp] Reported-by: Israel Rukshin Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Cc: James Bottomley Cc: Israel Rukshin Cc: Max Gurtovoy Cc: Benjamin Block Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_lib.c | 2 +- drivers/scsi/scsi_sysfs.c | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ed744668b984..0554a6a7ea55 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2625,7 +2625,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) case SDEV_QUIESCE: case SDEV_OFFLINE: case SDEV_TRANSPORT_OFFLINE: - case SDEV_BLOCK: break; default: goto illegal; @@ -2639,6 +2638,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) case SDEV_OFFLINE: case SDEV_TRANSPORT_OFFLINE: case SDEV_CANCEL: + case SDEV_BLOCK: case SDEV_CREATED_BLOCK: break; default: diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index a91537a3abbf..ce470f62a8ae 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1290,7 +1290,17 @@ void __scsi_remove_device(struct scsi_device *sdev) * wait until it has finished before changing the device state. 
*/ mutex_lock(&sdev->state_mutex); + /* + * If blocked, we go straight to DEL and restart the queue so + * any commands issued during driver shutdown (like sync + * cache) are errored immediately. + */ res = scsi_device_set_state(sdev, SDEV_CANCEL); + if (res != 0) { + res = scsi_device_set_state(sdev, SDEV_DEL); + if (res == 0) + scsi_start_queue(sdev); + } mutex_unlock(&sdev->state_mutex); if (res != 0) -- cgit From 2dd6fb5957a75cd926089bb4434449e6181ca5c5 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:58 -0700 Subject: scsi: Only add commands to the device command list if required by the LLD Just like for the scsi-mq code path, in the single queue SCSI code path only add commands to the per-device command list if required by the SCSI LLD. This patch will make it easier to merge the single-queue and multiqueue command initialization code. Signed-off-by: Bart Van Assche Reviewed-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Cc: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi.c | 9 +-------- drivers/scsi/scsi_lib.c | 52 +++++++++++++++++++++++++++++------------------- drivers/scsi/scsi_priv.h | 2 ++ 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 61cdd99ae41e..1bf274e3b2b6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -108,14 +108,7 @@ EXPORT_SYMBOL(scsi_sd_pm_domain); */ void scsi_put_command(struct scsi_cmnd *cmd) { - unsigned long flags; - - /* serious error if the command hasn't come from a device list */ - spin_lock_irqsave(&cmd->device->list_lock, flags); - BUG_ON(list_empty(&cmd->list)); - list_del_init(&cmd->list); - spin_unlock_irqrestore(&cmd->device->list_lock, flags); - + scsi_del_cmd_from_list(cmd); BUG_ON(delayed_work_pending(&cmd->abort_work)); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0554a6a7ea55..1470c93a87e7 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -584,19 +584,9 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) { - struct scsi_device *sdev = cmd->device; - struct Scsi_Host *shost = sdev->host; - unsigned long flags; - scsi_mq_free_sgtables(cmd); scsi_uninit_cmd(cmd); - - if (shost->use_cmd_list) { - BUG_ON(list_empty(&cmd->list)); - spin_lock_irqsave(&sdev->list_lock, flags); - list_del_init(&cmd->list); - spin_unlock_irqrestore(&sdev->list_lock, flags); - } + scsi_del_cmd_from_list(cmd); } /* @@ -1134,12 +1124,40 @@ err_exit: } EXPORT_SYMBOL(scsi_init_io); +/* Add a command to the list used by the aacraid and dpt_i2o drivers */ +void scsi_add_cmd_to_list(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct Scsi_Host *shost = sdev->host; + unsigned long flags; + + if (shost->use_cmd_list) { + spin_lock_irqsave(&sdev->list_lock, flags); + list_add_tail(&cmd->list, &sdev->cmd_list); + spin_unlock_irqrestore(&sdev->list_lock, flags); + } +} + +/* Remove a command from the list used by the aacraid and dpt_i2o drivers */ +void scsi_del_cmd_from_list(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct Scsi_Host *shost = sdev->host; + unsigned long flags; + + if (shost->use_cmd_list) { + spin_lock_irqsave(&sdev->list_lock, flags); + BUG_ON(list_empty(&cmd->list)); + list_del_init(&cmd->list); + spin_unlock_irqrestore(&sdev->list_lock, flags); + } +} + void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) { void *buf = cmd->sense_buffer; void *prot = 
cmd->prot_sdb; unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA; - unsigned long flags; /* zero out the cmd, except for the embedded scsi_request */ memset((char *)cmd + sizeof(cmd->req), 0, @@ -1152,9 +1170,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); cmd->jiffies_at_alloc = jiffies; - spin_lock_irqsave(&dev->list_lock, flags); - list_add_tail(&cmd->list, &dev->cmd_list); - spin_unlock_irqrestore(&dev->list_lock, flags); + scsi_add_cmd_to_list(cmd); } static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req) @@ -1871,11 +1887,7 @@ static int scsi_mq_prep_fn(struct request *req) INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); cmd->jiffies_at_alloc = jiffies; - if (shost->use_cmd_list) { - spin_lock_irq(&sdev->list_lock); - list_add_tail(&cmd->list, &sdev->cmd_list); - spin_unlock_irq(&sdev->list_lock); - } + scsi_add_cmd_to_list(cmd); sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; cmd->sdb.table.sgl = sg; diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index f86057842f9a..c11c1f9c912c 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -80,6 +80,8 @@ int scsi_eh_get_sense(struct list_head *work_q, int scsi_noretry_cmd(struct scsi_cmnd *scmd); /* scsi_lib.c */ +extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd); +extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd); extern int scsi_maybe_unblock_host(struct scsi_device *sdev); extern void scsi_device_unbusy(struct scsi_device *sdev); extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); -- cgit From be4c186c80113f95a5946594687a8e70f876e857 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:21:59 -0700 Subject: scsi: Introduce scsi_mq_sgl_size() This patch does not change any functionality but makes the next patch easier to read. Signed-off-by: Bart Van Assche Reviewed-by: Christoph Hellwig Cc: Hannes Reinecke Cc: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_lib.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1470c93a87e7..c2a9601bd302 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1859,6 +1859,13 @@ static inline int prep_to_mq(int ret) } } +/* Size in bytes of the sg-list stored in the scsi-mq command-private data. 
*/ +static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost) +{ + return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) * + sizeof(struct scatterlist); +} + static int scsi_mq_prep_fn(struct request *req) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); @@ -1893,10 +1900,7 @@ static int scsi_mq_prep_fn(struct request *req) cmd->sdb.table.sgl = sg; if (scsi_host_get_prot(shost)) { - cmd->prot_sdb = (void *)sg + - min_t(unsigned int, - shost->sg_tablesize, SG_CHUNK_SIZE) * - sizeof(struct scatterlist); + cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost); memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); cmd->prot_sdb->table.sgl = @@ -2202,12 +2206,9 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) int scsi_mq_setup_tags(struct Scsi_Host *shost) { - unsigned int cmd_size, sgl_size, tbl_size; + unsigned int cmd_size, sgl_size; - tbl_size = shost->sg_tablesize; - if (tbl_size > SG_CHUNK_SIZE) - tbl_size = SG_CHUNK_SIZE; - sgl_size = tbl_size * sizeof(struct scatterlist); + sgl_size = scsi_mq_sgl_size(shost); cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; if (scsi_host_get_prot(shost)) cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; -- cgit From 08f784364fef627650e471c7ae342f1da1c57bbf Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:22:00 -0700 Subject: scsi: Make scsi_mq_prep_fn() call scsi_init_command() This patch reduces code duplication. There are two functional changes in this patch: - It causes scsi_mq_prep_fn() to clear driver-private command data, just like the already upstream commit 1bad6c4a57ef ("scsi: zero per-cmd private driver data for each MQ I/O"). - The initialization of .prot_sdb is moved from scsi_mq_prep_fn() into scsi_init_request(). [mkp: applied by hand] Signed-off-by: Bart Van Assche Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Johannes Thumshirn Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. 
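The new helper also fixes where the protection scsi_data_buffer sits inside the per-request allocation, which is what lets this patch move the cmd->prot_sdb setup out of the hot prep path and into scsi_init_request(). A sketch of that layout; scsi_mq_sgl_size() is the helper from the diff above, while both accessor names below are made up here purely for illustration:

/*
 * Per-request payload allocated by scsi-mq, in order (illustrative):
 *
 *   struct scsi_cmnd
 *   LLD private data            shost->hostt->cmd_size bytes
 *   data scatterlist            scsi_mq_sgl_size(shost) bytes
 *   struct scsi_data_buffer     only when protection is enabled
 *   protection scatterlist      scsi_mq_sgl_size(shost) bytes
 */
static struct scatterlist *example_mq_data_sgl(struct Scsi_Host *shost,
                                               struct scsi_cmnd *cmd)
{
        return (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
}

static struct scsi_data_buffer *example_mq_prot_sdb(struct Scsi_Host *shost,
                                                    struct scsi_cmnd *cmd)
{
        return (void *)example_mq_data_sgl(shost, cmd) + scsi_mq_sgl_size(shost);
}

Since the offsets depend only on the host, not on the individual request, computing prot_sdb once at request-initialization time is safe.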
Petersen --- drivers/scsi/scsi_lib.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c2a9601bd302..41c19c75dab4 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1871,36 +1871,21 @@ static int scsi_mq_prep_fn(struct request *req) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); struct scsi_device *sdev = req->q->queuedata; struct Scsi_Host *shost = sdev->host; - unsigned char *sense_buf = cmd->sense_buffer; - unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA; struct scatterlist *sg; - /* zero out the cmd, except for the embedded scsi_request */ - memset((char *)cmd + sizeof(cmd->req), 0, - sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size); + scsi_init_command(sdev, cmd); req->special = cmd; cmd->request = req; - cmd->device = sdev; - cmd->sense_buffer = sense_buf; - cmd->flags = unchecked_isa_dma; cmd->tag = req->tag; - cmd->prot_op = SCSI_PROT_NORMAL; - INIT_LIST_HEAD(&cmd->list); - INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); - cmd->jiffies_at_alloc = jiffies; - - scsi_add_cmd_to_list(cmd); - sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; cmd->sdb.table.sgl = sg; if (scsi_host_get_prot(shost)) { - cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost); memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); cmd->prot_sdb->table.sgl = @@ -2026,6 +2011,7 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, struct Scsi_Host *shost = set->driver_data; const bool unchecked_isa_dma = shost->unchecked_isa_dma; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + struct scatterlist *sg; if (unchecked_isa_dma) cmd->flags |= SCMD_UNCHECKED_ISA_DMA; @@ -2034,6 +2020,13 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, if (!cmd->sense_buffer) return -ENOMEM; cmd->req.sense = cmd->sense_buffer; + + if (scsi_host_get_prot(shost)) { + sg = (void *)cmd + sizeof(struct scsi_cmnd) + + shost->hostt->cmd_size; + cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost); + } + return 0; } -- cgit From c3006a9264681fd0551543f526758208792319d6 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:22:01 -0700 Subject: scsi: snic: Remove code that zeroes driver-private command data Since the SCSI core zeroes driver-private command data, remove that code from the snic driver. Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Narsimhulu Musini Reviewed-by: Christoph Hellwig Cc: Sesidhar Baddela Cc: Johannes Thumshirn Signed-off-by: Martin K. 
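With the midlayer clearing everything past the embedded scsi_request, including the hostt->cmd_size bytes of per-command driver data, an LLD can treat scsi_cmd_priv() as returning zeroed memory on every dispatch. A minimal sketch with hypothetical driver names (the drivers touched by this and the next two patches simply delete their memset() calls):

struct example_cmd_priv {
        int state;
        unsigned long flags;
};

/* The host template would set .cmd_size = sizeof(struct example_cmd_priv). */
static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
        struct example_cmd_priv *priv = scsi_cmd_priv(sc);

        /* priv is already zeroed by the SCSI core; no memset() needed. */
        priv->state = 1;

        return 0;
}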
Petersen --- drivers/scsi/snic/snic_scsi.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c index da979a73baa0..05c3a7282d4a 100644 --- a/drivers/scsi/snic/snic_scsi.c +++ b/drivers/scsi/snic/snic_scsi.c @@ -359,8 +359,6 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n", sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun); - memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state)); - ret = snic_issue_scsi_req(snic, tgt, sc); if (ret) { SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret); -- cgit From c2bb87318baa6c012432eda6f08290d8623538ee Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:22:02 -0700 Subject: scsi: virtio_scsi: Remove code that zeroes driver-private command data Since the SCSI core zeroes driver-private command data, remove that code from the virtio driver. Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Cc: Michael S. Tsirkin Cc: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/virtio_scsi.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index f8dbfeee6c63..dc2e97c543a5 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -547,7 +547,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, dev_dbg(&sc->device->sdev_gendev, "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); - memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); -- cgit From f7de50da1479156d76c0dd3c29234a44bc4845f0 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 2 Jun 2017 14:22:03 -0700 Subject: scsi: xen-scsifront: Remove code that zeroes driver-private command data Since the SCSI core zeroes driver-private command data, remove that code from the xen-scsifront driver. Signed-off-by: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Juergen Gross Reviewed-by: Christoph Hellwig Cc: xen-devel@lists.xenproject.org Cc: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/xen-scsifront.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index a6a8b60d4902..36f59a1be7e9 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -534,7 +534,6 @@ static int scsifront_queuecommand(struct Scsi_Host *shost, int err; sc->result = 0; - memset(shadow, 0, sizeof(*shadow)); shadow->sc = sc; shadow->act = VSCSIIF_ACT_SCSI_CDB; -- cgit From 39aa23f98a6b9932d6ffa1483639aad865b7ba9a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 18 May 2017 10:35:24 +0100 Subject: scsi: lpfc: make a couple of functions static functions lpfc_nvmet_cleanup_io_context and lpfc_nvmet_setup_io_context can be made static as they do not need to be in global scope. Cleans up sparse warnings: "warning: symbol 'lpfc_nvmet_cleanup_io_context' was not declared. Should it be static?" "warning: symbol 'lpfc_nvmet_setup_io_context' was not declared. Should it be static?" Signed-off-by: Colin Ian King Acked-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 518b15e6f222..41ea97fb10ae 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -828,7 +828,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), }; -void +static void lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) { struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; @@ -858,7 +858,7 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) } } -int +static int lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) { struct lpfc_nvmet_ctxbuf *ctx_buf; -- cgit From 7b57338eca85c65a36d6f4715b053ee403c8ed95 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 6 Jun 2017 14:35:30 +0300 Subject: scsi: ufshcd-pci: Fix PM config Put PM functions under correct config options and use standard PM macros to set callbacks. Signed-off-by: Adrian Hunter Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd-pci.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 5dd4122cbd85..0dcff829f8b6 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -37,7 +37,7 @@ #include #include -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /** * ufshcd_pci_suspend - suspend power management function * @pdev: pointer to PCI device handle @@ -62,7 +62,9 @@ static int ufshcd_pci_resume(struct device *dev) { return ufshcd_system_resume(dev_get_drvdata(dev)); } +#endif /* !CONFIG_PM_SLEEP */ +#ifdef CONFIG_PM static int ufshcd_pci_runtime_suspend(struct device *dev) { return ufshcd_runtime_suspend(dev_get_drvdata(dev)); @@ -75,13 +77,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev) { return ufshcd_runtime_idle(dev_get_drvdata(dev)); } -#else /* !CONFIG_PM */ -#define ufshcd_pci_suspend NULL -#define ufshcd_pci_resume NULL -#define ufshcd_pci_runtime_suspend NULL -#define ufshcd_pci_runtime_resume NULL -#define ufshcd_pci_runtime_idle NULL -#endif /* CONFIG_PM */ +#endif /* !CONFIG_PM */ /** * ufshcd_pci_shutdown - main function to put the controller in reset state @@ -158,11 +154,11 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } static const struct dev_pm_ops ufshcd_pci_pm_ops = { - .suspend = ufshcd_pci_suspend, - .resume = ufshcd_pci_resume, - .runtime_suspend = ufshcd_pci_runtime_suspend, - .runtime_resume = ufshcd_pci_runtime_resume, - .runtime_idle = ufshcd_pci_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, + ufshcd_pci_resume) + SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend, + ufshcd_pci_runtime_resume, + ufshcd_pci_runtime_idle) }; static const struct pci_device_id ufshcd_pci_tbl[] = { -- cgit From 2c87ea97ce6a53fc8c98a796b34fed5b84aeff6b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 6 Jun 2017 14:35:31 +0300 Subject: scsi: ufshcd-pci: Add Intel CNL support Add PCI id and variant ops for Intel CNL UFS host controller. Signed-off-by: Adrian Hunter Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. 
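Routing the controller-specific hooks through the PCI ID table keeps the probe path generic: the table entry's driver_data carries a pointer to the ufs_hba_variant_ops and probe just casts it back, leaving it NULL for controllers with no variant hooks. A condensed sketch of that pattern; the table entry mirrors the diff below, while the helper name is illustrative only:

static const struct pci_device_id example_ufs_pci_tbl[] = {
        /* Intel CNL: driver_data points at the variant ops. */
        { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
        { }     /* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_ufs_pci_tbl);

/* Called from probe once the hba has been allocated. */
static void example_set_variant_ops(struct ufs_hba *hba,
                                    const struct pci_device_id *id)
{
        /* NULL for devices that need no variant-specific behaviour. */
        hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
}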
Petersen --- drivers/scsi/ufs/ufshcd-pci.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 0dcff829f8b6..925b0ec7ec54 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -37,6 +37,41 @@ #include #include +static int ufs_intel_disable_lcc(struct ufs_hba *hba) +{ + u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE); + u32 lcc_enable = 0; + + ufshcd_dme_get(hba, attr, &lcc_enable); + if (lcc_enable) + ufshcd_dme_set(hba, attr, 0); + + return 0; +} + +static int ufs_intel_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + switch (status) { + case PRE_CHANGE: + err = ufs_intel_disable_lcc(hba); + break; + case POST_CHANGE: + break; + default: + break; + } + + return err; +} + +static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { + .name = "intel-pci", + .link_startup_notify = ufs_intel_link_startup_notify, +}; + #ifdef CONFIG_PM_SLEEP /** * ufshcd_pci_suspend - suspend power management function @@ -139,6 +174,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } + hba->vops = (struct ufs_hba_variant_ops *)id->driver_data; + err = ufshcd_init(hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); @@ -163,6 +200,7 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = { static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, { } /* terminate list */ }; -- cgit From fe4d5cec8eda7b1404eec8664bc165d6d2539176 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 5 Jun 2017 18:29:58 +0100 Subject: scsi: qla2xxx: remove redundant null check on tgt An earlier commit ed7fb808477b846bb2 ("scsi: qla2xxx: Remove redundant wait when target is stopped.") removed a null check on ha->tgt.tgt_ops and replaced it with a new check that null checked tgt, thus making the subsequent null check on tgt totally redundant. Remove it. Detected by CoverityScan, CID#1440452 ("Logically Dead Code") Signed-off-by: Colin Ian King Acked-by: Himanshu Madhani Signed-off-by: Martin K. 
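The dead check is a common by-product of adding a guard clause: once the early return covers the NULL case, any later test of the same pointer is unreachable. In sketch form (function name hypothetical):

static void example_async_event(struct qla_tgt *tgt)
{
        /* Guard clause already rejects a missing or stopping target. */
        if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
                return;

        /*
         * From here on tgt is known to be non-NULL, so a second
         * "if (unlikely(tgt == NULL))" branch can never fire and is
         * simply deleted, as the diff below does.
         */
}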
Petersen --- drivers/scsi/qla2xxx/qla_target.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 88eea4d34487..5aae7aa03c38 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -5521,12 +5521,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) return; - if (unlikely(tgt == NULL)) { - ql_dbg(ql_dbg_tgt, vha, 0xe03a, - "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); - return; - } - if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && IS_QLA2100(ha)) return; -- cgit From 4fae52b5a30b6f2cfc8ecf22524cedc1ece5b774 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Tue, 6 Jun 2017 13:55:23 -0700 Subject: scsi: qla2xxx: Fix compile warning Fixes following 0-day kernel build warnings: drivers/scsi/qla2xxx/qla_init.c:6407:50: warning: format '%lx' expects argument of type 'long unsigned int', but argument 5 has type 'unsigned int' [-Wformat=] drivers/scsi/qla2xxx/qla_init.c:6709:50: warning: format '%lx' expects argument of type 'long unsigned int', but argument 5 has type 'unsigned int' [-Wformat=] Fixes: b95b9452aacf ("scsi: qla2xxx: Fix crash due to mismatch mumber of Q-pair creation for Multi queue") Signed-off-by: Himanshu Madhani Signed-off-by: Giridhar Malavali Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 436968ad4484..730e7fe4344a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -6404,7 +6404,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, "-> template size %x bytes\n", dlen); if (dlen > risc_size * sizeof(*dcode)) { ql_log(ql_log_warn, vha, 0x0167, - "Failed fwdump template exceeds array by %lx bytes\n", + "Failed fwdump template exceeds array by %zx bytes\n", (size_t)(dlen - risc_size * sizeof(*dcode))); goto default_template; } @@ -6706,7 +6706,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) "-> template size %x bytes\n", dlen); if (dlen > risc_size * sizeof(*fwcode)) { ql_log(ql_log_warn, vha, 0x0177, - "Failed fwdump template exceeds array by %lx bytes\n", + "Failed fwdump template exceeds array by %zx bytes\n", (size_t)(dlen - risc_size * sizeof(*fwcode))); goto default_template; } -- cgit From 74d2fd488d77ab4ff15a7fe0b3b6900f2382b42b Mon Sep 17 00:00:00 2001 From: Binoy Jayan Date: Thu, 8 Jun 2017 15:37:30 +0530 Subject: scsi: esas2r: Replace semaphore fm_api_semaphore with mutex The semaphore 'fm_api_semaphore' is used as a simple mutex, so it should be written as one. Semaphores are going away in the future. Signed-off-by: Binoy Jayan Reviewed-by: Arnd Bergmann Signed-off-by: Martin K. 
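The conversion is mechanical: a semaphore initialized to a count of one and only ever used through down_interruptible()/up() is a mutex in disguise, and switching to the mutex API gains the stricter ownership semantics plus lockdep coverage. A before/after sketch with placeholder names:

struct example_adapter {
        struct mutex fm_api_mutex;      /* was: struct semaphore fm_api_semaphore */
};

static void example_init(struct example_adapter *a)
{
        mutex_init(&a->fm_api_mutex);   /* was: sema_init(&a->fm_api_semaphore, 1) */
}

static int example_do_fm_api(struct example_adapter *a)
{
        if (mutex_lock_interruptible(&a->fm_api_mutex))
                return -EBUSY;          /* was: down_interruptible() */

        /* ... issue and wait for the flash management request ... */

        mutex_unlock(&a->fm_api_mutex); /* was: up() */
        return 0;
}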
Petersen --- drivers/scsi/esas2r/esas2r.h | 2 +- drivers/scsi/esas2r/esas2r_init.c | 2 +- drivers/scsi/esas2r/esas2r_ioctl.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index b6030e3edd01..c5013de964e6 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -945,7 +945,7 @@ struct esas2r_adapter { struct list_head vrq_mds_head; struct esas2r_mem_desc *vrq_mds; int num_vrqs; - struct semaphore fm_api_semaphore; + struct mutex fm_api_mutex; struct semaphore fs_api_semaphore; struct semaphore nvram_semaphore; struct atto_ioctl *local_atto_ioctl; diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index 6432a50b26d8..ad85b33ab6b4 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -327,7 +327,7 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, esas2r_debug("new adapter %p, name %s", a, a->name); spin_lock_init(&a->request_lock); spin_lock_init(&a->fw_event_lock); - sema_init(&a->fm_api_semaphore, 1); + mutex_init(&a->fm_api_mutex); sema_init(&a->fs_api_semaphore, 1); sema_init(&a->nvram_semaphore, 1); diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index 2d4b7f049a68..c6b041a9f007 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -110,7 +110,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi) { struct esas2r_request *rq; - if (down_interruptible(&a->fm_api_semaphore)) { + if (mutex_lock_interruptible(&a->fm_api_mutex)) { fi->status = FI_STAT_BUSY; return; } @@ -173,7 +173,7 @@ all_done: free_req: esas2r_free_request(a, (struct esas2r_request *)rq); free_sem: - up(&a->fm_api_semaphore); + mutex_unlock(&a->fm_api_mutex); return; } -- cgit From 249cf320bd5dd3490252cb9f3cdef41f7d1caacc Mon Sep 17 00:00:00 2001 From: Binoy Jayan Date: Thu, 8 Jun 2017 15:37:31 +0530 Subject: scsi: esas2r: Replace semaphore fs_api_semaphore with mutex The semaphore 'fs_api_semaphore' is used as a simple mutex, so it should be written as one. Semaphores are going away in the future. Signed-off-by: Binoy Jayan Reviewed-by: Arnd Bergmann Signed-off-by: Martin K. 
Petersen --- drivers/scsi/esas2r/esas2r.h | 2 +- drivers/scsi/esas2r/esas2r_init.c | 2 +- drivers/scsi/esas2r/esas2r_ioctl.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index c5013de964e6..1da6407ee142 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -946,7 +946,7 @@ struct esas2r_adapter { struct esas2r_mem_desc *vrq_mds; int num_vrqs; struct mutex fm_api_mutex; - struct semaphore fs_api_semaphore; + struct mutex fs_api_mutex; struct semaphore nvram_semaphore; struct atto_ioctl *local_atto_ioctl; u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ]; diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index ad85b33ab6b4..5b14dd29b764 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -328,7 +328,7 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, spin_lock_init(&a->request_lock); spin_lock_init(&a->fw_event_lock); mutex_init(&a->fm_api_mutex); - sema_init(&a->fs_api_semaphore, 1); + mutex_init(&a->fs_api_mutex); sema_init(&a->nvram_semaphore, 1); esas2r_fw_event_off(a); diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index c6b041a9f007..97623002908f 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -1962,7 +1962,7 @@ int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count) (struct esas2r_ioctl_fs *)a->fs_api_buffer; /* If another flash request is already in progress, return. */ - if (down_interruptible(&a->fs_api_semaphore)) { + if (mutex_lock_interruptible(&a->fs_api_mutex)) { busy: fs->status = ATTO_STS_OUT_OF_RSRC; return -EBUSY; @@ -1978,7 +1978,7 @@ busy: rq = esas2r_alloc_request(a); if (rq == NULL) { esas2r_debug("esas2r_read_fs: out of requests"); - up(&a->fs_api_semaphore); + mutex_unlock(&a->fs_api_mutex); goto busy; } @@ -2006,7 +2006,7 @@ busy: ; dont_wait: /* Free the request and keep going */ - up(&a->fs_api_semaphore); + mutex_unlock(&a->fs_api_mutex); esas2r_free_request(a, (struct esas2r_request *)rq); /* Pick up possible error code from above */ -- cgit From 3a240b21b5ccb77fcbece7a617f333cd31097776 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 11 Jun 2017 08:16:02 +0200 Subject: scsi: qedf: Fix a return value in case of error in 'qedf_alloc_global_queues' We should return -ENOMEM in case of memory allocation error, as done elsewhere in this function. [mkp: fixed typo] Fixes: 61d8658b4a435 ("scsi: qedf: Add QLogic FastLinQ offload FCoE driver framework.") Signed-off-by: Christophe JAILLET Acked-by: Chad Dupuis Signed-off-by: Martin K. 
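Error paths that jump to a shared cleanup label have to set the value they intend to propagate; leaving the status variable at its initial zero can make an allocation failure look like success to the caller. The pattern, with hypothetical names:

struct example_queue { u32 id; };

struct example_ctx {
        struct example_queue *queues[16];
};

static int example_alloc_queues(struct example_ctx *ctx, int num)
{
        int status = 0;
        int i;

        for (i = 0; i < num; i++) {
                ctx->queues[i] = kzalloc(sizeof(*ctx->queues[i]), GFP_KERNEL);
                if (!ctx->queues[i]) {
                        status = -ENOMEM;       /* don't fall through with 0 */
                        goto mem_alloc_failure;
                }
        }
        return 0;

mem_alloc_failure:
        while (--i >= 0)
                kfree(ctx->queues[i]);
        return status;
}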
Petersen --- drivers/scsi/qedf/qedf_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 5842676e3b71..f97dd0f98543 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2671,8 +2671,9 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), GFP_KERNEL); if (!qedf->global_queues[i]) { - QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocation " + QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate " "global queue %d.\n", i); + status = -ENOMEM; goto mem_alloc_failure; } -- cgit From c4d6ffc8465f4225e79ee58275fa1a61def991cc Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 11 Jun 2017 08:16:03 +0200 Subject: scsi: qedf: Use 'dma_zalloc_coherent' to reduce code verbosity. Replace some 'dma_alloc_coherent+memset' by some quivalent 'dma_zalloc_coherent' in order to reduce code verbosity Signed-off-by: Christophe JAILLET Acked-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index f97dd0f98543..c2f1a71a724e 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -972,17 +972,16 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) sizeof(void *); fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; - fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, - &fcport->sq_dma, GFP_KERNEL); + fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, + fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); if (!fcport->sq) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " "queue.\n"); rval = 1; goto out; } - memset(fcport->sq, 0, fcport->sq_mem_size); - fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); if (!fcport->sq_pbl) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " @@ -990,7 +989,6 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) rval = 1; goto out_free_sq; } - memset(fcport->sq_pbl, 0, fcport->sq_pbl_size); /* Create PBL */ num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; @@ -2597,14 +2595,13 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) } /* Allocate list of PBL pages */ - qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, + qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); if (!qedf->bdq_pbl_list) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL " "pages.\n"); return -ENOMEM; } - memset(qedf->bdq_pbl_list, 0, QEDF_PAGE_SIZE); /* * Now populate PBL list with pages that contain pointers to the @@ -2689,7 +2686,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); qedf->global_queues[i]->cq = - dma_alloc_coherent(&qedf->pdev->dev, + dma_zalloc_coherent(&qedf->pdev->dev, qedf->global_queues[i]->cq_mem_size, &qedf->global_queues[i]->cq_dma, GFP_KERNEL); @@ -2699,11 +2696,9 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) status = -ENOMEM; goto mem_alloc_failure; } - memset(qedf->global_queues[i]->cq, 0, - qedf->global_queues[i]->cq_mem_size); qedf->global_queues[i]->cq_pbl = - dma_alloc_coherent(&qedf->pdev->dev, + 
dma_zalloc_coherent(&qedf->pdev->dev, qedf->global_queues[i]->cq_pbl_size, &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); @@ -2713,8 +2708,6 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) status = -ENOMEM; goto mem_alloc_failure; } - memset(qedf->global_queues[i]->cq_pbl, 0, - qedf->global_queues[i]->cq_pbl_size); /* Create PBL */ num_pages = qedf->global_queues[i]->cq_mem_size / -- cgit From 32eebb317fba6f29dfd3be677b0fc2b63fa08131 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 11 Jun 2017 08:16:04 +0200 Subject: scsi: qedf: Merge a few quoted strings split across lines Merge some quoted strings to improve readability and to save some lines of code. Signed-off-by: Christophe JAILLET Acked-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index c2f1a71a724e..6149ea004fa1 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -975,8 +975,7 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); if (!fcport->sq) { - QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " - "queue.\n"); + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); rval = 1; goto out; } @@ -984,8 +983,7 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); if (!fcport->sq_pbl) { - QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " - "queue PBL.\n"); + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); rval = 1; goto out_free_sq; } @@ -2598,8 +2596,7 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); if (!qedf->bdq_pbl_list) { - QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL " - "pages.\n"); + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); return -ENOMEM; } @@ -2691,8 +2688,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) &qedf->global_queues[i]->cq_dma, GFP_KERNEL); if (!qedf->global_queues[i]->cq) { - QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " - "cq.\n"); + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); status = -ENOMEM; goto mem_alloc_failure; } @@ -2703,8 +2699,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); if (!qedf->global_queues[i]->cq_pbl) { - QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " - "cq PBL.\n"); + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); status = -ENOMEM; goto mem_alloc_failure; } @@ -2800,8 +2795,7 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE); cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe); - memset(&(qedf->pf_params), 0, - sizeof(qedf->pf_params)); + memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params)); /* Setup the value for fcoe PF */ qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; -- cgit From 7a06dcd3f8b0d8a89be4c0c573ecf1a1d720474e Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:06:55 -0700 Subject: scsi: lpfc: Add nvme initiator devloss support Add nvme initiator devloss support The existing 
implementation was based on no devloss behavior in the transport (e.g. immediate teardown) so code didn't properly handle delayed nvme rport device unregister calls. In addition, the driver was not correctly cycling the rport port role for each register-unregister-reregister process. This patch does the following: Rework the code to properly handle rport device unregister calls and potential re-allocation of the remoteport structure if the port comes back in under dev_loss_tmo. Correct code that was incorrectly cycling the rport port role for each register-unregister-reregister process. Prep the code to enable calling the nvme_fc transport api to dynamically update dev_loss_tmo when the scsi sysfs interface changes it. Memset the rpinfo structure in the registration call to enforce "accept nvme transport defaults" in the registration call. Driver parameters do influence the dev_loss_tmo transport setting dynamically. Simplifies the register function: the driver was incorrectly searching its local rport list to determine resume or new semantics, which is not valid as the transport already handles this. The rport was resumed if the rport handed back matches the ndlp->nrport pointer. Otherwise, devloss fired and the ndlp's nrport is NULL. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 7 ++- drivers/scsi/lpfc/lpfc_nvme.c | 141 ++++++++++++++++-------------------------- 2 files changed, 57 insertions(+), 91 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index bb2d9e238225..37f258fcf6d4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3198,9 +3198,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); - list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) - if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport) + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; + if (ndlp->rport) ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; + } spin_unlock_irq(shost->host_lock); } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 8008c8205fb6..70675fd7d884 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -186,13 +186,13 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) /* Remove this rport from the lport's list - memory is owned by the * transport. Remove the ndlp reference for the NVME transport before - * calling state machine to remove the node, this is devloss = 0 - * semantics. + * calling state machine to remove the node. */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6146 remoteport delete complete %p\n", remoteport); list_del(&rport->list); + ndlp->nrport = NULL; lpfc_nlp_put(ndlp); rport_err: @@ -1466,7 +1466,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* The remote node has to be ready to send an abort. */ if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && - !(ndlp->nlp_type & NLP_NVME_TARGET)) { + (ndlp->nlp_type & NLP_NVME_TARGET)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6048 rport %p, DID x%06x not ready for " "IO. 
State x%x, Type x%x\n", @@ -2340,69 +2340,44 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) localport = vport->localport; lport = (struct lpfc_nvme_lport *)localport->private; - if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { - - /* The driver isn't expecting the rport wwn to change - * but it might get a different DID on a different - * fabric. + /* NVME rports are not preserved across devloss. + * Just register this instance. Note, rpinfo->dev_loss_tmo + * is left 0 to indicate accept transport defaults. The + * driver communicates port role capabilities consistent + * with the PRLI response data. + */ + memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info)); + rpinfo.port_id = ndlp->nlp_DID; + if (ndlp->nlp_type & NLP_NVME_TARGET) + rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; + + if (ndlp->nlp_type & NLP_NVME_DISCOVERY) + rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; + + rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); + rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); + if (!ret) { + /* If the ndlp already has an nrport, this is just + * a resume of the existing rport. Else this is a + * new rport. */ - list_for_each_entry(rport, &lport->rport_list, list) { - if (rport->remoteport->port_name != - wwn_to_u64(ndlp->nlp_portname.u.wwn)) - continue; - lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, - "6035 lport %p, found matching rport " - "at wwpn 0x%llx, Data: x%x x%x x%x " - "x%06x\n", - lport, - rport->remoteport->port_name, - rport->remoteport->port_id, - rport->remoteport->port_role, + rport = remote_port->private; + if (ndlp->nrport == rport) { + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NVME_DISC, + "6014 Rebinding lport to " + "rport wwpn 0x%llx, " + "Data: x%x x%x x%x x%06x\n", + remote_port->port_name, + remote_port->port_id, + remote_port->port_role, ndlp->nlp_type, ndlp->nlp_DID); - remote_port = rport->remoteport; - if ((remote_port->port_id == 0) && - (remote_port->port_role == - FC_PORT_ROLE_NVME_DISCOVERY)) { - remote_port->port_id = ndlp->nlp_DID; - remote_port->port_role &= - ~FC_PORT_ROLE_NVME_DISCOVERY; - if (ndlp->nlp_type & NLP_NVME_TARGET) - remote_port->port_role |= - FC_PORT_ROLE_NVME_TARGET; - if (ndlp->nlp_type & NLP_NVME_INITIATOR) - remote_port->port_role |= - FC_PORT_ROLE_NVME_INITIATOR; - - lpfc_printf_vlog(ndlp->vport, KERN_INFO, - LOG_NVME_DISC, - "6014 Rebinding lport to " - "rport wwpn 0x%llx, " - "Data: x%x x%x x%x x%06x\n", - remote_port->port_name, - remote_port->port_id, - remote_port->port_role, - ndlp->nlp_type, - ndlp->nlp_DID); - } - return 0; - } - - /* NVME rports are not preserved across devloss. - * Just register this instance. - */ - rpinfo.port_id = ndlp->nlp_DID; - rpinfo.port_role = 0; - if (ndlp->nlp_type & NLP_NVME_TARGET) - rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; - if (ndlp->nlp_type & NLP_NVME_INITIATOR) - rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; - rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); - rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); - ret = nvme_fc_register_remoteport(localport, &rpinfo, - &remote_port); - if (!ret) { - rport = remote_port->private; + } else { + /* New rport. 
*/ rport->remoteport = remote_port; rport->lport = lport; rport->ndlp = lpfc_nlp_get(ndlp); @@ -2413,26 +2388,22 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_add_tail(&rport->list, &lport->rport_list); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NODE, - "6022 Binding new rport to lport %p " - "Rport WWNN 0x%llx, Rport WWPN 0x%llx " - "DID x%06x Role x%x\n", + "6022 Binding new rport to " + "lport %p Rport WWNN 0x%llx, " + "Rport WWPN 0x%llx DID " + "x%06x Role x%x\n", lport, rpinfo.node_name, rpinfo.port_name, rpinfo.port_id, rpinfo.port_role); - } else { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_NVME_DISC | LOG_NODE, - "6031 RemotePort Registration failed " - "err: %d, DID x%06x\n", - ret, ndlp->nlp_DID); } } else { - ret = -EINVAL; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, - "6027 Unknown nlp_type x%x on DID x%06x " - "ndlp %p. Not Registering nvme rport\n", - ndlp->nlp_type, ndlp->nlp_DID, ndlp); + lpfc_printf_vlog(vport, KERN_ERR, + LOG_NVME_DISC | LOG_NODE, + "6031 RemotePort Registration failed " + "err: %d, DID x%06x\n", + ret, ndlp->nlp_DID); } + return ret; #else return 0; @@ -2460,7 +2431,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct lpfc_nvme_lport *lport; struct lpfc_nvme_rport *rport; struct nvme_fc_remote_port *remoteport; - unsigned long wait_tmo; localport = vport->localport; @@ -2491,6 +2461,10 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) */ if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { init_completion(&rport->rport_unreg_done); + + /* No concern about the role change on the nvme remoteport. + * The transport will update it. + */ ret = nvme_fc_unregister_remoteport(remoteport); if (ret != 0) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, @@ -2499,17 +2473,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ret, remoteport->port_state); } - /* Wait for the driver's delete completion routine to finish - * before proceeding. This guarantees the transport and driver - * have completed the unreg process. - */ - wait_tmo = msecs_to_jiffies(5000); - ret = wait_for_completion_timeout(&rport->rport_unreg_done, - wait_tmo); - if (ret == 0) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, - "6169 Unreg nvme wait timeout\n"); - } } return; -- cgit From 80cc004393619a1b3a17aaf4a9e55c5b9f4fc3c1 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:06:56 -0700 Subject: scsi: lpfc: Fix transition nvme-i rport handling to nport only. As the devloss API was implemented in the nvmei driver, an evaluation of the nvme transport and the lpfc driver showed dual management of the rports. This creates a bug possibility when the thread count and SAN size increases. The nvmei driver code was based on a very early transport and was not revisited until the devloss API was introduced. Remove the listhead in the driver's rport data structure and the listhead in the driver's lport data structure. Remove all rport_list traversal. Convert the driver to use the nrport (nvme rport) pointer that is now NULL or nonNULL depending on a devloss action. Convert debugfs and nvme_info in sysfs to use the fc_nodes list in the vport. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 11 ++++++----- drivers/scsi/lpfc/lpfc_debugfs.c | 10 +++++----- drivers/scsi/lpfc/lpfc_nvme.c | 18 ------------------ drivers/scsi/lpfc/lpfc_nvme.h | 2 -- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 37f258fcf6d4..6d9b83cd82a2 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -148,8 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; struct lpfc_nvmet_tgtport *tgtp; struct nvme_fc_local_port *localport; - struct lpfc_nvme_lport *lport; - struct lpfc_nvme_rport *rport; + struct lpfc_nodelist *ndlp; struct nvme_fc_remote_port *nrport; char *statep; int len = 0; @@ -265,7 +264,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n"); spin_lock_irq(shost->host_lock); - lport = (struct lpfc_nvme_lport *)localport->private; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -281,9 +279,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); - list_for_each_entry(rport, &lport->rport_list, list) { + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (!ndlp->nrport) + continue; + /* local short-hand pointer. */ - nrport = rport->remoteport; + nrport = ndlp->nrport->remoteport; /* Port state is only one of two values for now. */ switch (nrport->port_state) { diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index efd8a17ac2e5..e288e59c967f 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -550,8 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) struct lpfc_nodelist *ndlp; unsigned char *statep; struct nvme_fc_local_port *localport; - struct lpfc_nvme_lport *lport; - struct lpfc_nvme_rport *rport; struct lpfc_nvmet_tgtport *tgtp; struct nvme_fc_remote_port *nrport; @@ -660,7 +658,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) goto out_exit; spin_lock_irq(shost->host_lock); - lport = (struct lpfc_nvme_lport *)localport->private; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -673,9 +670,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) localport->port_id, statep); len += snprintf(buf + len, size - len, "\tRport List:\n"); - list_for_each_entry(rport, &lport->rport_list, list) { + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { /* local short-hand pointer. */ - nrport = rport->remoteport; + if (!ndlp->nrport) + continue; + + nrport = ndlp->nrport->remoteport; /* Port state is only one of two values for now. 
*/ switch (nrport->port_state) { diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 70675fd7d884..ede42411e57b 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -191,7 +191,6 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6146 remoteport delete complete %p\n", remoteport); - list_del(&rport->list); ndlp->nrport = NULL; lpfc_nlp_put(ndlp); @@ -2208,7 +2207,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) lport = (struct lpfc_nvme_lport *)localport->private; vport->localport = localport; lport->vport = vport; - INIT_LIST_HEAD(&lport->rport_list); vport->nvmei_support = 1; len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max); vport->phba->total_nvme_bufs += len; @@ -2233,7 +2231,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) #if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; - struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; int ret; if (vport->nvmei_support == 0) @@ -2246,19 +2243,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6011 Destroying NVME localport %p\n", localport); - list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) { - /* The last node ref has to get released now before the rport - * private memory area is released by the transport. - */ - list_del(&rport->list); - - init_completion(&rport->rport_unreg_done); - ret = nvme_fc_unregister_remoteport(rport->remoteport); - if (ret) - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, - "6008 rport fail destroy %x\n", ret); - wait_for_completion_timeout(&rport->rport_unreg_done, 5); - } /* lport's rport list is clear. Unregister * lport and release resources. @@ -2384,8 +2368,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!rport->ndlp) return -1; ndlp->nrport = rport; - INIT_LIST_HEAD(&rport->list); - list_add_tail(&rport->list, &lport->rport_list); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NODE, "6022 Binding new rport to " diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index ec32f45daa66..d192bb268f99 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -35,13 +35,11 @@ struct lpfc_nvme_qhandle { /* Declare nvme-based local and remote port definitions. */ struct lpfc_nvme_lport { struct lpfc_vport *vport; - struct list_head rport_list; struct completion lport_unreg_done; /* Add sttats counters here */ }; struct lpfc_nvme_rport { - struct list_head list; struct lpfc_nvme_lport *lport; struct nvme_fc_remote_port *remoteport; struct lpfc_nodelist *ndlp; -- cgit From 7d790f04d7e4759da238cc6c46796f917af4cec2 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:06:57 -0700 Subject: scsi: lpfc: Fix nvme port role handling in sysfs and debugfs handlers. While debugging Devloss and recovery, debugfs and sysfs were found to not show the NVME port roles consistently. The port role FC_PORT_ROLE_NVME_DISCOVERY was added with the devloss bringup and the other issues were just oversight. Add NVME Target and DISCSRVC to debugfs nodeinfo and sysfs nvme info handlers. The full port role was added to the NVME data only not the generic nodelist. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
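Because port_role is a bitmask, an rport acting as, say, both a target and a discovery controller never matches a single-value switch case; each FC_PORT_ROLE_NVME_* bit has to be tested on its own, with a final mask to flag unknown bits. The two hunks below open-code exactly that; a condensed helper-style sketch (the helper itself is illustrative, buffer-length handling simplified):

static int example_format_nvme_roles(char *buf, size_t size, u32 port_role)
{
        int len = 0;

        if (port_role & FC_PORT_ROLE_NVME_INITIATOR)
                len += snprintf(buf + len, size - len, "INITIATOR ");
        if (port_role & FC_PORT_ROLE_NVME_TARGET)
                len += snprintf(buf + len, size - len, "TARGET ");
        if (port_role & FC_PORT_ROLE_NVME_DISCOVERY)
                len += snprintf(buf + len, size - len, "DISCSRVC ");
        if (port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
                          FC_PORT_ROLE_NVME_TARGET |
                          FC_PORT_ROLE_NVME_DISCOVERY))
                len += snprintf(buf + len, size - len,
                                "UNKNOWN ROLE x%x", port_role);

        return len;
}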
Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 22 ++++++++++------------ drivers/scsi/lpfc/lpfc_debugfs.c | 32 ++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 6d9b83cd82a2..200a614bb540 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -312,25 +312,23 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ", nrport->port_id); - switch (nrport->port_role) { - case FC_PORT_ROLE_NVME_INITIATOR: + /* An NVME rport can have multiple roles. */ + if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) len += snprintf(buf + len, PAGE_SIZE - len, "INITIATOR "); - break; - case FC_PORT_ROLE_NVME_TARGET: + if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) len += snprintf(buf + len, PAGE_SIZE - len, "TARGET "); - break; - case FC_PORT_ROLE_NVME_DISCOVERY: + if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) len += snprintf(buf + len, PAGE_SIZE - len, - "DISCOVERY "); - break; - default: + "DISCSRVC "); + if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | + FC_PORT_ROLE_NVME_TARGET | + FC_PORT_ROLE_NVME_DISCOVERY)) len += snprintf(buf + len, PAGE_SIZE - len, - "UNKNOWN_ROLE x%x", + "UNKNOWN ROLE x%x", nrport->port_role); - break; - } + len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep); /* Terminate the string. */ len += snprintf(buf + len, PAGE_SIZE - len, "\n"); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index e288e59c967f..fe3215241c46 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -621,6 +621,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) ndlp->nlp_sid); if (ndlp->nlp_type & NLP_FCP_INITIATOR) len += snprintf(buf+len, size-len, "FCP_INITIATOR "); + if (ndlp->nlp_type & NLP_NVME_TARGET) + len += snprintf(buf + len, + size - len, "NVME_TGT sid:%d ", + NLP_NO_SID); + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + len += snprintf(buf + len, + size - len, "NVME_INITIATOR "); len += snprintf(buf+len, size-len, "usgmap:%x ", ndlp->nlp_usg_map); len += snprintf(buf+len, size-len, "refcnt:%x", @@ -698,26 +705,23 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) nrport->port_name); len += snprintf(buf + len, size - len, "WWNN x%llx ", nrport->node_name); - switch (nrport->port_role) { - case FC_PORT_ROLE_NVME_INITIATOR: + + /* An NVME rport can have multiple roles. */ + if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) len += snprintf(buf + len, size - len, - "NVME INITIATOR "); - break; - case FC_PORT_ROLE_NVME_TARGET: + "INITIATOR "); + if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) len += snprintf(buf + len, size - len, - "NVME TARGET "); - break; - case FC_PORT_ROLE_NVME_DISCOVERY: + "TARGET "); + if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) len += snprintf(buf + len, size - len, - "NVME DISCOVERY "); - break; - default: + "DISCSRVC "); + if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | + FC_PORT_ROLE_NVME_TARGET | + FC_PORT_ROLE_NVME_DISCOVERY)) len += snprintf(buf + len, size - len, "UNKNOWN ROLE x%x", nrport->port_role); - break; - } - /* Terminate the string. 
*/ len += snprintf(buf + len, size - len, "\n"); } -- cgit From ce1b591c5be11fbe053f009837eccf0c633245f0 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:06:58 -0700 Subject: scsi: lpfc: Add changes to assist in NVMET debugging Inconsistent error messages and context state checks Context state sanity checks were not accurate or inconsistent in the code paths. Separated LS context states from FCP. Added and modified context state sanity checks. Use context state to determine if a sol or unsol ABORT is needed. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 208 ++++++++++++++++++++++++++++------------- drivers/scsi/lpfc/lpfc_nvmet.h | 14 +-- 2 files changed, 151 insertions(+), 71 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 41ea97fb10ae..e82b9dc733a1 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -112,6 +112,15 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; + ctxp = cmdwqe->context2; + + if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6410 NVMET LS cmpl state mismatch IO x%x: " + "%d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + } + if (!phba->targetport) goto out; @@ -123,15 +132,14 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, atomic_inc(&tgtp->xmt_ls_rsp_cmpl); out: - ctxp = cmdwqe->context2; rsp = &ctxp->ctx.ls_req; lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n", ctxp->oxid, status, result); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, - "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__, - ctxp, status, result); + "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n", + status, result, ctxp->oxid); lpfc_nlp_put(cmdwqe->context1); cmdwqe->context2 = NULL; @@ -173,6 +181,12 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) ctxp->txrdy = NULL; ctxp->txrdy_phys = 0; } + + if (ctxp->state == LPFC_NVMET_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6411 NVMET free, already free IO x%x: %d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + } ctxp->state = LPFC_NVMET_STE_FREE; spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); @@ -580,8 +594,17 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, int rc; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, - "6023 %s: Entrypoint ctx %p %p\n", __func__, - ctxp, tgtport); + "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid); + + if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) || + (ctxp->entry_cnt != 1)) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6412 NVMET LS rsp state mismatch " + "oxid x%x: %d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + } + ctxp->state = LPFC_NVMET_STE_LS_RSP; + ctxp->entry_cnt++; nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma, rsp->rsplen); @@ -751,15 +774,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, unsigned long flags; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6103 Abort op: oxri x%x flg x%x cnt %d\n", - ctxp->oxid, ctxp->flag, ctxp->entry_cnt); + "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n", + ctxp->oxid, ctxp->flag, ctxp->state); - lpfc_nvmeio_data(phba, "NVMET FCP ABRT: " - "xri x%x flg x%x cnt x%x\n", - ctxp->oxid, ctxp->flag, ctxp->entry_cnt); + lpfc_nvmeio_data(phba, "NVMET FCP 
ABRT: xri x%x flg x%x ste x%x\n", + ctxp->oxid, ctxp->flag, ctxp->state); atomic_inc(&lpfc_nvmep->xmt_fcp_abort); - ctxp->entry_cnt++; + spin_lock_irqsave(&ctxp->ctxlock, flags); /* Since iaab/iaar are NOT set, we need to check @@ -770,12 +792,17 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, return; } ctxp->flag |= LPFC_NVMET_ABORT_OP; - if (ctxp->flag & LPFC_NVMET_IO_INP) - lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, - ctxp->oxid); - else + + /* An state of LPFC_NVMET_STE_RCV means we have just received + * the NVME command and have not started processing it. + * (by issuing any IO WQEs on this exchange yet) + */ + if (ctxp->state == LPFC_NVMET_STE_RCV) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); + else + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); spin_unlock_irqrestore(&ctxp->ctxlock, flags); } @@ -790,6 +817,13 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, unsigned long flags; bool aborting = false; + if (ctxp->state != LPFC_NVMET_STE_DONE && + ctxp->state != LPFC_NVMET_STE_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6413 NVMET release bad state %d %d oxid x%x\n", + ctxp->state, ctxp->entry_cnt, ctxp->oxid); + } + spin_lock_irqsave(&ctxp->ctxlock, flags); if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || (ctxp->flag & LPFC_NVMET_XBUSY)) { @@ -891,6 +925,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) return -ENOMEM; } ctx_buf->context->ctxbuf = ctx_buf; + ctx_buf->context->state = LPFC_NVMET_STE_FREE; ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); if (!ctx_buf->iocbq) { @@ -1103,7 +1138,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, } lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6318 XB aborted %x flg x%x (%x)\n", + "6318 XB aborted oxid %x flg x%x (%x)\n", ctxp->oxid, ctxp->flag, released); if (released) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); @@ -1253,7 +1288,8 @@ dropit: ctxp->oxid = oxid; ctxp->sid = sid; ctxp->wqeq = NULL; - ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->state = LPFC_NVMET_STE_LS_RCV; + ctxp->entry_cnt = 1; ctxp->rqb_buffer = (void *)nvmebuf; lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", @@ -1268,8 +1304,8 @@ dropit: payload, size); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, - "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x " - "%08x %08x %08x\n", __func__, ctxp, size, rc, + "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " + "%08x %08x %08x\n", size, rc, *payload, *(payload+1), *(payload+2), *(payload+3), *(payload+4), *(payload+5)); @@ -1383,6 +1419,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, sid = sli4_sid_from_fc_hdr(fc_hdr); ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; + if (ctxp->state != LPFC_NVMET_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6414 NVMET Context corrupt %d %d oxid x%x\n", + ctxp->state, ctxp->entry_cnt, ctxp->oxid); + } memset(ctxp, 0, sizeof(ctxp->ctx)); ctxp->wqeq = NULL; ctxp->txrdy = NULL; @@ -1547,9 +1588,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, if (!lpfc_is_link_up(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, - "6104 lpfc_nvmet_prep_ls_wqe: link err: " - "NPORT x%x oxid:x%x\n", - ctxp->sid, ctxp->oxid); + "6104 NVMET prep LS wqe: link err: " + "NPORT x%x oxid:x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } @@ -1557,9 +1598,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, nvmewqe = lpfc_sli_get_iocbq(phba); if (nvmewqe == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, - "6105 
lpfc_nvmet_prep_ls_wqe: No WQE: " - "NPORT x%x oxid:x%x\n", - ctxp->sid, ctxp->oxid); + "6105 NVMET prep LS wqe: No WQE: " + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } @@ -1568,9 +1609,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, - "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: " - "NPORT x%x oxid:x%x\n", - ctxp->sid, ctxp->oxid); + "6106 NVMET prep LS wqe: No ndlp: " + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); goto nvme_wqe_free_wqeq_exit; } ctxp->wqeq = nvmewqe; @@ -1642,9 +1683,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; - /* Xmit NVME response to remote NPORT */ + /* Xmit NVMET response to remote NPORT */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, - "6039 Xmit NVME LS response to remote " + "6039 Xmit NVMET LS response to remote " "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, rspsize); @@ -1676,9 +1717,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, if (!lpfc_is_link_up(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6107 lpfc_nvmet_prep_fcp_wqe: link err:" - "NPORT x%x oxid:x%x\n", ctxp->sid, - ctxp->oxid); + "6107 NVMET prep FCP wqe: link err:" + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } @@ -1687,17 +1728,18 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: " - "NPORT x%x oxid:x%x\n", - ctxp->sid, ctxp->oxid); + "6108 NVMET prep FCP wqe: no ndlp: " + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: " - "NPORT x%x oxid:x%x cnt %d\n", - ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt); + "6109 NVMET prep FCP wqe: seg cnt err: " + "NPORT x%x oxid x%x ste %d cnt %d\n", + ctxp->sid, ctxp->oxid, ctxp->state, + phba->cfg_nvme_seg_cnt); return NULL; } @@ -1708,9 +1750,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe = ctxp->ctxbuf->iocbq; if (nvmewqe == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6110 lpfc_nvmet_prep_fcp_wqe: No " - "WQE: NPORT x%x oxid:x%x\n", - ctxp->sid, ctxp->oxid); + "6110 NVMET prep FCP wqe: No " + "WQE: NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } ctxp->wqeq = nvmewqe; @@ -1722,13 +1764,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* Sanity check */ if (((ctxp->state == LPFC_NVMET_STE_RCV) && (ctxp->entry_cnt == 1)) || - ((ctxp->state == LPFC_NVMET_STE_DATA) && - (ctxp->entry_cnt > 1))) { + (ctxp->state == LPFC_NVMET_STE_DATA)) { wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; } else { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6111 Wrong state %s: %d cnt %d\n", - __func__, ctxp->state, ctxp->entry_cnt); + "6111 Wrong state NVMET FCP: %d cnt %d\n", + ctxp->state, ctxp->entry_cnt); return NULL; } @@ -1832,7 +1873,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); } - ctxp->state = LPFC_NVMET_STE_DATA; break; case NVMET_FCOP_WRITEDATA: @@ 
-1923,7 +1963,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = 0; sgl++; - ctxp->state = LPFC_NVMET_STE_DATA; atomic_inc(&tgtp->xmt_fcp_write); break; @@ -1980,7 +2019,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP); bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); - ctxp->state = LPFC_NVMET_STE_RSP; if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { /* Good response - all zero's on wire */ @@ -2029,6 +2067,8 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, sgl++; ctxp->offset += cnt; } + ctxp->state = LPFC_NVMET_STE_DATA; + ctxp->entry_cnt++; return nvmewqe; } @@ -2206,17 +2246,32 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, atomic_inc(&tgtp->xmt_ls_abort_cmpl); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", + "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n", ctxp, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); - if (ctxp) { - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; - lpfc_sli_release_iocbq(phba, cmdwqe); - kfree(ctxp); - } else + if (!ctxp) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + "6415 NVMET LS Abort No ctx: WCQE: " + "%08x %08x %08x %08x\n", + wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + lpfc_sli_release_iocbq(phba, cmdwqe); + return; + } + + if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6416 NVMET LS abort cmpl state mismatch: " + "oxid x%x: %d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + } + + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + kfree(ctxp); } static int @@ -2240,7 +2295,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6134 Drop ABTS - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); @@ -2250,7 +2305,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, abts_wqeq = ctxp->wqeq; wqe_abts = &abts_wqeq->wqe; - ctxp->state = LPFC_NVMET_STE_ABORT; /* * Since we zero the whole WQE, we need to ensure we set the WQE fields @@ -2338,7 +2392,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6160 Drop ABORT - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); @@ -2351,7 +2405,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); if (!ctxp->abort_wqeq) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6161 ABORT failed: No wqeqs: " "xri: x%x\n", ctxp->oxid); /* No failure to an ABTS request. 
*/ @@ -2471,6 +2525,15 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, ctxp->wqeq->hba_wqidx = 0; } + if (ctxp->state == LPFC_NVMET_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", + ctxp->state, ctxp->entry_cnt, ctxp->oxid); + rc = WQE_BUSY; + goto aerr; + } + ctxp->state = LPFC_NVMET_STE_ABORT; + ctxp->entry_cnt++; rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); if (rc == 0) goto aerr; @@ -2490,7 +2553,7 @@ aerr: atomic_inc(&tgtp->xmt_abort_rsp_error); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", ctxp->oxid, rc); return 1; @@ -2507,12 +2570,24 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, unsigned long flags; int rc; + if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) || + (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) { + ctxp->state = LPFC_NVMET_STE_LS_ABORT; + ctxp->entry_cnt++; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6418 NVMET LS abort state mismatch " + "IO x%x: %d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + ctxp->state = LPFC_NVMET_STE_LS_ABORT; + } + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (!ctxp->wqeq) { /* Issue ABTS for this WQE based on iotag */ ctxp->wqeq = lpfc_sli_get_iocbq(phba); if (!ctxp->wqeq) { - lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6068 Abort failed: No wqeqs: " "xri: x%x\n", xri); /* No failure to an ABTS request. */ @@ -2523,7 +2598,10 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, abts_wqeq = ctxp->wqeq; wqe_abts = &abts_wqeq->wqe; - lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); + if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { + rc = WQE_BUSY; + goto out; + } spin_lock_irqsave(&phba->hbalock, flags); abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; @@ -2535,13 +2613,13 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, atomic_inc(&tgtp->xmt_abort_unsol); return 0; } - +out: atomic_inc(&tgtp->xmt_abort_rsp_error); abts_wqeq->context2 = NULL; abts_wqeq->context3 = NULL; lpfc_sli_release_iocbq(phba, abts_wqeq); kfree(ctxp); - lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6056 Failed to Issue ABTS. 
Status x%x\n", rc); return 0; } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 6eb2f5d8d4ed..e675ef17be08 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -93,12 +93,14 @@ struct lpfc_nvmet_rcv_ctx { uint16_t cpu; uint16_t state; /* States */ -#define LPFC_NVMET_STE_FREE 0 -#define LPFC_NVMET_STE_RCV 1 -#define LPFC_NVMET_STE_DATA 2 -#define LPFC_NVMET_STE_ABORT 3 -#define LPFC_NVMET_STE_RSP 4 -#define LPFC_NVMET_STE_DONE 5 +#define LPFC_NVMET_STE_LS_RCV 1 +#define LPFC_NVMET_STE_LS_ABORT 2 +#define LPFC_NVMET_STE_LS_RSP 3 +#define LPFC_NVMET_STE_RCV 4 +#define LPFC_NVMET_STE_DATA 5 +#define LPFC_NVMET_STE_ABORT 6 +#define LPFC_NVMET_STE_DONE 7 +#define LPFC_NVMET_STE_FREE 0xff uint16_t flag; #define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */ #define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */ -- cgit From 92721c3b97126a712af221635407251e42e58b60 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:06:59 -0700 Subject: scsi: lpfc: Fix Lun Priority level shown as NA Lun Priority level shown as NA Remote port is not getting registered for nameserver and fdmi. Due to which dfc SendCTPassThru cmd is failing. Made changes to register the remote port for both. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_hbadisc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 3ffcd9215ca8..055fedd761ea 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4182,8 +4182,10 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (new_state == NLP_STE_MAPPED_NODE || new_state == NLP_STE_UNMAPPED_NODE) { - if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) || - (ndlp->nlp_DID == Fabric_DID)) { + if (ndlp->nlp_fc4_type & NLP_FC4_FCP || + ndlp->nlp_DID == Fabric_DID || + ndlp->nlp_DID == NameServer_DID || + ndlp->nlp_DID == FDMI_DID) { vport->phba->nport_event_cnt++; /* * Tell the fc transport about the port, if we haven't -- cgit From 6599e12428a8b3b182e91818cb7b73ab29e7daff Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:00 -0700 Subject: scsi: lpfc: Fix nvmet node ref count handling When unloading the driver, the NVMET driver would wait the full 30 seconds for its UNMAPPED initiator node to get removed before continuing with the unload process. NVMEI worked correctly. For each rport put into UNMAPPED or MAPPED state by NVMET, the driver puts a reference on the NDLP. The difference is that NVMEI has a unregister call for its rports and the extra reference is removed in the unregister process. For NVMET, the driver has to remove the reference explicitly when dropping out of UNMAPPED or MAPPED because there is no unregister call. Add a call to lpfc_nlp_put on the ndlp when NVMET and the old state was UNMAPPED or MAPPED. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_hbadisc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 055fedd761ea..db2d0e692ddf 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4167,14 +4167,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_unregister_remote_port(ndlp); } - /* Notify the NVME transport of this rport's loss on the - * Initiator. For NVME Target, should upcall transport - * in the else clause when API available. - */ if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { vport->phba->nport_event_cnt++; if (vport->phba->nvmet_support == 0) + /* Start devloss */ lpfc_nvme_unregister_port(vport, ndlp); + else + /* NVMET has no upcall. */ + lpfc_nlp_put(ndlp); } } -- cgit From 14041bd170c28f652a91f6ac343acbabfdf1ef4b Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:01 -0700 Subject: scsi: lpfc: Fix Port going offline after multiple resets. Observing lpfc port down after issuing hbacmd reset command Failure in posting SGL buffers. If there is only one SGL buffer and rrq is valid for its XRI, we are rightly returning NULL but not adding the buffer back to the SGL list. So, number of buffers become less than total count and repost fails during reset. Add SGL buffer back to list before returning NULL. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d6b184839bc2..e81fa7d4deb5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -968,6 +968,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); if (sglq == start_sglq) { + list_add_tail(&sglq->list, lpfc_els_sgl_list); sglq = NULL; break; } else -- cgit From 2cee7808004b33bd5dc75fccd8d145b5e208ef93 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:02 -0700 Subject: scsi: lpfc: Fix counters so outstandng NVME IO count is accurate NVME FC counters don't reflect actual results Since counters are not atomic, or protected by a lock, the values often get screwed up. Make them atomic, like NVMET. Fix up sysfs and debugfs display accordingly Added Outstanding IOs to stats display Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc.h | 20 ++++++++++---------- drivers/scsi/lpfc/lpfc_attr.c | 32 +++++++++++++++++++++----------- drivers/scsi/lpfc/lpfc_debugfs.c | 30 +++++++++++++++++++++--------- drivers/scsi/lpfc/lpfc_init.c | 10 ++++++++++ drivers/scsi/lpfc/lpfc_nvme.c | 19 +++++++++++++------ drivers/scsi/lpfc/lpfc_scsi.c | 19 ++++++++++++++----- 6 files changed, 89 insertions(+), 41 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f2c0ba6ced78..a9d73728a68c 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -913,16 +913,16 @@ struct lpfc_hba { /* * stat counters */ - uint64_t fc4ScsiInputRequests; - uint64_t fc4ScsiOutputRequests; - uint64_t fc4ScsiControlRequests; - uint64_t fc4ScsiIoCmpls; - uint64_t fc4NvmeInputRequests; - uint64_t fc4NvmeOutputRequests; - uint64_t fc4NvmeControlRequests; - uint64_t fc4NvmeIoCmpls; - uint64_t fc4NvmeLsRequests; - uint64_t fc4NvmeLsCmpls; + atomic_t fc4ScsiInputRequests; + atomic_t fc4ScsiOutputRequests; + atomic_t fc4ScsiControlRequests; + atomic_t fc4ScsiIoCmpls; + atomic_t fc4NvmeInputRequests; + atomic_t fc4NvmeOutputRequests; + atomic_t fc4NvmeControlRequests; + atomic_t fc4NvmeIoCmpls; + atomic_t fc4NvmeLsRequests; + atomic_t fc4NvmeLsCmpls; uint64_t bg_guard_err_cnt; uint64_t bg_apptag_err_cnt; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 200a614bb540..eb33473cbc62 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -150,6 +150,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, struct nvme_fc_local_port *localport; struct lpfc_nodelist *ndlp; struct nvme_fc_remote_port *nrport; + uint64_t data1, data2, data3, tot; char *statep; int len = 0; @@ -244,11 +245,18 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&tgtp->xmt_abort_rsp), atomic_read(&tgtp->xmt_abort_rsp_error)); + spin_lock(&phba->sli4_hba.nvmet_io_lock); + tot = phba->sli4_hba.nvmet_xri_cnt - + phba->sli4_hba.nvmet_ctx_cnt; + spin_unlock(&phba->sli4_hba.nvmet_io_lock); + len += snprintf(buf + len, PAGE_SIZE - len, - "IO_CTX: %08x outstanding %08x total %x", + "IO_CTX: %08x WAIT: cur %08x tot %08x\n" + "CTX Outstanding %08llx\n", phba->sli4_hba.nvmet_ctx_cnt, phba->sli4_hba.nvmet_io_wait_cnt, - phba->sli4_hba.nvmet_io_wait_total); + phba->sli4_hba.nvmet_io_wait_total, + tot); len += snprintf(buf+len, PAGE_SIZE-len, "\n"); return len; @@ -337,19 +345,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n"); len += snprintf(buf+len, PAGE_SIZE-len, - "LS: Xmt %016llx Cmpl %016llx\n", - phba->fc4NvmeLsRequests, - phba->fc4NvmeLsCmpls); - + "LS: Xmt %016x Cmpl %016x\n", + atomic_read(&phba->fc4NvmeLsRequests), + atomic_read(&phba->fc4NvmeLsCmpls)); + + tot = atomic_read(&phba->fc4NvmeIoCmpls); + data1 = atomic_read(&phba->fc4NvmeInputRequests); + data2 = atomic_read(&phba->fc4NvmeOutputRequests); + data3 = atomic_read(&phba->fc4NvmeControlRequests); len += snprintf(buf+len, PAGE_SIZE-len, "FCP: Rd %016llx Wr %016llx IO %016llx\n", - phba->fc4NvmeInputRequests, - phba->fc4NvmeOutputRequests, - phba->fc4NvmeControlRequests); + data1, data2, data3); len += snprintf(buf+len, PAGE_SIZE-len, - " Cmpl %016llx\n", phba->fc4NvmeIoCmpls); - + " Cmpl %016llx Outstanding %016llx\n", + tot, (data1 + data2 + data3) - tot); return len; } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 
fe3215241c46..bd45c50ddcc2 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -750,6 +750,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) struct lpfc_hba *phba = vport->phba; struct lpfc_nvmet_tgtport *tgtp; struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; + uint64_t tot, data1, data2, data3; int len = 0; int cnt; @@ -847,11 +848,18 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); } + spin_lock(&phba->sli4_hba.nvmet_io_lock); + tot = phba->sli4_hba.nvmet_xri_cnt - + phba->sli4_hba.nvmet_ctx_cnt; + spin_unlock(&phba->sli4_hba.nvmet_io_lock); + len += snprintf(buf + len, size - len, - "IO_CTX: %08x outstanding %08x total %08x\n", + "IO_CTX: %08x WAIT: cur %08x tot %08x\n" + "CTX Outstanding %08llx\n", phba->sli4_hba.nvmet_ctx_cnt, phba->sli4_hba.nvmet_io_wait_cnt, - phba->sli4_hba.nvmet_io_wait_total); + phba->sli4_hba.nvmet_io_wait_total, + tot); } else { if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return len; @@ -860,18 +868,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) "\nNVME Lport Statistics\n"); len += snprintf(buf + len, size - len, - "LS: Xmt %016llx Cmpl %016llx\n", - phba->fc4NvmeLsRequests, - phba->fc4NvmeLsCmpls); + "LS: Xmt %016x Cmpl %016x\n", + atomic_read(&phba->fc4NvmeLsRequests), + atomic_read(&phba->fc4NvmeLsCmpls)); + + tot = atomic_read(&phba->fc4NvmeIoCmpls); + data1 = atomic_read(&phba->fc4NvmeInputRequests); + data2 = atomic_read(&phba->fc4NvmeOutputRequests); + data3 = atomic_read(&phba->fc4NvmeControlRequests); len += snprintf(buf + len, size - len, "FCP: Rd %016llx Wr %016llx IO %016llx\n", - phba->fc4NvmeInputRequests, - phba->fc4NvmeOutputRequests, - phba->fc4NvmeControlRequests); + data1, data2, data3); len += snprintf(buf + len, size - len, - " Cmpl %016llx\n", phba->fc4NvmeIoCmpls); + " Cmpl %016llx Outstanding %016llx\n", + tot, (data1 + data2 + data3) - tot); } return len; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 9add9473cae5..3064f0768033 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6731,6 +6731,16 @@ lpfc_create_shost(struct lpfc_hba *phba) phba->fc_arbtov = FF_DEF_ARBTOV; atomic_set(&phba->sdev_cnt, 0); + atomic_set(&phba->fc4ScsiInputRequests, 0); + atomic_set(&phba->fc4ScsiOutputRequests, 0); + atomic_set(&phba->fc4ScsiControlRequests, 0); + atomic_set(&phba->fc4ScsiIoCmpls, 0); + atomic_set(&phba->fc4NvmeInputRequests, 0); + atomic_set(&phba->fc4NvmeOutputRequests, 0); + atomic_set(&phba->fc4NvmeControlRequests, 0); + atomic_set(&phba->fc4NvmeIoCmpls, 0); + atomic_set(&phba->fc4NvmeLsRequests, 0); + atomic_set(&phba->fc4NvmeLsCmpls, 0); vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); if (!vport) return -ENODEV; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index ede42411e57b..8206aa5493e3 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -211,7 +211,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_dmabuf *buf_ptr; struct lpfc_nodelist *ndlp; - vport->phba->fc4NvmeLsCmpls++; + atomic_inc(&vport->phba->fc4NvmeLsCmpls); pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; @@ -478,7 +478,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 
&pnvme_lsreq->rspdma); - vport->phba->fc4NvmeLsRequests++; + atomic_inc(&vport->phba->fc4NvmeLsRequests); /* Hardcode the wait to 30 seconds. Connections are failing otherwise. * This code allows it all to work. @@ -773,7 +773,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, wcqe); return; } - phba->fc4NvmeIoCmpls++; + atomic_inc(&phba->fc4NvmeIoCmpls); nCmd = lpfc_ncmd->nvmeCmd; rport = lpfc_ncmd->nrport; @@ -998,7 +998,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_WRITE_CMD); - phba->fc4NvmeOutputRequests++; + atomic_inc(&phba->fc4NvmeOutputRequests); } else { /* Word 7 */ bf_set(wqe_cmnd, &wqe->generic.wqe_com, @@ -1019,7 +1019,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD); - phba->fc4NvmeInputRequests++; + atomic_inc(&phba->fc4NvmeInputRequests); } } else { /* Word 4 */ @@ -1040,7 +1040,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 11 */ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD); - phba->fc4NvmeControlRequests++; + atomic_inc(&phba->fc4NvmeControlRequests); } /* * Finish initializing those WQE fields that are independent @@ -1361,6 +1361,13 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, return 0; out_free_nvme_buf: + if (lpfc_ncmd->nvmeCmd->sg_cnt) { + if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) + atomic_dec(&phba->fc4NvmeOutputRequests); + else + atomic_dec(&phba->fc4NvmeInputRequests); + } else + atomic_dec(&phba->fc4NvmeControlRequests); lpfc_release_nvme_buf(phba, lpfc_ncmd); out_fail: return ret; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 54fd0c81ceaf..cfe1d01eb73f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3931,7 +3931,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct Scsi_Host *shost; uint32_t logit = LOG_FCP; - phba->fc4ScsiIoCmpls++; + atomic_inc(&phba->fc4ScsiIoCmpls); /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; @@ -4250,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, vport->cfg_first_burst_size; } fcp_cmnd->fcpCntl3 = WRITE_DATA; - phba->fc4ScsiOutputRequests++; + atomic_inc(&phba->fc4ScsiOutputRequests); } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; fcp_cmnd->fcpCntl3 = READ_DATA; - phba->fc4ScsiInputRequests++; + atomic_inc(&phba->fc4ScsiInputRequests); } } else { iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = 0; - phba->fc4ScsiControlRequests++; + atomic_inc(&phba->fc4ScsiControlRequests); } if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) @@ -4640,7 +4640,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) (uint32_t) (cmnd->request->timeout / 1000)); - + switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { + case WRITE_DATA: + atomic_dec(&phba->fc4ScsiOutputRequests); + break; + case READ_DATA: + atomic_dec(&phba->fc4ScsiInputRequests); + break; + default: + atomic_dec(&phba->fc4ScsiControlRequests); + } goto out_host_busy_free_buf; } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { -- cgit From 522dceeb62ded1a7b538d2f1f61cc69a1402537d Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:03 -0700 Subject: scsi: lpfc: Fix return value of board_mode store routine in case of online failure On hbacmd reset failure, 
observing wrong string "nline" in kernel log. On failure, non negative value (1) is returned from sysfs store routine. It is interpreted as count by kernel and store routine is called again with the remaining characters as input. Fix: Return negative error code (-EIO) in case of failure. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index eb33473cbc62..8eee39de15f7 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1351,6 +1351,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, goto board_mode_out; } wait_for_completion(&online_compl); + if (status) + status = -EIO; } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) -- cgit From ecbb227e635a61f751e8c8ad1c585a0c4ed11de1 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:04 -0700 Subject: scsi: lpfc: Fix crash on powering off BFS VM with passthrough device Null pointer dereference when BFS VM is powered off The driver incorrectly uses sli3_ring on SLI-4 adapters Use the correct ring structure based on sli_rev Signed-off-by: Dick Kennedy Signed-off-by: James Smart Tested-by: Raphael Silva Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e81fa7d4deb5..c4ceef69bd6b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10951,6 +10951,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocbq; struct lpfc_iocbq *abtsiocb; + struct lpfc_sli_ring *pring_s4; IOCB_t *cmd = NULL; int errcnt = 0, ret_val = 0; int i; @@ -11004,8 +11005,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, /* Setup callback routine and issue the command. */ abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; - ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, - abtsiocb, 0); + if (phba->sli_rev == LPFC_SLI_REV4) { + pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); + if (!pring_s4) + continue; + ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocb, 0); + } else + ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocb, 0); if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); errcnt++; -- cgit From b83d005e63ba2383738c1818691e973761d1e860 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:05 -0700 Subject: scsi: lpfc: Fix System panic after loading the driver System panic with general protection fault during driver load The driver uses a static array sli4_hba.handler_name to store the irq handler names. If the io_channel_irqs exceeds the pre-allocated size (32+1), then the driver will overwrite other fields of sli4_hba. Fix: Dynamically allocate handler_name. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_init.c | 11 ++++++----- drivers/scsi/lpfc/lpfc_sli4.h | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 3064f0768033..a825806036c3 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -9665,6 +9665,7 @@ static int lpfc_sli4_enable_msix(struct lpfc_hba *phba) { int vectors, rc, index; + char *name; /* Set up MSI-X multi-message vectors */ vectors = phba->io_channel_irqs; @@ -9683,9 +9684,9 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { - memset(&phba->sli4_hba.handler_name[index], 0, 16); - snprintf((char *)&phba->sli4_hba.handler_name[index], - LPFC_SLI4_HANDLER_NAME_SZ, + name = phba->sli4_hba.hba_eq_hdl[index].handler_name; + memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); + snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, LPFC_DRIVER_HANDLER_NAME"%d", index); phba->sli4_hba.hba_eq_hdl[index].idx = index; @@ -9694,12 +9695,12 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) if (phba->cfg_fof && (index == (vectors - 1))) rc = request_irq(pci_irq_vector(phba->pcidev, index), &lpfc_sli4_fof_intr_handler, 0, - (char *)&phba->sli4_hba.handler_name[index], + name, &phba->sli4_hba.hba_eq_hdl[index]); else rc = request_irq(pci_irq_vector(phba->pcidev, index), &lpfc_sli4_hba_intr_handler, 0, - (char *)&phba->sli4_hba.handler_name[index], + name, &phba->sli4_hba.hba_eq_hdl[index]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index cf863db27700..28b75e08e044 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -407,8 +407,10 @@ struct lpfc_max_cfg_param { struct lpfc_hba; /* SLI4 HBA multi-fcp queue handler struct */ +#define LPFC_SLI4_HANDLER_NAME_SZ 16 struct lpfc_hba_eq_hdl { uint32_t idx; + char handler_name[LPFC_SLI4_HANDLER_NAME_SZ]; struct lpfc_hba *phba; atomic_t hba_eq_in_use; struct cpumask *cpumask; @@ -480,7 +482,6 @@ struct lpfc_sli4_lnk_info { #define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \ LPFC_FOF_IO_CHAN_NUM) -#define LPFC_SLI4_HANDLER_NAME_SZ 16 /* Used for IRQ vector to CPU mapping */ struct lpfc_vector_map_info { @@ -548,7 +549,6 @@ struct lpfc_sli4_hba { uint32_t ue_to_rp; struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; - uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ]; struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */ /* Pointers to the constructed SLI4 queues */ -- cgit From e92974f6cdaad36691b74045817b585a7b76988b Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:06 -0700 Subject: scsi: lpfc: Null pointer dereference when log_verbose is set to 0xffffffff Kernel panic when log_verbose is set to 0xffffffff phba->pport is dereferenced before it is initialized Fix: Do not dereference phba->pport if it is NULL Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c4ceef69bd6b..fb4c708ae747 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -7513,7 +7513,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, "(%d):0308 Mbox cmd issue - BUSY Data: " "x%x x%x x%x x%x\n", pmbox->vport ? 
pmbox->vport->vpi : 0xffffff, - mbx->mbxCommand, phba->pport->port_state, + mbx->mbxCommand, + phba->pport ? phba->pport->port_state : 0xff, psli->sli_flag, flag); psli->slistat.mbox_busy++; @@ -7565,7 +7566,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " "x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, - mbx->mbxCommand, phba->pport->port_state, + mbx->mbxCommand, + phba->pport ? phba->pport->port_state : 0xff, psli->sli_flag, flag); if (mbx->mbxCommand != MBX_HEARTBEAT) { -- cgit From dea37e82fa3423a6c8d1325d44e68b6d03892453 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:07 -0700 Subject: scsi: lpfc: Fix PRLI retry handling when target rejects it. The nvmet driver was rejecting the initiator's PRLI because its reg_rpi for the PLOGI was still outstanding. The initiator would resend the PRLI without delay and get the same answer. The PRLI retries would exhaust causing the nvme initiator to set the nvmet ndlp to UNMAPPED. The driver's lpfc_els_retry handler did not have a policy for an LS_RJT with explanation CMD_IN_PROGRESS for PRLI or NVME_PRLI. This caused the delay to remain at 0 but retry set 1. Fix: When the ELS response is LS_RJT, TPC and the command was PRLI or NVME_PRLI, just set the delay to 1000 mS to get a 1 second delay on the PRLI retry. This was enough to allow the REG_RPI to complete at the target. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_els.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8e532b39ae93..a140318d6159 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3332,6 +3332,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: + /* The driver has a VALID PLOGI but the rport has + * rejected the PRLI - can't do it now. Delay + * for 1 second and try again - don't care about + * the explanation. + */ + if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) { + delay = 1000; + maxretry = lpfc_max_els_tries + 1; + retry = 1; + break; + } + + /* Legacy bug fix code for targets with PLOGI delays. */ if (stat.un.b.lsRjtRsnCodeExp == LSEXP_CMD_IN_PROGRESS) { if (cmd == ELS_CMD_PLOGI) { @@ -3350,9 +3363,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, retry = 1; break; } - if ((cmd == ELS_CMD_PLOGI) || - (cmd == ELS_CMD_PRLI) || - (cmd == ELS_CMD_NVMEPRLI)) { + if (cmd == ELS_CMD_PLOGI) { delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; -- cgit From b57ab7469d2643ef1ff1fc5d82f5db22af60fc46 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:08 -0700 Subject: scsi: lpfc: Fix vports not logging into target vports cannot login to target. For vports, lpfc_nodelist is allocated for targets only on completion of GFF_ID command. Driver checks if lpfc_nodelist exists for target before sending GFF_ID. So, GFF_ID and PLOGI are not sent. As mentioned by the comment in lpfc_prep_node_fc4type() routine, do not send GFF_ID only if this NPortID is previously identified as FCP target. Send GFF_ID if it is a newly identified remote port from GID_FT response. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_ct.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 24ce96dcc94d..9c0c1463057d 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -503,26 +503,23 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) Did, vport->fc_flag, vport->fc_rscn_id_cnt); /* - * This NPortID was previously a FCP target, + * This NPortID was previously a FCP/NVMe target, * Don't even bother to send GFF_ID. */ ndlp = lpfc_findnode_did(vport, Did); - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) - ndlp->nlp_fc4_type = fc4_type; - - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { - ndlp->nlp_fc4_type = fc4_type; - - if (ndlp->nlp_type & NLP_FCP_TARGET) - lpfc_setup_disc_node(vport, Did); - - else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, - 0, Did) == 0) - vport->num_disc_nodes++; - - else - lpfc_setup_disc_node(vport, Did); - } + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + (ndlp->nlp_type & + (NLP_FCP_TARGET | NLP_NVME_TARGET))) { + if (fc4_type == FC_TYPE_FCP) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if (fc4_type == FC_TYPE_NVME) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_setup_disc_node(vport, Did); + } else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, + 0, Did) == 0) + vport->num_disc_nodes++; + else + lpfc_setup_disc_node(vport, Did); } else { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d", -- cgit From 78e1d2009f1c539a23f470728e530a3ce1d527e6 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:09 -0700 Subject: scsi: lpfc: Fix defects reported by Coverity Scan Addressed the following reported defects: ** CID 1411552: Control flow issues (MISSING_BREAK) /drivers/scsi/lpfc/lpfc_sli.c: 13259 in lpfc_sli4_nvmet_handle_rcqe() ** CID 1411553: Memory - illegal accesses (OVERRUN) /drivers/scsi/lpfc/lpfc_sli.c: 16218 in lpfc_fc_frame_check() ** CID 1411553: Memory - illegal accesses (OVERRUN) Overrunning array "lpfc_rctl_names" of 202 8-byte elements at element index 244 (byte offset 1952) using index "fc_hdr->fh_r_ctl" (which evaluates to 244). ** CID 1411554: Null pointer dereferences (REVERSE_INULL) /drivers/scsi/lpfc/lpfc_nvmet.c: 2131 in lpfc_nvmet_unsol_fcp_abort_cmp() ** CID 1411555: Memory - illegal accesses (UNINIT) /drivers/scsi/lpfc/lpfc_nvmet.c: 180 in lpfc_nvmet_ctxbuf_post() Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 10 ++++------ drivers/scsi/lpfc/lpfc_sli.c | 24 +++++++++--------------- 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index e82b9dc733a1..dba1bd216be3 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -170,7 +170,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct rqb_dmabuf *nvmebuf; - struct lpfc_dmabuf *hbufp; uint32_t *payload; uint32_t size, oxid, sid, rc; unsigned long iflag; @@ -191,7 +190,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); if (phba->sli4_hba.nvmet_io_wait_cnt) { - hbufp = &nvmebuf->hbuf; list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, nvmebuf, struct rqb_dmabuf, hbuf.list); @@ -2164,10 +2162,6 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - if (ctxp->flag & LPFC_NVMET_ABORT_OP) - atomic_inc(&tgtp->xmt_fcp_abort_cmpl); - if (!ctxp) { /* if context is clear, related io alrady complete */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, @@ -2177,6 +2171,10 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, return; } + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (ctxp->flag & LPFC_NVMET_ABORT_OP) + atomic_inc(&tgtp->xmt_fcp_abort_cmpl); + /* Sanity check */ if (ctxp->state != LPFC_NVMET_STE_ABORT) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index fb4c708ae747..f60c9e3e37d7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13267,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6126 Receive Frame Truncated!!\n"); + /* Drop thru */ case FC_STATUS_RQ_SUCCESS: lpfc_sli4_rq_release(hrq, drq); spin_lock_irqsave(&phba->hbalock, iflags); @@ -16137,9 +16138,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, return rc; } -static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT; -static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT; - /** * lpfc_fc_frame_check - Check that this frame is a valid frame to handle * @phba: pointer to lpfc_hba struct that the frame was received on @@ -16214,22 +16212,18 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s (x%x), type:%s (x%x), " + "2538 Received frame rctl:x%x, type:x%x, " "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", - (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" : - lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, - (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ? 
- "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type], - fc_hdr->fh_type, be32_to_cpu(header[0]), - be32_to_cpu(header[1]), be32_to_cpu(header[2]), - be32_to_cpu(header[3]), be32_to_cpu(header[4]), - be32_to_cpu(header[5]), be32_to_cpu(header[6])); + fc_hdr->fh_r_ctl, fc_hdr->fh_type, + be32_to_cpu(header[0]), be32_to_cpu(header[1]), + be32_to_cpu(header[2]), be32_to_cpu(header[3]), + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, - "2539 Dropped frame rctl:%s type:%s\n", - lpfc_rctl_names[fc_hdr->fh_r_ctl], - lpfc_type_names[fc_hdr->fh_type]); + "2539 Dropped frame rctl:x%x type:x%x\n", + fc_hdr->fh_r_ctl, fc_hdr->fh_type); return 1; } -- cgit From 0cf07f84dd32639394084b9d6794424587a38789 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:10 -0700 Subject: scsi: lpfc: Add auto EQ delay logic Administrator intervention is currently required to get good numbers when switching from running latency tests to IOPS tests. The configured interrupt coalescing values will greatly effect the results of these tests. Currently, the driver has a single coalescing value set by values of the module attribute. This patch changes the driver to support auto-configuration of the coalescing value based on the total number of outstanding IOs and average number of CQEs processed per interrupt for an EQ. Values are checked every 5 seconds. The driver defaults to the automatic selection. Automatic selection can be disabled by the new lpfc_auto_imax module_parameter. Older hardware can only change interrupt coalescing by mailbox command. Newer hardware supports change via a register. The patch support both. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc.h | 3 ++ drivers/scsi/lpfc/lpfc_attr.c | 20 +++++++- drivers/scsi/lpfc/lpfc_debugfs.c | 4 +- drivers/scsi/lpfc/lpfc_hw4.h | 14 ++++++ drivers/scsi/lpfc/lpfc_init.c | 104 ++++++++++++++++++++++++++++++++++++++- drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++--- drivers/scsi/lpfc/lpfc_sli.h | 1 + drivers/scsi/lpfc/lpfc_sli4.h | 8 +-- 8 files changed, 177 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index a9d73728a68c..562dc0139735 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -756,6 +756,7 @@ struct lpfc_hba { uint8_t nvmet_support; /* driver supports NVMET */ #define LPFC_NVMET_MAX_PORTS 32 uint8_t mds_diags_support; + uint32_t initial_imax; /* HBA Config Parameters */ uint32_t cfg_ack0; @@ -777,6 +778,7 @@ struct lpfc_hba { uint32_t cfg_poll_tmo; uint32_t cfg_task_mgmt_tmo; uint32_t cfg_use_msi; + uint32_t cfg_auto_imax; uint32_t cfg_fcp_imax; uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_io_channel; @@ -1050,6 +1052,7 @@ struct lpfc_hba { uint8_t temp_sensor_support; /* Fields used for heart beat. 
*/ + unsigned long last_eqdelay_time; unsigned long last_completion_time; unsigned long skipped_hb; struct timer_list hb_tmofunc; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 8eee39de15f7..66269e342c7e 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4481,9 +4481,11 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, return -EINVAL; phba->cfg_fcp_imax = (uint32_t)val; + phba->initial_imax = phba->cfg_fcp_imax; for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT) - lpfc_modify_hba_eq_delay(phba, i); + lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT, + val); return strlen(buf); } @@ -4538,6 +4540,16 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, lpfc_fcp_imax_show, lpfc_fcp_imax_store); +/* + * lpfc_auto_imax: Controls Auto-interrupt coalescing values support. + * 0 No auto_imax support + * 1 auto imax on + * Auto imax will change the value of fcp_imax on a per EQ basis, using + * the EQ Delay Multiplier, depending on the activity for that EQ. + * Value range [0,1]. Default value is 1. + */ +LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax"); + /** * lpfc_state_show - Display current driver CPU affinity * @dev: class converted to a Scsi_host structure. @@ -5164,6 +5176,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_task_mgmt_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_nvme_oas, + &dev_attr_lpfc_auto_imax, &dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_io_channel, @@ -6182,6 +6195,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_nvme_oas_init(phba, lpfc_nvme_oas); + lpfc_auto_imax_init(phba, lpfc_auto_imax); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); @@ -6226,6 +6240,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP; } + if (phba->cfg_auto_imax && !phba->cfg_fcp_imax) + phba->cfg_auto_imax = 0; + phba->initial_imax = phba->cfg_fcp_imax; + /* A value of 0 means use the number of CPUs found in the system */ if (phba->cfg_fcp_io_channel == 0) phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index bd45c50ddcc2..cc49850e18a9 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -3265,9 +3265,9 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n%s EQ info: EQ-STAT[max:x%x noE:x%x " - "bs:x%x proc:x%llx]\n", + "bs:x%x proc:x%llx eqd %d]\n", eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, - (unsigned long long)qp->q_cnt_4); + (unsigned long long)qp->q_cnt_4, qp->q_mode); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index e0a5fce416ae..bb4715705fa3 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -197,6 +197,7 @@ struct lpfc_sli_intf { /* Delay Multiplier constant */ #define LPFC_DMULT_CONST 651042 +#define LPFC_DMULT_MAX 1023 /* Configuration of Interrupts / sec for entire HBA port */ #define LPFC_MIN_IMAX 5000 @@ -657,6 +658,15 @@ 
struct lpfc_register { #define LPFC_CTL_PORT_ER1_OFFSET 0x40C #define LPFC_CTL_PORT_ER2_OFFSET 0x410 +#define LPFC_CTL_PORT_EQ_DELAY_OFFSET 0x418 +#define lpfc_sliport_eqdelay_delay_SHIFT 16 +#define lpfc_sliport_eqdelay_delay_MASK 0xffff +#define lpfc_sliport_eqdelay_delay_WORD word0 +#define lpfc_sliport_eqdelay_id_SHIFT 0 +#define lpfc_sliport_eqdelay_id_MASK 0xfff +#define lpfc_sliport_eqdelay_id_WORD word0 +#define LPFC_SEC_TO_USEC 1000000 + /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically * reside in BAR 2. */ @@ -3258,6 +3268,10 @@ struct lpfc_sli4_parameters { #define cfg_xib_SHIFT 4 #define cfg_xib_MASK 0x00000001 #define cfg_xib_WORD word19 +#define cfg_eqdr_SHIFT 8 +#define cfg_eqdr_MASK 0x00000001 +#define cfg_eqdr_WORD word19 +#define LPFC_NODELAY_MAX_IO 32 }; #define LPFC_SET_UE_RECOVERY 0x10 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a825806036c3..9d3a12636455 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1249,6 +1249,12 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) int retval, i; struct lpfc_sli *psli = &phba->sli; LIST_HEAD(completions); + struct lpfc_queue *qp; + unsigned long time_elapsed; + uint32_t tick_cqe, max_cqe, val; + uint64_t tot, data1, data2, data3; + struct lpfc_register reg_data; + void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) @@ -1263,6 +1269,95 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) (phba->pport->fc_flag & FC_OFFLINE_MODE)) return; + if (phba->cfg_auto_imax) { + if (!phba->last_eqdelay_time) { + phba->last_eqdelay_time = jiffies; + goto skip_eqdelay; + } + time_elapsed = jiffies - phba->last_eqdelay_time; + phba->last_eqdelay_time = jiffies; + + tot = 0xffff; + /* Check outstanding IO count */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + if (phba->nvmet_support) { + spin_lock(&phba->sli4_hba.nvmet_io_lock); + tot = phba->sli4_hba.nvmet_xri_cnt - + phba->sli4_hba.nvmet_ctx_cnt; + spin_unlock(&phba->sli4_hba.nvmet_io_lock); + } else { + tot = atomic_read(&phba->fc4NvmeIoCmpls); + data1 = atomic_read( + &phba->fc4NvmeInputRequests); + data2 = atomic_read( + &phba->fc4NvmeOutputRequests); + data3 = atomic_read( + &phba->fc4NvmeControlRequests); + tot = (data1 + data2 + data3) - tot; + } + } + + /* Interrupts per sec per EQ */ + val = phba->cfg_fcp_imax / phba->io_channel_irqs; + tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */ + + /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */ + max_cqe = time_elapsed * tick_cqe; + + for (i = 0; i < phba->io_channel_irqs; i++) { + /* Fast-path EQ */ + qp = phba->sli4_hba.hba_eq[i]; + if (!qp) + continue; + + /* Use no EQ delay if we don't have many outstanding + * IOs, or if we are only processing 1 CQE/ISR or less. + * Otherwise, assume we can process up to lpfc_fcp_imax + * interrupts per HBA. 
+ */ + if (tot < LPFC_NODELAY_MAX_IO || + qp->EQ_cqe_cnt <= max_cqe) + val = 0; + else + val = phba->cfg_fcp_imax; + + if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { + /* Use EQ Delay Register method */ + + /* Convert for EQ Delay register */ + if (val) { + /* First, interrupts per sec per EQ */ + val = phba->cfg_fcp_imax / + phba->io_channel_irqs; + + /* us delay between each interrupt */ + val = LPFC_SEC_TO_USEC / val; + } + if (val != qp->q_mode) { + reg_data.word0 = 0; + bf_set(lpfc_sliport_eqdelay_id, + ®_data, qp->queue_id); + bf_set(lpfc_sliport_eqdelay_delay, + ®_data, val); + writel(reg_data.word0, eqdreg); + } + } else { + /* Use mbox command method */ + if (val != qp->q_mode) + lpfc_modify_hba_eq_delay(phba, i, + 1, val); + } + + /* + * val is cfg_fcp_imax or 0 for mbox delay or us delay + * between interrupts for EQDR. + */ + qp->q_mode = val; + qp->EQ_cqe_cnt = 0; + } + } + +skip_eqdelay: spin_lock_irq(&phba->pport->work_port_lock); if (time_after(phba->last_completion_time + @@ -7257,6 +7352,9 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; break; case LPFC_SLI_INTF_IF_TYPE_2: + phba->sli4_hba.u.if_type2.EQDregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_EQ_DELAY_OFFSET; phba->sli4_hba.u.if_type2.ERR1regaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER1_OFFSET; @@ -8783,7 +8881,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) } for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) - lpfc_modify_hba_eq_delay(phba, qidx); + lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, + phba->cfg_fcp_imax); return 0; @@ -10252,6 +10351,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp) phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; + if (bf_get(cfg_eqdr, mbx_sli4_parameters)) + phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; + /* Make sure that sge_supp_len can be handled by the driver */ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f60c9e3e37d7..040575adf9c6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13478,6 +13478,7 @@ process_cq: /* Track the max number of CQEs processed in 1 EQ */ if (ecount > cq->CQ_max_cqe) cq->CQ_max_cqe = ecount; + cq->assoc_qp->EQ_cqe_cnt += ecount; /* Catch the no cq entry condition */ if (unlikely(ecount == 0)) @@ -13569,6 +13570,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) /* Track the max number of CQEs processed in 1 EQ */ if (ecount > cq->CQ_max_cqe) cq->CQ_max_cqe = ecount; + cq->assoc_qp->EQ_cqe_cnt += ecount; /* Catch the no cq entry condition */ if (unlikely(ecount == 0)) @@ -13629,7 +13631,6 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id) /* Check device state for handling interrupt */ if (unlikely(lpfc_intr_state_check(phba))) { - eq->EQ_badstate++; /* Check again for link_state with lock held */ spin_lock_irqsave(&phba->hbalock, iflag); if (phba->link_state < LPFC_LINK_DOWN) @@ -13741,7 +13742,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) /* Check device state for handling interrupt */ if (unlikely(lpfc_intr_state_check(phba))) { - fpeq->EQ_badstate++; /* Check again for link_state with lock held */ spin_lock_irqsave(&phba->hbalock, iflag); if (phba->link_state < LPFC_LINK_DOWN) @@ -14000,14 +14000,15 @@ 
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) * fails this function will return -ENXIO. **/ int -lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq) +lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, + uint32_t numq, uint32_t imax) { struct lpfc_mbx_modify_eq_delay *eq_delay; LPFC_MBOXQ_t *mbox; struct lpfc_queue *eq; int cnt, rc, length, status = 0; uint32_t shdr_status, shdr_add_status; - uint32_t result; + uint32_t result, val; int qidx; union lpfc_sli4_cfg_shdr *shdr; uint16_t dmult; @@ -14026,22 +14027,45 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq) eq_delay = &mbox->u.mqe.un.eq_delay; /* Calculate delay multiper from maximum interrupt per second */ - result = phba->cfg_fcp_imax / phba->io_channel_irqs; + result = imax / phba->io_channel_irqs; if (result > LPFC_DMULT_CONST || result == 0) dmult = 0; else dmult = LPFC_DMULT_CONST/result - 1; + if (dmult > LPFC_DMULT_MAX) + dmult = LPFC_DMULT_MAX; cnt = 0; for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { eq = phba->sli4_hba.hba_eq[qidx]; if (!eq) continue; + eq->q_mode = imax; eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; eq_delay->u.request.eq[cnt].phase = 0; eq_delay->u.request.eq[cnt].delay_multi = dmult; cnt++; - if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT) + + /* q_mode is only used for auto_imax */ + if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { + /* Use EQ Delay Register method for q_mode */ + + /* Convert for EQ Delay register */ + val = phba->cfg_fcp_imax; + if (val) { + /* First, interrupts per sec per EQ */ + val = phba->cfg_fcp_imax / + phba->io_channel_irqs; + + /* us delay between each interrupt */ + val = LPFC_SEC_TO_USEC / val; + } + eq->q_mode = val; + } else { + eq->q_mode = imax; + } + + if (cnt >= numq) break; } eq_delay->u.request.num_eq = cnt; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 9085306ddd78..a3b1b5145d2b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -321,6 +321,7 @@ struct lpfc_sli { #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ #define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ #define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */ +#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */ struct lpfc_sli_ring *sli3_ring; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 28b75e08e044..830dc83b9c21 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -168,7 +168,7 @@ struct lpfc_queue { struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ - uint16_t sgl_list_cnt; + uint32_t q_mode; uint16_t db_format; #define LPFC_DB_RING_FORMAT 0x01 #define LPFC_DB_LIST_FORMAT 0x02 @@ -181,7 +181,7 @@ struct lpfc_queue { /* defines for EQ stats */ #define EQ_max_eqe q_cnt_1 #define EQ_no_entry q_cnt_2 -#define EQ_badstate q_cnt_3 +#define EQ_cqe_cnt q_cnt_3 #define EQ_processed q_cnt_4 /* defines for CQ stats */ @@ -523,6 +523,7 @@ struct lpfc_sli4_hba { #define SLIPORT_ERR2_REG_FAILURE_CQ 0x4 #define SLIPORT_ERR2_REG_FAILURE_BUS 0x5 #define SLIPORT_ERR2_REG_FAILURE_RQ 0x6 + void __iomem *EQDregaddr; } if_type2; } u; @@ -755,7 +756,8 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); -int lpfc_modify_hba_eq_delay(struct lpfc_hba 
*phba, uint32_t startq); +int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, + uint32_t numq, uint32_t imax); int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, -- cgit From 1080f7ec612569e47b747189b38e4b0aa9f14013 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 1 Jun 2017 21:07:11 -0700 Subject: scsi: lpfc: update to revision to 11.4.0.0 Set lpfc driver revision to 11.4.0.0 Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c2653244221c..067c9e8a4b2d 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.2.0.14" +#define LPFC_DRIVER_VERSION "11.4.0.0" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ -- cgit From f64a6988268aae866bb6ce6edb910d454ccef331 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:11 +0800 Subject: scsi: hisi_sas: fix timeout check in hisi_sas_internal_task_abort() We need to check for timeout before task status, or the task will be mistook as completed internal abort command. Also add protection for sas_task.task_state_flags in hisi_sas_tmf_timedout(). Signed-off-by: Xiang Chen Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index f720d3ced851..3605d28a2c60 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -691,8 +691,13 @@ static void hisi_sas_task_done(struct sas_task *task) static void hisi_sas_tmf_timedout(unsigned long data) { struct sas_task *task = (struct sas_task *)data; + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); - task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->slow_task->completion); } @@ -1247,6 +1252,17 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, wait_for_completion(&task->slow_task->completion); res = TMF_RESP_FUNC_FAILED; + /* Internal abort timed out */ + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + struct hisi_sas_slot *slot = task->lldd_task; + + if (slot) + slot->task = NULL; + dev_err(dev, "internal task abort: timeout.\n"); + } + } + if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { res = TMF_RESP_FUNC_COMPLETE; @@ -1259,13 +1275,6 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, goto exit; } - /* Internal abort timed out */ - if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { - dev_err(dev, "internal task abort: timeout.\n"); - } - } - exit: dev_dbg(dev, "internal task abort: task to dev %016llx task=%p " "resp: 0x%x sts 0x%x\n", -- cgit From ad6048325c7807818c6c49e485660143d97a622e Mon Sep 17 
00:00:00 2001 From: John Garry Date: Wed, 14 Jun 2017 23:33:12 +0800 Subject: scsi: hisi_sas: define hisi_sas_device.device_id as int Currently hisi_sas_device.device_id is a u64. This can create a problem in selecting the queue for a device, in that this code does a 64b division on device id. For some 32b systems, 64b division is slow and the lib reference must be explicitly included. The device id does not need to be 64b in size, so, as a solution, just make as an int. Also, struct hisi_sas_device elements are re-ordered to improve packing efficiency. Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 8 ++++---- drivers/scsi/hisi_sas/hisi_sas_main.c | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 4e28f32e90b0..b4e96fa90bc2 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -107,15 +107,15 @@ struct hisi_sas_dq { }; struct hisi_sas_device { - enum sas_device_type dev_type; struct hisi_hba *hisi_hba; struct domain_device *sas_device; + struct list_head list; u64 attached_phy; - u64 device_id; atomic64_t running_req; - struct list_head list; - u8 dev_status; + enum sas_device_type dev_type; + int device_id; int sata_idx; + u8 dev_status; }; struct hisi_sas_slot { diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 3605d28a2c60..54e0cf270c99 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -209,7 +209,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, if (DEV_IS_GONE(sas_dev)) { if (sas_dev) - dev_info(dev, "task prep: device %llu not ready\n", + dev_info(dev, "task prep: device %d not ready\n", sas_dev->device_id); else dev_info(dev, "task prep: device %016llx not ready\n", @@ -627,9 +627,9 @@ static void hisi_sas_dev_gone(struct domain_device *device) struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = &hisi_hba->pdev->dev; - u64 dev_id = sas_dev->device_id; + int dev_id = sas_dev->device_id; - dev_info(dev, "found dev[%lld:%x] is gone\n", + dev_info(dev, "found dev[%d:%x] is gone\n", sas_dev->device_id, sas_dev->dev_type); hisi_sas_internal_task_abort(hisi_hba, device, @@ -1082,7 +1082,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) } out: if (rc != TMF_RESP_FUNC_COMPLETE) - dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n", + dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", sas_dev->device_id, rc); return rc; } @@ -1129,7 +1129,7 @@ static int hisi_sas_query_task(struct sas_task *task) } static int -hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id, +hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, struct sas_task *task, int abort_flag, int task_tag) { -- cgit From b1a49412f0aed757e7632f9276acdf2fb8f3832e Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:13 +0800 Subject: scsi: hisi_sas: optimise the usage of hisi_hba.lock Currently hisi_hba.lock is locked to deliver and receive a command to/from any hw queue. This causes much contention at high data-rates. To boost performance, lock on a per queue basis for sending and receiving commands to/from hw. Certain critical regions still need to be locked in the delivery and completion stages with hisi_hba.lock. 
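As an illustration of that locking split, the following is a minimal, hedged sketch of the pattern (assumed, simplified example_* types and helpers; not the driver's actual symbols or code):

#include <linux/spinlock.h>

/* Hypothetical per-queue and per-controller structures for the sketch. */
struct example_dq {
	spinlock_t lock;	/* per-queue: protects command prep and the write pointer */
	int wr_point;
};

struct example_hba {
	spinlock_t lock;	/* global: still protects the shared slot-index bitmap */
	struct example_dq dq[16];
};

/* Hypothetical helpers assumed to exist elsewhere in the sketch. */
int example_slot_index_alloc(struct example_hba *hba);
int example_prep_cmd(struct example_dq *dq, int slot_idx);
void example_start_delivery(struct example_dq *dq);

static int example_deliver(struct example_hba *hba, struct example_dq *dq)
{
	unsigned long flags;
	int slot_idx, rc;

	/* Hold the global lock only while allocating from the shared bitmap. */
	spin_lock_irqsave(&hba->lock, flags);
	slot_idx = example_slot_index_alloc(hba);
	spin_unlock_irqrestore(&hba->lock, flags);
	if (slot_idx < 0)
		return slot_idx;

	/* The per-queue lock covers prep and the doorbell write for this queue only. */
	spin_lock_irqsave(&dq->lock, flags);
	rc = example_prep_cmd(dq, slot_idx);
	if (!rc)
		example_start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return rc;
}

The diff below applies the same split to hisi_sas_task_prep()/hisi_sas_task_exec(), the internal abort path and cq_tasklet_v2_hw().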
New element hisi_sas_device.dq is added to store the delivery queue for a device, so it does not need to be needlessly re-calculated for every task. Signed-off-by: Xiang Chen Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 9 +++-- drivers/scsi/hisi_sas/hisi_sas_main.c | 69 +++++++++++++++++++++++----------- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 23 ++++-------- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 34 ++++++++--------- 4 files changed, 77 insertions(+), 58 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index b4e96fa90bc2..68ba7bd229e8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -102,6 +102,8 @@ struct hisi_sas_cq { struct hisi_sas_dq { struct hisi_hba *hisi_hba; + struct hisi_sas_slot *slot_prep; + spinlock_t lock; int wr_point; int id; }; @@ -109,6 +111,7 @@ struct hisi_sas_dq { struct hisi_sas_device { struct hisi_hba *hisi_hba; struct domain_device *sas_device; + struct hisi_sas_dq *dq; struct list_head list; u64 attached_phy; atomic64_t running_req; @@ -154,9 +157,8 @@ struct hisi_sas_hw { struct domain_device *device); struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); - int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id, - int *q, int *s); - void (*start_delivery)(struct hisi_hba *hisi_hba); + int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq); + void (*start_delivery)(struct hisi_sas_dq *dq); int (*prep_ssp)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int is_tmf, struct hisi_sas_tmf_task *tmf); @@ -217,7 +219,6 @@ struct hisi_hba { struct hisi_sas_port port[HISI_SAS_MAX_PHYS]; int queue_count; - struct hisi_sas_slot *slot_prep; struct dma_pool *sge_page_pool; struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES]; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 54e0cf270c99..4e78cbcd0cf2 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -179,10 +179,11 @@ out: task->task_done(task); } -static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, - int is_tmf, struct hisi_sas_tmf_task *tmf, - int *pass) +static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq + *dq, int is_tmf, struct hisi_sas_tmf_task *tmf, + int *pass) { + struct hisi_hba *hisi_hba = dq->hisi_hba; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_port *port; @@ -240,18 +241,24 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, } else n_elem = task->num_scatter; + spin_lock_irqsave(&hisi_hba->lock, flags); if (hisi_hba->hw->slot_index_alloc) rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, device); else rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); - if (rc) + if (rc) { + spin_unlock_irqrestore(&hisi_hba->lock, flags); goto err_out; - rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id, - &dlvry_queue, &dlvry_queue_slot); + } + spin_unlock_irqrestore(&hisi_hba->lock, flags); + + rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); if (rc) goto err_out_tag; + dlvry_queue = dq->id; + dlvry_queue_slot = dq->wr_point; slot = &hisi_hba->slot_info[slot_idx]; memset(slot, 0, sizeof(struct hisi_sas_slot)); @@ -316,7 +323,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, task->task_state_flags |= 
SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); - hisi_hba->slot_prep = slot; + dq->slot_prep = slot; atomic64_inc(&sas_dev->running_req); ++(*pass); @@ -335,7 +342,9 @@ err_out_status_buf: err_out_slot_buf: /* Nothing to be done */ err_out_tag: + spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_index_free(hisi_hba, slot_idx); + spin_unlock_irqrestore(&hisi_hba->lock, flags); err_out: dev_err(dev, "task prep: failed[%d]!\n", rc); if (!sas_protocol_ata(task->task_proto)) @@ -354,19 +363,22 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, unsigned long flags; struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); struct device *dev = &hisi_hba->pdev->dev; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_dq *dq = sas_dev->dq; if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) return -EINVAL; /* protect task_prep and start_delivery sequence */ - spin_lock_irqsave(&hisi_hba->lock, flags); - rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass); + spin_lock_irqsave(&dq->lock, flags); + rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass); if (rc) dev_err(dev, "task exec: failed[%d]!\n", rc); if (likely(pass)) - hisi_hba->hw->start_delivery(hisi_hba); - spin_unlock_irqrestore(&hisi_hba->lock, flags); + hisi_hba->hw->start_delivery(dq); + spin_unlock_irqrestore(&dq->lock, flags); return rc; } @@ -421,12 +433,16 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) spin_lock(&hisi_hba->lock); for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { + int queue = i % hisi_hba->queue_count; + struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; + hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; sas_dev->dev_status = HISI_SAS_DEV_NORMAL; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; + sas_dev->dq = dq; INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } @@ -1140,8 +1156,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, struct hisi_sas_slot *slot; struct asd_sas_port *sas_port = device->port; struct hisi_sas_cmd_hdr *cmd_hdr_base; + struct hisi_sas_dq *dq = sas_dev->dq; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; - unsigned long flags; + unsigned long flags, flags_dq; if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) return -EINVAL; @@ -1152,14 +1169,22 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, port = to_hisi_sas_port(sas_port); /* simply get a slot and send abort command */ + spin_lock_irqsave(&hisi_hba->lock, flags); rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); - if (rc) + if (rc) { + spin_unlock_irqrestore(&hisi_hba->lock, flags); goto err_out; - rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id, - &dlvry_queue, &dlvry_queue_slot); + } + spin_unlock_irqrestore(&hisi_hba->lock, flags); + + spin_lock_irqsave(&dq->lock, flags_dq); + rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); if (rc) goto err_out_tag; + dlvry_queue = dq->id; + dlvry_queue_slot = dq->wr_point; + slot = &hisi_hba->slot_info[slot_idx]; memset(slot, 0, sizeof(struct hisi_sas_slot)); @@ -1186,17 +1211,21 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); - hisi_hba->slot_prep = slot; + dq->slot_prep = slot; 
atomic64_inc(&sas_dev->running_req); - /* send abort command to our chip */ - hisi_hba->hw->start_delivery(hisi_hba); + /* send abort command to the chip */ + hisi_hba->hw->start_delivery(dq); + spin_unlock_irqrestore(&dq->lock, flags_dq); return 0; err_out_tag: + spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_index_free(hisi_hba, slot_idx); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock_irqrestore(&dq->lock, flags_dq); err_out: dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); @@ -1221,7 +1250,6 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev = device->lldd_dev; struct device *dev = &hisi_hba->pdev->dev; int res; - unsigned long flags; if (!hisi_hba->hw->prep_abort) return -EOPNOTSUPP; @@ -1238,11 +1266,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110); add_timer(&task->slow_task->timer); - /* Lock as we are alloc'ing a slot, which cannot be interrupted */ - spin_lock_irqsave(&hisi_hba->lock, flags); res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, task, abort_flag, tag); - spin_unlock_irqrestore(&hisi_hba->lock, flags); if (res) { del_timer(&task->slow_task->timer); dev_err(dev, "internal task abort: executing internal task failed: %d\n", diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index fc1c1b2c1a19..7d7d2a7e690d 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -900,22 +900,17 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) return bitmap; } -/** - * This function allocates across all queues to load balance. - * Slots are allocated from queues in a round-robin fashion. - * +/* * The callpath to this function and upto writing the write * queue pointer should be safe from interruption. 
*/ -static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id, - int *q, int *s) +static int +get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) { struct device *dev = &hisi_hba->pdev->dev; - struct hisi_sas_dq *dq; + int queue = dq->id; u32 r, w; - int queue = dev_id % hisi_hba->queue_count; - dq = &hisi_hba->dq[queue]; w = dq->wr_point; r = hisi_sas_read32_relaxed(hisi_hba, DLVRY_Q_0_RD_PTR + (queue * 0x14)); @@ -924,16 +919,14 @@ static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id, return -EAGAIN; } - *q = queue; - *s = w; return 0; } -static void start_delivery_v1_hw(struct hisi_hba *hisi_hba) +static void start_delivery_v1_hw(struct hisi_sas_dq *dq) { - int dlvry_queue = hisi_hba->slot_prep->dlvry_queue; - int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot; - struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue]; + struct hisi_hba *hisi_hba = dq->hisi_hba; + int dlvry_queue = dq->slot_prep->dlvry_queue; + int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index e241921bee10..2607aac00ac9 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -695,6 +695,9 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) if (sata_dev && (i & 1)) continue; if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { + int queue = i % hisi_hba->queue_count; + struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; + hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; sas_dev->dev_status = HISI_SAS_DEV_NORMAL; @@ -702,6 +705,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; sas_dev->sata_idx = sata_idx; + sas_dev->dq = dq; INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } @@ -1454,22 +1458,17 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) return bitmap; } -/** - * This function allocates across all queues to load balance. - * Slots are allocated from queues in a round-robin fashion. - * +/* * The callpath to this function and upto writing the write * queue pointer should be safe from interruption. 
*/ -static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id, - int *q, int *s) +static int +get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) { struct device *dev = &hisi_hba->pdev->dev; - struct hisi_sas_dq *dq; + int queue = dq->id; u32 r, w; - int queue = dev_id % hisi_hba->queue_count; - dq = &hisi_hba->dq[queue]; w = dq->wr_point; r = hisi_sas_read32_relaxed(hisi_hba, DLVRY_Q_0_RD_PTR + (queue * 0x14)); @@ -1479,16 +1478,14 @@ static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id, return -EAGAIN; } - *q = queue; - *s = w; return 0; } -static void start_delivery_v2_hw(struct hisi_hba *hisi_hba) +static void start_delivery_v2_hw(struct hisi_sas_dq *dq) { - int dlvry_queue = hisi_hba->slot_prep->dlvry_queue; - int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot; - struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue]; + struct hisi_hba *hisi_hba = dq->hisi_hba; + int dlvry_queue = dq->slot_prep->dlvry_queue; + int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), @@ -2344,7 +2341,9 @@ out: spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); + spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_task_free(hisi_hba, task, slot); + spin_unlock_irqrestore(&hisi_hba->lock, flags); sts = ts->stat; if (task->task_done) @@ -3162,13 +3161,14 @@ static void cq_tasklet_v2_hw(unsigned long val) struct hisi_sas_complete_v2_hdr *complete_queue; u32 rd_point = cq->rd_point, wr_point, dev_id; int queue = cq->id; + struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; if (unlikely(hisi_hba->reject_stp_links_msk)) phys_try_accept_stp_links_v2_hw(hisi_hba); complete_queue = hisi_hba->complete_hdr[queue]; - spin_lock(&hisi_hba->lock); + spin_lock(&dq->lock); wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); @@ -3218,7 +3218,7 @@ static void cq_tasklet_v2_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); - spin_unlock(&hisi_hba->lock); + spin_unlock(&dq->lock); } static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) -- cgit From 6c7bb8a1942a2a11b77f208910fc57047c62c77b Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:14 +0800 Subject: scsi: hisi_sas: relocate get_ata_protocol() Relocate get_ata_protocol() to a common location, as future hw versions will require it. Also rename with "hisi_sas_" prefix for consistency. Signed-off-by: Xiang Chen Signed-off-by: John Garry Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 7 ++++ drivers/scsi/hisi_sas/hisi_sas_main.c | 59 ++++++++++++++++++++++++++++++ drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 67 +--------------------------------- 3 files changed, 68 insertions(+), 65 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 68ba7bd229e8..a50c69989352 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -46,6 +46,12 @@ ((type == SAS_EDGE_EXPANDER_DEVICE) || \ (type == SAS_FANOUT_EXPANDER_DEVICE)) +#define HISI_SAS_SATA_PROTOCOL_NONDATA 0x1 +#define HISI_SAS_SATA_PROTOCOL_PIO 0x2 +#define HISI_SAS_SATA_PROTOCOL_DMA 0x4 +#define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8 +#define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10 + struct hisi_hba; enum { @@ -356,6 +362,7 @@ union hisi_sas_command_table { struct hisi_sas_command_table_stp stp; }; +extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 4e78cbcd0cf2..5b51d9af2013 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -23,6 +23,65 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, int abort_flag, int tag); static int hisi_sas_softreset_ata_disk(struct domain_device *device); +u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) +{ + switch (cmd) { + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_FPDMA_READ: + case ATA_CMD_FPDMA_RECV: + case ATA_CMD_FPDMA_SEND: + case ATA_CMD_NCQ_NON_DATA: + return HISI_SAS_SATA_PROTOCOL_FPDMA; + + case ATA_CMD_DOWNLOAD_MICRO: + case ATA_CMD_ID_ATA: + case ATA_CMD_PMP_READ: + case ATA_CMD_READ_LOG_EXT: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PMP_WRITE: + case ATA_CMD_WRITE_LOG_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + return HISI_SAS_SATA_PROTOCOL_PIO; + + case ATA_CMD_DSM: + case ATA_CMD_DOWNLOAD_MICRO_DMA: + case ATA_CMD_PMP_READ_DMA: + case ATA_CMD_PMP_WRITE_DMA: + case ATA_CMD_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ_LOG_DMA_EXT: + case ATA_CMD_READ_STREAM_DMA_EXT: + case ATA_CMD_TRUSTED_RCV_DMA: + case ATA_CMD_TRUSTED_SND_DMA: + case ATA_CMD_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_FUA_EXT: + case ATA_CMD_WRITE_QUEUED: + case ATA_CMD_WRITE_LOG_DMA_EXT: + case ATA_CMD_WRITE_STREAM_DMA_EXT: + return HISI_SAS_SATA_PROTOCOL_DMA; + + case ATA_CMD_CHK_POWER: + case ATA_CMD_DEV_RESET: + case ATA_CMD_EDD: + case ATA_CMD_FLUSH: + case ATA_CMD_FLUSH_EXT: + case ATA_CMD_VERIFY: + case ATA_CMD_VERIFY_EXT: + case ATA_CMD_SET_FEATURES: + case ATA_CMD_STANDBY: + case ATA_CMD_STANDBYNOW1: + return HISI_SAS_SATA_PROTOCOL_NONDATA; + default: + if (direction == DMA_NONE) + return HISI_SAS_SATA_PROTOCOL_NONDATA; + return HISI_SAS_SATA_PROTOCOL_PIO; + } +} +EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); + static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) { return device->port->ha->lldd_ha; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 2607aac00ac9..d9314c4eff0f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -554,12 +554,6 @@ enum { #define DIR_TO_DEVICE 2 #define DIR_RESERVED 3 -#define SATA_PROTOCOL_NONDATA 0x1 -#define SATA_PROTOCOL_PIO 0x2 -#define SATA_PROTOCOL_DMA 0x4 -#define 
SATA_PROTOCOL_FPDMA 0x8 -#define SATA_PROTOCOL_ATAPI 0x10 - #define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \ err_phase == 0x4 || err_phase == 0x8 ||\ err_phase == 0x6 || err_phase == 0xa) @@ -2352,64 +2346,6 @@ out: return sts; } -static u8 get_ata_protocol(u8 cmd, int direction) -{ - switch (cmd) { - case ATA_CMD_FPDMA_WRITE: - case ATA_CMD_FPDMA_READ: - case ATA_CMD_FPDMA_RECV: - case ATA_CMD_FPDMA_SEND: - case ATA_CMD_NCQ_NON_DATA: - return SATA_PROTOCOL_FPDMA; - - case ATA_CMD_DOWNLOAD_MICRO: - case ATA_CMD_ID_ATA: - case ATA_CMD_PMP_READ: - case ATA_CMD_READ_LOG_EXT: - case ATA_CMD_PIO_READ: - case ATA_CMD_PIO_READ_EXT: - case ATA_CMD_PMP_WRITE: - case ATA_CMD_WRITE_LOG_EXT: - case ATA_CMD_PIO_WRITE: - case ATA_CMD_PIO_WRITE_EXT: - return SATA_PROTOCOL_PIO; - - case ATA_CMD_DSM: - case ATA_CMD_DOWNLOAD_MICRO_DMA: - case ATA_CMD_PMP_READ_DMA: - case ATA_CMD_PMP_WRITE_DMA: - case ATA_CMD_READ: - case ATA_CMD_READ_EXT: - case ATA_CMD_READ_LOG_DMA_EXT: - case ATA_CMD_READ_STREAM_DMA_EXT: - case ATA_CMD_TRUSTED_RCV_DMA: - case ATA_CMD_TRUSTED_SND_DMA: - case ATA_CMD_WRITE: - case ATA_CMD_WRITE_EXT: - case ATA_CMD_WRITE_FUA_EXT: - case ATA_CMD_WRITE_QUEUED: - case ATA_CMD_WRITE_LOG_DMA_EXT: - case ATA_CMD_WRITE_STREAM_DMA_EXT: - return SATA_PROTOCOL_DMA; - - case ATA_CMD_CHK_POWER: - case ATA_CMD_DEV_RESET: - case ATA_CMD_EDD: - case ATA_CMD_FLUSH: - case ATA_CMD_FLUSH_EXT: - case ATA_CMD_VERIFY: - case ATA_CMD_VERIFY_EXT: - case ATA_CMD_SET_FEATURES: - case ATA_CMD_STANDBY: - case ATA_CMD_STANDBYNOW1: - return SATA_PROTOCOL_NONDATA; - default: - if (direction == DMA_NONE) - return SATA_PROTOCOL_NONDATA; - return SATA_PROTOCOL_PIO; - } -} - static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag) { struct ata_queued_cmd *qc = task->uldd_task; @@ -2464,7 +2400,8 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF; - dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir)) + dw1 |= (hisi_sas_get_ata_protocol( + task->ata_task.fis.command, task->data_dir)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); -- cgit From 759040770dbc7c8c53aa23552d2d955e80c91ce6 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:15 +0800 Subject: scsi: hisi_sas: relocate sata_done_v2_hw() Relocate get_ata_protocol() to a common location, as future hw versions will require it. Also rename with "hisi_sas_" prefix for consistency. Signed-off-by: Xiang Chen Signed-off-by: John Garry Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 2 ++ drivers/scsi/hisi_sas/hisi_sas_main.c | 15 +++++++++++++++ drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 18 ++---------------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index a50c69989352..1dcdf66906ac 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -364,6 +364,8 @@ union hisi_sas_command_table { extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); +extern void hisi_sas_sata_done(struct sas_task *task, + struct hisi_sas_slot *slot); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); extern int hisi_sas_remove(struct platform_device *pdev); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 5b51d9af2013..ab133d4dd827 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -82,6 +82,21 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) } EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); +void hisi_sas_sata_done(struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; + struct dev_to_host_fis *d2h = slot->status_buffer + + sizeof(struct hisi_sas_err_record); + + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); + + ts->buf_valid_size = sizeof(*resp); +} +EXPORT_SYMBOL_GPL(hisi_sas_sata_done); + static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) { return device->port->ha->lldd_ha; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index d9314c4eff0f..fdd7019ef299 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -1683,20 +1683,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, return 0; } -static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task, - struct hisi_sas_slot *slot) -{ - struct task_status_struct *ts = &task->task_status; - struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; - struct dev_to_host_fis *d2h = slot->status_buffer + - sizeof(struct hisi_sas_err_record); - - resp->frame_len = sizeof(struct dev_to_host_fis); - memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); - - ts->buf_valid_size = sizeof(*resp); -} - #define TRANS_TX_ERR 0 #define TRANS_RX_ERR 1 #define DMA_TX_ERR 2 @@ -2189,7 +2175,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, break; } } - sata_done_v2_hw(hisi_hba, task, slot); + hisi_sas_sata_done(task, slot); } break; default: @@ -2317,7 +2303,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { ts->stat = SAM_STAT_GOOD; - sata_done_v2_hw(hisi_hba, task, slot); + hisi_sas_sata_done(task, slot); break; } default: -- cgit From 318913c63c5d435cba30c7f744bd0f24d7295516 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:16 +0800 Subject: scsi: hisi_sas: relocate get_ncq_tag_v2_hw() Relocate get_ncq_tag_v2_hw() to a common location, as future hw versions will require it. Also rename with "hisi_sas_" prefix for consistency. Signed-off-by: Xiang Chen Signed-off-by: John Garry Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 1 + drivers/scsi/hisi_sas/hisi_sas_main.c | 15 +++++++++++++++ drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 16 +--------------- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 1dcdf66906ac..19c6ffd6d4ff 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -366,6 +366,7 @@ extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot); +extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); extern int hisi_sas_remove(struct platform_device *pdev); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index ab133d4dd827..f53a93b1f955 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -97,6 +97,21 @@ void hisi_sas_sata_done(struct sas_task *task, } EXPORT_SYMBOL_GPL(hisi_sas_sata_done); +int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ) { + *tag = qc->tag; + return 1; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); + static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) { return device->port->ha->lldd_ha; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index fdd7019ef299..9cc54357eeb0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2332,20 +2332,6 @@ out: return sts; } -static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag) -{ - struct ata_queued_cmd *qc = task->uldd_task; - - if (qc) { - if (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ) { - *tag = qc->tag; - return 1; - } - } - return 0; -} - static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { @@ -2393,7 +2379,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, hdr->dw1 = cpu_to_le32(dw1); /* dw2 */ - if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) { + if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) { task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } -- cgit From 11b752490a051b79de183fee73706e13d80d3998 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 14 Jun 2017 23:33:17 +0800 Subject: scsi: hisi_sas: add pci_dev in hisi_hba struct Since hip08 SAS controller is based on pci device, add hisi_hba.pci_dev for hip08 (will be v3), and also rename hisi_hba.pdev to .platform_dev for clarity. In addition, for common code which wants to reference the controller device struct, add hisi_hba.dev, and change the common code to use it. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 6 ++++- drivers/scsi/hisi_sas/hisi_sas_main.c | 36 ++++++++++++++-------------- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 28 +++++++++++----------- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 44 +++++++++++++++++----------------- 4 files changed, 59 insertions(+), 55 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 19c6ffd6d4ff..84cac98b368a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -196,7 +197,10 @@ struct hisi_hba { /* This must be the first element, used by SHOST_TO_SAS_HA */ struct sas_ha_struct *p; - struct platform_device *pdev; + struct platform_device *platform_dev; + struct pci_dev *pci_dev; + struct device *dev; + void __iomem *regs; struct regmap *ctrl; u32 ctrl_reset_reg; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index f53a93b1f955..139df4509a41 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -168,7 +168,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, { if (task) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -245,7 +245,7 @@ static void hisi_sas_slot_abort(struct work_struct *work) struct scsi_cmnd *cmnd = task->uldd_task; struct hisi_sas_tmf_task tmf_task; struct scsi_lun lun; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int tag = abort_slot->idx; unsigned long flags; @@ -279,7 +279,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq struct hisi_sas_slot *slot; struct hisi_sas_cmd_hdr *cmd_hdr_base; struct asd_sas_port *sas_port = device->port; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; unsigned long flags; @@ -451,7 +451,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, u32 pass = 0; unsigned long flags; struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_dq *dq = sas_dev->dq; @@ -546,7 +546,7 @@ static int hisi_sas_dev_found(struct domain_device *device) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct domain_device *parent_dev = device->parent; struct hisi_sas_device *sas_dev; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; if (hisi_hba->hw->alloc_dev) sas_dev = hisi_hba->hw->alloc_dev(device); @@ -731,7 +731,7 @@ static void hisi_sas_dev_gone(struct domain_device *device) { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int dev_id = sas_dev->device_id; dev_info(dev, "found dev[%d:%x] is gone\n", @@ -814,7 +814,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = sas_dev->hisi_hba; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct sas_task *task; int res, retry; @@ -931,7 +931,7 @@ 
static int hisi_sas_softreset_ata_disk(struct domain_device *device) struct ata_link *link; int rc = TMF_RESP_FUNC_FAILED; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int s = sizeof(struct host_to_dev_fis); unsigned long flags; @@ -989,7 +989,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) return -1; if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct sas_ha_struct *sas_ha = &hisi_hba->sha; unsigned long flags; @@ -1022,7 +1022,7 @@ static int hisi_sas_abort_task(struct sas_task *task) struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; unsigned long flags; @@ -1151,7 +1151,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; unsigned long flags; int rc = TMF_RESP_FUNC_FAILED; @@ -1240,7 +1240,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, { struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct hisi_sas_port *port; struct hisi_sas_slot *slot; struct asd_sas_port *sas_port = device->port; @@ -1337,7 +1337,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, { struct sas_task *task; struct hisi_sas_device *sas_dev = device->lldd_dev; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int res; if (!hisi_hba->hw->prep_abort) @@ -1547,8 +1547,7 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem); static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) { - struct platform_device *pdev = hisi_hba->pdev; - struct device *dev = &pdev->dev; + struct device *dev = hisi_hba->dev; int i, s, max_command_entries = hisi_hba->hw->max_command_entries; spin_lock_init(&hisi_hba->lock); @@ -1668,7 +1667,7 @@ err_out: static void hisi_sas_free(struct hisi_hba *hisi_hba) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int i, s, max_command_entries = hisi_hba->hw->max_command_entries; for (i = 0; i < hisi_hba->queue_count; i++) { @@ -1749,7 +1748,8 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); hisi_hba->hw = hw; - hisi_hba->pdev = pdev; + hisi_hba->platform_dev = pdev; + hisi_hba->dev = dev; hisi_hba->shost = shost; SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; @@ -1866,7 +1866,7 @@ int hisi_sas_probe(struct platform_device *pdev, shost->cmd_per_lun = hisi_hba->hw->max_command_entries; sha->sas_ha_name = DRV_NAME; - sha->dev = &hisi_hba->pdev->dev; + sha->dev = hisi_hba->dev; sha->lldd_module = THIS_MODULE; sha->sas_addr = &hisi_hba->sas_addr[0]; sha->num_phys = hisi_hba->n_phy; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 7d7d2a7e690d..afa87d4c367a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -505,7 +505,7 @@ static void setup_itct_v1_hw(struct 
hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { struct domain_device *device = sas_dev->sas_device; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u64 qw0, device_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; struct asd_sas_port *sas_port = device->port; @@ -571,7 +571,7 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba) int i; unsigned long end_time; u32 val; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; for (i = 0; i < hisi_hba->n_phy; i++) { u32 phy_ctrl = hisi_sas_phy_read32(hisi_hba, i, PHY_CTRL); @@ -756,7 +756,7 @@ static void init_reg_v1_hw(struct hisi_hba *hisi_hba) static int hw_init_v1_hw(struct hisi_hba *hisi_hba) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int rc; rc = reset_hw_v1_hw(hisi_hba); @@ -907,7 +907,7 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int queue = dq->id; u32 r, w; @@ -939,7 +939,7 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, struct scatterlist *scatter, int n_elem) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; @@ -976,7 +976,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba, struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct hisi_sas_port *port = slot->port; struct scatterlist *sg_req, *sg_resp; struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -1148,7 +1148,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba, { struct task_status_struct *ts = &task->task_status; struct hisi_sas_err_record_v1 *err_record = slot->status_buffer; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; switch (task->task_proto) { case SAS_PROTOCOL_SSP: @@ -1274,7 +1274,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, { struct sas_task *task = slot->task; struct hisi_sas_device *sas_dev; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct task_status_struct *ts; struct domain_device *device; enum exec_status sts; @@ -1423,7 +1423,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) { struct hisi_sas_phy *phy = p; struct hisi_hba *hisi_hba = phy->hisi_hba; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct asd_sas_phy *sas_phy = &phy->sas_phy; int i, phy_no = sas_phy->id; u32 irq_value, context, port_id, link_rate; @@ -1504,7 +1504,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p) struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_ha_struct *sha = &hisi_hba->sha; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int phy_no = sas_phy->id; u32 irq_value; irqreturn_t res = IRQ_HANDLED; @@ -1531,7 +1531,7 @@ static irqreturn_t int_abnormal_v1_hw(int irq, void *p) { struct hisi_sas_phy *phy = p; struct hisi_hba *hisi_hba = phy->hisi_hba; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct asd_sas_phy *sas_phy = &phy->sas_phy; u32 irq_value, irq_mask_old; int phy_no = sas_phy->id; @@ -1634,7 +1634,7 @@ static 
irqreturn_t cq_interrupt_v1_hw(int irq, void *p) static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p) { struct hisi_hba *hisi_hba = p; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 ecc_int = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); if (ecc_int & SAS_ECC_INTR_DQ_ECC1B_MSK) { @@ -1693,7 +1693,7 @@ static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p) static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p) { struct hisi_hba *hisi_hba = p; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 axi_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC2); u32 axi_info = hisi_sas_read32(hisi_hba, HGC_AXI_FIFO_ERR_INFO); @@ -1731,7 +1731,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_MAX_QUEUES] = { static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) { - struct platform_device *pdev = hisi_hba->pdev; + struct platform_device *pdev = hisi_hba->platform_dev; struct device *dev = &pdev->dev; int i, j, irq, rc, idx; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 9cc54357eeb0..341a0bf6c667 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -653,7 +653,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx) { unsigned int index; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; void *bitmap = hisi_hba->sata_dev_bitmap; index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW); @@ -754,7 +754,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { struct domain_device *device = sas_dev->sas_device; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u64 qw0, device_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; struct domain_device *parent_dev = device->parent; @@ -807,7 +807,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { u64 dev_id = sas_dev->device_id; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); int i; @@ -851,7 +851,7 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) int i, reset_val; u32 val; unsigned long end_time; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; /* The mask needs to be set depending on the number of phys */ if (hisi_hba->n_phy == 9) @@ -987,7 +987,7 @@ static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba) static void init_reg_v2_hw(struct hisi_hba *hisi_hba) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int i; /* Global registers init */ @@ -1168,7 +1168,7 @@ static void set_link_timer_quirk(struct hisi_hba *hisi_hba) static int hw_init_v2_hw(struct hisi_hba *hisi_hba) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int rc; rc = reset_hw_v2_hw(hisi_hba); @@ -1217,7 +1217,7 @@ static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no) static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { int i, max_loop = 1000; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 status, axi_status, dfx_val, dfx_tx_val; for (i = 0; i < max_loop; i++) { @@ -1243,7 +1243,7 @@ static bool 
axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no) static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { int i, max_loop = 1000; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 status, tx_dfx0; for (i = 0; i < max_loop; i++) { @@ -1281,7 +1281,7 @@ static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg, axi_val, dfx0_val, txid_auto; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; /* Close axi bus. */ axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + @@ -1459,7 +1459,7 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; int queue = dq->id; u32 r, w; @@ -1492,7 +1492,7 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, struct scatterlist *scatter, int n_elem) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; @@ -1529,7 +1529,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct hisi_sas_port *port = slot->port; struct scatterlist *sg_req, *sg_resp; struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -2188,7 +2188,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_device *sas_dev; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct task_status_struct *ts; struct domain_device *device; enum exec_status sts; @@ -2486,7 +2486,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) u32 port_id, link_rate, hard_phy_linkrate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; @@ -2673,7 +2673,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 ent_msk, ent_tmp, irq_msk; int phy_no = 0; @@ -2733,7 +2733,7 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) static void one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 reg_val; if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) { @@ -2822,7 +2822,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) { u32 reg_val; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); @@ -2972,7 +2972,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; u32 irq_value, irq_msk, err_value; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; irq_msk 
= hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); @@ -3148,7 +3148,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) struct hisi_sas_phy *phy = p; struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; struct hisi_sas_initial_fis *initial_fis; struct dev_to_host_fis *fis; u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; @@ -3250,7 +3250,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = { */ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) { - struct platform_device *pdev = hisi_hba->pdev; + struct platform_device *pdev = hisi_hba->platform_dev; struct device *dev = &pdev->dev; int i, irq, rc, irq_map[128]; @@ -3364,7 +3364,7 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) { - struct platform_device *pdev = hisi_hba->pdev; + struct platform_device *pdev = hisi_hba->platform_dev; int i; for (i = 0; i < hisi_hba->queue_count; i++) @@ -3386,7 +3386,7 @@ static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) { - struct device *dev = &hisi_hba->pdev->dev; + struct device *dev = hisi_hba->dev; u32 old_state, state; int rc, cnt; int phy_no; -- cgit From 0fa24c19d844945b6edf981ff425c93f20085f10 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 14 Jun 2017 23:33:18 +0800 Subject: scsi: hisi_sas: create hisi_sas_get_fw_info() Move the functionality to retrieve the fw info into a dedicated device type-agnostic function, hisi_sas_get_fw_info(). The reasoning is that this function will be required for future pci-based platforms. Also add some debug logs for failure. Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 1 + drivers/scsi/hisi_sas/hisi_sas_main.c | 107 ++++++++++++++++++++++------------ 2 files changed, 71 insertions(+), 37 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 84cac98b368a..c1f6669ab3f8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -371,6 +371,7 @@ extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot); extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag); +extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); extern int hisi_sas_remove(struct platform_device *pdev); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 139df4509a41..8a2af906e1ad 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1729,66 +1729,99 @@ static void hisi_sas_rst_work_handler(struct work_struct *work) hisi_sas_controller_reset(hisi_hba); } -static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, - const struct hisi_sas_hw *hw) +int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) { - struct resource *res; - struct Scsi_Host *shost; - struct hisi_hba *hisi_hba; - struct device *dev = &pdev->dev; - struct device_node *np = pdev->dev.of_node; + struct device *dev = hisi_hba->dev; + struct platform_device *pdev = hisi_hba->platform_dev; + struct device_node *np = pdev ? 
pdev->dev.of_node : NULL; struct clk *refclk; - shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba)); - if (!shost) { - dev_err(dev, "scsi host alloc failed\n"); - return NULL; - } - hisi_hba = shost_priv(shost); - - INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); - hisi_hba->hw = hw; - hisi_hba->platform_dev = pdev; - hisi_hba->dev = dev; - hisi_hba->shost = shost; - SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; - - init_timer(&hisi_hba->timer); - if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, - SAS_ADDR_SIZE)) - goto err_out; + SAS_ADDR_SIZE)) { + dev_err(dev, "could not get property sas-addr\n"); + return -ENOENT; + } if (np) { + /* + * These properties are only required for platform device-based + * controller with DT firmware. + */ hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, "hisilicon,sas-syscon"); - if (IS_ERR(hisi_hba->ctrl)) - goto err_out; + if (IS_ERR(hisi_hba->ctrl)) { + dev_err(dev, "could not get syscon\n"); + return -ENOENT; + } if (device_property_read_u32(dev, "ctrl-reset-reg", - &hisi_hba->ctrl_reset_reg)) - goto err_out; + &hisi_hba->ctrl_reset_reg)) { + dev_err(dev, + "could not get property ctrl-reset-reg\n"); + return -ENOENT; + } if (device_property_read_u32(dev, "ctrl-reset-sts-reg", - &hisi_hba->ctrl_reset_sts_reg)) - goto err_out; + &hisi_hba->ctrl_reset_sts_reg)) { + dev_err(dev, + "could not get property ctrl-reset-sts-reg\n"); + return -ENOENT; + } if (device_property_read_u32(dev, "ctrl-clock-ena-reg", - &hisi_hba->ctrl_clock_ena_reg)) - goto err_out; + &hisi_hba->ctrl_clock_ena_reg)) { + dev_err(dev, + "could not get property ctrl-clock-ena-reg\n"); + return -ENOENT; + } } - refclk = devm_clk_get(&pdev->dev, NULL); + refclk = devm_clk_get(dev, NULL); if (IS_ERR(refclk)) dev_dbg(dev, "no ref clk property\n"); else hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; - if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) - goto err_out; + if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { + dev_err(dev, "could not get property phy-count\n"); + return -ENOENT; + } if (device_property_read_u32(dev, "queue-count", - &hisi_hba->queue_count)) + &hisi_hba->queue_count)) { + dev_err(dev, "could not get property queue-count\n"); + return -ENOENT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); + +static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, + const struct hisi_sas_hw *hw) +{ + struct resource *res; + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + + shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba)); + if (!shost) { + dev_err(dev, "scsi host alloc failed\n"); + return NULL; + } + hisi_hba = shost_priv(shost); + + INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); + hisi_hba->hw = hw; + hisi_hba->dev = dev; + hisi_hba->platform_dev = pdev; + hisi_hba->shost = shost; + SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; + + init_timer(&hisi_hba->timer); + + if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && -- cgit From 92f61e3bc24a0d42c9ede86ff6f211ef07022dd7 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 14 Jun 2017 23:33:19 +0800 Subject: scsi: hisi_sas: add skeleton v3 hw driver Add skeleton driver for v3 hw in hisi_sas_v3_hw.c File hisi_sas_v3_hw.c will serve 2 purposes: - probing and initialisation of the controller based on pci device - hw layer for v3-based controllers The controller design is quite similar to v2 hw in 
hip07. However key differences include: -All v2 hw bugs are fixed (hopefully), so workarounds are not required -support for device deregistration -some interrupt modifications -configurable max device support Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/Kconfig | 10 +++++++- drivers/scsi/hisi_sas/Makefile | 1 + drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 47 ++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig index 374a329b91fc..d42f29a5eb65 100644 --- a/drivers/scsi/hisi_sas/Kconfig +++ b/drivers/scsi/hisi_sas/Kconfig @@ -6,4 +6,12 @@ config SCSI_HISI_SAS select BLK_DEV_INTEGRITY depends on ATA help - This driver supports HiSilicon's SAS HBA + This driver supports HiSilicon's SAS HBA, including support based + on platform device + +config SCSI_HISI_SAS_PCI + tristate "HiSilicon SAS on PCI bus" + depends on SCSI_HISI_SAS + depends on PCI + help + This driver supports HiSilicon's SAS HBA based on PCI device diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile index c6d3a1b5fcb9..24623f228510 100644 --- a/drivers/scsi/hisi_sas/Makefile +++ b/drivers/scsi/hisi_sas/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_main.o obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o +obj-$(CONFIG_SCSI_HISI_SAS_PCI) += hisi_sas_v3_hw.o diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c new file mode 100644 index 000000000000..cf72577c3e95 --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas_v3_hw" + +static int +hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return 0; +} + +static void hisi_sas_v3_remove(struct pci_dev *pdev) +{ +} + +enum { + /* instances of the controller */ + hip08, +}; + +static const struct pci_device_id sas_v3_pci_table[] = { + { PCI_VDEVICE(HUAWEI, 0xa230), hip08 }, + {} +}; + +static struct pci_driver sas_v3_pci_driver = { + .name = DRV_NAME, + .id_table = sas_v3_pci_table, + .probe = hisi_sas_v3_probe, + .remove = hisi_sas_v3_remove, +}; + +module_pci_driver(sas_v3_pci_driver); + +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry "); +MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device"); +MODULE_ALIAS("platform:" DRV_NAME); -- cgit From e21fe3a52692f554efd67957c772c702de627a3a Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 14 Jun 2017 23:33:20 +0800 Subject: scsi: hisi_sas: add initialisation for v3 pci-based controller Add the code to initialise the controller which is based on pci device in hisi_sas_v3_hw.c The core controller routines are still in hisi_sas_main.c; some common initialisation functions are also exported from hisi_sas_main.c For pci-based controller, the device properties, like phy count and sas address are read from the firmware, same as platform device-based controller. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 6 ++ drivers/scsi/hisi_sas/hisi_sas_main.c | 18 ++-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 154 +++++++++++++++++++++++++++++++++ 3 files changed, 172 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index c1f6669ab3f8..e89f6aec844f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -366,6 +366,12 @@ union hisi_sas_command_table { struct hisi_sas_command_table_stp stp; }; +extern struct scsi_transport_template *hisi_sas_stt; +extern struct scsi_host_template *hisi_sas_sht; + +extern void hisi_sas_init_add(struct hisi_hba *hisi_hba); +extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost); +extern void hisi_sas_free(struct hisi_hba *hisi_hba); extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 8a2af906e1ad..124a3ff569fd 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1476,9 +1476,10 @@ void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, } EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology); -static struct scsi_transport_template *hisi_sas_stt; +struct scsi_transport_template *hisi_sas_stt; +EXPORT_SYMBOL_GPL(hisi_sas_stt); -static struct scsi_host_template hisi_sas_sht = { +static struct scsi_host_template _hisi_sas_sht = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = sas_queuecommand, @@ -1498,6 +1499,8 @@ static struct scsi_host_template hisi_sas_sht = { .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, }; +struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht; +EXPORT_SYMBOL_GPL(hisi_sas_sht); static struct sas_domain_function_template hisi_sas_transport_ops = { .lldd_dev_found = hisi_sas_dev_found, @@ -1545,7 +1548,7 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba) } EXPORT_SYMBOL_GPL(hisi_sas_init_mem); -static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) +int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) { struct device *dev = hisi_hba->dev; int i, s, max_command_entries = hisi_hba->hw->max_command_entries; @@ -1664,8 +1667,9 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) err_out: return -ENOMEM; } +EXPORT_SYMBOL_GPL(hisi_sas_alloc); -static void hisi_sas_free(struct hisi_hba *hisi_hba) +void hisi_sas_free(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int i, s, max_command_entries = hisi_hba->hw->max_command_entries; @@ -1720,6 +1724,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba) if (hisi_hba->wq) destroy_workqueue(hisi_hba->wq); } +EXPORT_SYMBOL_GPL(hisi_sas_free); static void hisi_sas_rst_work_handler(struct work_struct *work) { @@ -1805,7 +1810,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; - shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba)); + shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); if (!shost) { dev_err(dev, "scsi host alloc failed\n"); return NULL; @@ -1847,7 +1852,7 @@ err_out: return NULL; } -static void hisi_sas_init_add(struct hisi_hba *hisi_hba) +void hisi_sas_init_add(struct hisi_hba *hisi_hba) { int i; @@ -1856,6 +1861,7 @@ static void 
hisi_sas_init_add(struct hisi_hba *hisi_hba) hisi_hba->sas_addr, SAS_ADDR_SIZE); } +EXPORT_SYMBOL_GPL(hisi_sas_init_add); int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *hw) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index cf72577c3e95..e9a9fb0db370 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -11,14 +11,168 @@ #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v3_hw" +static const struct hisi_sas_hw hisi_sas_v3_hw = { +}; + +static struct Scsi_Host * +hisi_sas_shost_alloc_pci(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + + shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); + if (!shost) + goto err_out; + hisi_hba = shost_priv(shost); + + hisi_hba->hw = &hisi_sas_v3_hw; + hisi_hba->pci_dev = pdev; + hisi_hba->dev = dev; + hisi_hba->shost = shost; + SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; + + init_timer(&hisi_hba->timer); + + if (hisi_sas_get_fw_info(hisi_hba) < 0) + goto err_out; + + if (hisi_sas_alloc(hisi_hba, shost)) { + hisi_sas_free(hisi_hba); + goto err_out; + } + + return shost; +err_out: + dev_err(dev, "shost alloc failed\n"); + return NULL; +} + static int hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha; + int rc, phy_nr, port_nr, i; + + rc = pci_enable_device(pdev); + if (rc) + goto err_out; + + pci_set_master(pdev); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable_device; + + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(dev, "No usable DMA addressing method\n"); + rc = -EIO; + goto err_out_regions; + } + } + + shost = hisi_sas_shost_alloc_pci(pdev); + if (!shost) { + rc = -ENOMEM; + goto err_out_regions; + } + + sha = SHOST_TO_SAS_HA(shost); + hisi_hba = shost_priv(shost); + dev_set_drvdata(dev, sha); + + hisi_hba->regs = pcim_iomap(pdev, 5, 0); + if (!hisi_hba->regs) { + dev_err(dev, "cannot map register.\n"); + rc = -ENOMEM; + goto err_out_ha; + } + + phy_nr = port_nr = hisi_hba->n_phy; + + arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); + arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) { + rc = -ENOMEM; + goto err_out_ha; + } + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + sha->core.shost = shost; + sha->lldd_ha = hisi_hba; + + shost->transportt = hisi_sas_stt; + shost->max_id = HISI_SAS_MAX_DEVICES; + shost->max_lun = ~0; + shost->max_channel = 1; + shost->max_cmd_len = 16; + shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); + shost->can_queue = hisi_hba->hw->max_command_entries; + shost->cmd_per_lun = hisi_hba->hw->max_command_entries; + + sha->sas_ha_name = DRV_NAME; + sha->dev = dev; + sha->lldd_module = THIS_MODULE; + sha->sas_addr = &hisi_hba->sas_addr[0]; + sha->num_phys = hisi_hba->n_phy; + sha->core.shost = hisi_hba->shost; + + for (i = 0; i < hisi_hba->n_phy; i++) { + sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; + sha->sas_port[i] = &hisi_hba->port[i].sas_port; + } + + hisi_sas_init_add(hisi_hba); + + rc = scsi_add_host(shost, dev); + 
if (rc) + goto err_out_ha; + + rc = sas_register_ha(sha); + if (rc) + goto err_out_register_ha; + + rc = hisi_hba->hw->hw_init(hisi_hba); + if (rc) + goto err_out_register_ha; + + scsi_scan_host(shost); + return 0; + +err_out_register_ha: + scsi_remove_host(shost); +err_out_ha: + kfree(shost); +err_out_regions: + pci_release_regions(pdev); +err_out_disable_device: + pci_disable_device(pdev); +err_out: + return rc; } static void hisi_sas_v3_remove(struct pci_dev *pdev) { + struct device *dev = &pdev->dev; + struct sas_ha_struct *sha = dev_get_drvdata(dev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + + sas_unregister_ha(sha); + sas_remove_host(sha->core.shost); + + hisi_sas_free(hisi_hba); + pci_release_regions(pdev); + pci_disable_device(pdev); } enum { -- cgit From c94d8ca2b1a810649a20deae54751d94db716042 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:21 +0800 Subject: scsi: hisi_sas: add v3 hw init Add code to initialise v3 hardware. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 277 +++++++++++++++++++++++++++++++++ 1 file changed, 277 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index e9a9fb0db370..1a5eae6bb102 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -11,7 +11,283 @@ #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v3_hw" +/* global registers need init*/ +#define DLVRY_QUEUE_ENABLE 0x0 +#define IOST_BASE_ADDR_LO 0x8 +#define IOST_BASE_ADDR_HI 0xc +#define ITCT_BASE_ADDR_LO 0x10 +#define ITCT_BASE_ADDR_HI 0x14 +#define IO_BROKEN_MSG_ADDR_LO 0x18 +#define IO_BROKEN_MSG_ADDR_HI 0x1c +#define AXI_AHB_CLK_CFG 0x3c +#define AXI_USER1 0x48 +#define AXI_USER2 0x4c +#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 +#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c +#define SATA_INITI_D2H_STORE_ADDR_LO 0x60 +#define SATA_INITI_D2H_STORE_ADDR_HI 0x64 +#define CFG_MAX_TAG 0x68 +#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 +#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 +#define HGC_GET_ITV_TIME 0x90 +#define DEVICE_MSG_WORK_MODE 0x94 +#define OPENA_WT_CONTI_TIME 0x9c +#define I_T_NEXUS_LOSS_TIME 0xa0 +#define MAX_CON_TIME_LIMIT_TIME 0xa4 +#define BUS_INACTIVE_LIMIT_TIME 0xa8 +#define REJECT_TO_OPEN_LIMIT_TIME 0xac +#define CFG_AGING_TIME 0xbc +#define HGC_DFX_CFG2 0xc0 +#define CFG_ABT_SET_QUERY_IPTT 0xd4 +#define CFG_SET_ABORTED_IPTT_OFF 0 +#define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) +#define CFG_1US_TIMER_TRSH 0xcc +#define INT_COAL_EN 0x19c +#define OQ_INT_COAL_TIME 0x1a0 +#define OQ_INT_COAL_CNT 0x1a4 +#define ENT_INT_COAL_TIME 0x1a8 +#define ENT_INT_COAL_CNT 0x1ac +#define OQ_INT_SRC 0x1b0 +#define OQ_INT_SRC_MSK 0x1b4 +#define ENT_INT_SRC1 0x1b8 +#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 +#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) +#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 +#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) +#define ENT_INT_SRC2 0x1bc +#define ENT_INT_SRC3 0x1c0 +#define ENT_INT_SRC3_WP_DEPTH_OFF 8 +#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 +#define ENT_INT_SRC3_RP_DEPTH_OFF 10 +#define ENT_INT_SRC3_AXI_OFF 11 +#define ENT_INT_SRC3_FIFO_OFF 12 +#define ENT_INT_SRC3_LM_OFF 14 +#define ENT_INT_SRC3_ITC_INT_OFF 15 +#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) +#define ENT_INT_SRC3_ABT_OFF 16 +#define ENT_INT_SRC_MSK1 0x1c4 +#define ENT_INT_SRC_MSK2 0x1c8 +#define ENT_INT_SRC_MSK3 
0x1cc +#define CHNL_PHYUPDOWN_INT_MSK 0x1d0 +#define CHNL_ENT_INT_MSK 0x1d4 +#define HGC_COM_INT_MSK 0x1d8 +#define SAS_ECC_INTR 0x1e8 +#define SAS_ECC_INTR_MSK 0x1ec +#define HGC_ERR_STAT_EN 0x238 +#define DLVRY_Q_0_BASE_ADDR_LO 0x260 +#define DLVRY_Q_0_BASE_ADDR_HI 0x264 +#define DLVRY_Q_0_DEPTH 0x268 +#define DLVRY_Q_0_WR_PTR 0x26c +#define DLVRY_Q_0_RD_PTR 0x270 +#define HYPER_STREAM_ID_EN_CFG 0xc80 +#define OQ0_INT_SRC_MSK 0xc90 +#define COMPL_Q_0_BASE_ADDR_LO 0x4e0 +#define COMPL_Q_0_BASE_ADDR_HI 0x4e4 +#define COMPL_Q_0_DEPTH 0x4e8 +#define COMPL_Q_0_WR_PTR 0x4ec +#define COMPL_Q_0_RD_PTR 0x4f0 +#define AWQOS_AWCACHE_CFG 0xc84 +#define ARQOS_ARCACHE_CFG 0xc88 + +/* phy registers requiring init */ +#define PORT_BASE (0x2000) +#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) +#define PHY_CTRL (PORT_BASE + 0x14) +#define PHY_CTRL_RESET_OFF 0 +#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) +#define SL_CFG (PORT_BASE + 0x84) +#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) +#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) +#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c) +#define CHL_INT0 (PORT_BASE + 0x1b4) +#define CHL_INT0_HOTPLUG_TOUT_OFF 0 +#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) +#define CHL_INT0_SL_RX_BCST_ACK_OFF 1 +#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) +#define CHL_INT0_SL_PHY_ENABLE_OFF 2 +#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) +#define CHL_INT0_NOT_RDY_OFF 4 +#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) +#define CHL_INT0_PHY_RDY_OFF 5 +#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) +#define CHL_INT1 (PORT_BASE + 0x1b8) +#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 +#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) +#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 +#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) +#define CHL_INT2 (PORT_BASE + 0x1bc) +#define CHL_INT0_MSK (PORT_BASE + 0x1c0) +#define CHL_INT1_MSK (PORT_BASE + 0x1c4) +#define CHL_INT2_MSK (PORT_BASE + 0x1c8) +#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) +#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) +#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) +#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) +#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) +#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) + +struct hisi_sas_complete_v3_hdr { + __le32 dw0; + __le32 dw1; + __le32 act; + __le32 dw3; +}; + +#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 + +static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + off; + + writel(val, regs); +} + +static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, + u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + writel(val, regs); +} + +static void init_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + /* Global registers init */ + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + hisi_sas_write32(hisi_hba, AXI_USER1, 0x0); + hisi_sas_write32(hisi_hba, AXI_USER2, 0x40000060); + hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); + hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, 
OQ_INT_SRC, 0xffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); + hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); + hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); + hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); + hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); + hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); + for (i = 0; i < hisi_hba->queue_count; i++) + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); + + hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); + hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); + hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); + hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc); + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa); + hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, + 0xa0064); + hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG, + 0xa0064); + } + for (i = 0; i < hisi_hba->queue_count; i++) { + /* Delivery queue */ + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + + /* Completion queue */ + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + } + + /* itct */ + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, + lower_32_bits(hisi_hba->itct_dma)); + + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, + upper_32_bits(hisi_hba->itct_dma)); + + /* iost */ + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, + lower_32_bits(hisi_hba->iost_dma)); + + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, + upper_32_bits(hisi_hba->iost_dma)); + + /* breakpoint */ + hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->breakpoint_dma)); + + hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->breakpoint_dma)); + + /* SATA broken msg */ + hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->sata_breakpoint_dma)); + + 
hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->sata_breakpoint_dma)); + + /* SATA initial fis */ + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, + lower_32_bits(hisi_hba->initial_fis_dma)); + + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, + upper_32_bits(hisi_hba->initial_fis_dma)); +} + +static int hw_init_v3_hw(struct hisi_hba *hisi_hba) +{ + init_reg_v3_hw(hisi_hba); + + return 0; +} + +static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) +{ + int rc; + + rc = hw_init_v3_hw(hisi_hba); + if (rc) + return rc; + + return 0; +} + static const struct hisi_sas_hw hisi_sas_v3_hw = { + .hw_init = hisi_sas_v3_init, + .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, + .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), }; static struct Scsi_Host * @@ -175,6 +451,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) pci_disable_device(pdev); } + enum { /* instances of the controller */ hip08, -- cgit From 3975f6054e31c55c7e73faa342780e2af2f30872 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:22 +0800 Subject: scsi: hisi_sas: add v3 hw PHY init Add code to configure PHYs for v3 hw. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 127 ++++++++++++++++++++++++++++++++- 1 file changed, 126 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 1a5eae6bb102..558025013624 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -19,6 +19,10 @@ #define ITCT_BASE_ADDR_HI 0x14 #define IO_BROKEN_MSG_ADDR_LO 0x18 #define IO_BROKEN_MSG_ADDR_HI 0x1c +#define PHY_CONTEXT 0x20 +#define PHY_STATE 0x24 +#define PHY_PORT_NUM_MA 0x28 +#define PHY_CONN_RATE 0x30 #define AXI_AHB_CLK_CFG 0x3c #define AXI_USER1 0x48 #define AXI_USER2 0x4c @@ -42,6 +46,7 @@ #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) #define CFG_1US_TIMER_TRSH 0xcc +#define CHNL_INT_STATUS 0x148 #define INT_COAL_EN 0x19c #define OQ_INT_COAL_TIME 0x1a0 #define OQ_INT_COAL_CNT 0x1a4 @@ -68,9 +73,11 @@ #define ENT_INT_SRC_MSK1 0x1c4 #define ENT_INT_SRC_MSK2 0x1c8 #define ENT_INT_SRC_MSK3 0x1cc +#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0 #define CHNL_ENT_INT_MSK 0x1d4 #define HGC_COM_INT_MSK 0x1d8 +#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) #define SAS_ECC_INTR 0x1e8 #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 @@ -91,11 +98,33 @@ /* phy registers requiring init */ #define PORT_BASE (0x2000) +#define PHY_CFG (PORT_BASE + 0x0) +#define HARD_PHY_LINKRATE (PORT_BASE + 0x4) +#define PHY_CFG_ENA_OFF 0 +#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) +#define PHY_CFG_DC_OPT_OFF 2 +#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) #define PHY_CTRL (PORT_BASE + 0x14) #define PHY_CTRL_RESET_OFF 0 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) #define SL_CFG (PORT_BASE + 0x84) +#define SL_CONTROL (PORT_BASE + 0x94) +#define SL_CONTROL_NOTIFY_EN_OFF 0 +#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) +#define SL_CTA_OFF 17 +#define SL_CTA_MSK (0x1 << SL_CTA_OFF) +#define TX_ID_DWORD0 (PORT_BASE + 0x9c) +#define TX_ID_DWORD1 (PORT_BASE + 0xa0) +#define TX_ID_DWORD2 (PORT_BASE + 0xa4) +#define TX_ID_DWORD3 (PORT_BASE + 0xa8) +#define TX_ID_DWORD4 
(PORT_BASE + 0xaC) +#define TX_ID_DWORD5 (PORT_BASE + 0xb0) +#define TX_ID_DWORD6 (PORT_BASE + 0xb4) +#define TXID_AUTO (PORT_BASE + 0xb8) +#define CT3_OFF 1 +#define CT3_MSK (0x1 << CT3_OFF) +#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) @@ -136,6 +165,13 @@ struct hisi_sas_complete_v3_hdr { }; #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 +#define HISI_SAS_MSI_COUNT_V3_HW 32 + +enum { + HISI_SAS_PHY_PHY_UPDOWN, + HISI_SAS_PHY_CHNL_INT, + HISI_SAS_PHY_INT_NR +}; static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { @@ -152,6 +188,14 @@ static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, writel(val, regs); } +static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, + int phy_no, u32 off) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + return readl(regs); +} + static void init_reg_v3_hw(struct hisi_hba *hisi_hba) { int i; @@ -266,6 +310,45 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) upper_32_bits(hisi_hba->initial_fis_dma)); } +static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_DC_OPT_MSK; + cfg |= 1 << PHY_CFG_DC_OPT_OFF; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct sas_identify_frame identify_frame; + u32 *identify_buffer; + + memset(&identify_frame, 0, sizeof(identify_frame)); + identify_frame.dev_type = SAS_END_DEVICE; + identify_frame.frame_type = 0; + identify_frame._un1 = 1; + identify_frame.initiator_bits = SAS_PROTOCOL_ALL; + identify_frame.target_bits = SAS_PROTOCOL_NONE; + memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + identify_frame.phy_id = phy_no; + identify_buffer = (u32 *)(&identify_frame); + + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, + __swab32(identify_buffer[0])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, + __swab32(identify_buffer[1])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, + __swab32(identify_buffer[2])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, + __swab32(identify_buffer[3])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, + __swab32(identify_buffer[4])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, + __swab32(identify_buffer[5])); +} + static int hw_init_v3_hw(struct hisi_hba *hisi_hba) { init_reg_v3_hw(hisi_hba); @@ -273,6 +356,47 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba) return 0; } +static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg |= PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + config_id_frame_v3_hw(hisi_hba, phy_no); + config_phy_opt_mode_v3_hw(hisi_hba, phy_no); + enable_phy_v3_hw(hisi_hba, phy_no); +} + +static void start_phys_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) + start_phy_v3_hw(hisi_hba, i); +} + +static void phys_init_v3_hw(struct hisi_hba *hisi_hba) +{ + start_phys_v3_hw(hisi_hba); +} + +static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 sl_control; + + sl_control = 
hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control |= SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); + msleep(1); + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); +} + static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) { int rc; @@ -288,6 +412,8 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .hw_init = hisi_sas_v3_init, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), + .sl_notify = sl_notify_v3_hw, + .phys_init = phys_init_v3_hw, }; static struct Scsi_Host * @@ -451,7 +577,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) pci_disable_device(pdev); } - enum { /* instances of the controller */ hip08, -- cgit From 54edeee1e1f3621632308212daf383ed6688e955 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:23 +0800 Subject: scsi: hisi_sas: add phy up/down/bcast and channel ISR Add code to initialise interrupts and add some interrupt handlers. Also add function hisi_sas_v3_destroy_irqs() to clean-up irqs upon module unloading. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 283 +++++++++++++++++++++++++++++++++ 1 file changed, 283 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 558025013624..3065252499c4 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -173,6 +173,13 @@ enum { HISI_SAS_PHY_INT_NR }; +static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl(regs); +} + static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + off; @@ -397,6 +404,269 @@ static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } +static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + int i, res = 0; + u32 context, port_id, link_rate, hard_phy_linkrate; + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = hisi_hba->dev; + + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); + + port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + port_id = (port_id >> (4 * phy_no)) & 0xf; + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + + if (port_id == 0xf) { + dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); + res = IRQ_NONE; + goto end; + } + sas_phy->linkrate = link_rate; + hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, + HARD_PHY_LINKRATE); + phy->maximum_linkrate = hard_phy_linkrate & 0xf; + phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + + /* Check for SATA dev */ + context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); + if (context & (1 << phy_no)) { + struct hisi_sas_initial_fis *initial_fis; + struct dev_to_host_fis *fis; + u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; + + dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); + initial_fis = &hisi_hba->initial_fis[phy_no]; + fis = &initial_fis->fis; + sas_phy->oob_mode = SATA_OOB_MODE; + attached_sas_addr[0] = 0x50; + attached_sas_addr[7] = phy_no; + 
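/*
 * Editor's note, not part of the original patch: a directly attached SATA
 * drive has no SAS address of its own, so phy_up_v3_hw() fabricates one for
 * libsas here from a fixed leading byte (0x50) plus the phy number; that
 * fabricated address is what gets copied into sas_phy->attached_sas_addr
 * just below.
 */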
memcpy(sas_phy->attached_sas_addr, + attached_sas_addr, + SAS_ADDR_SIZE); + memcpy(sas_phy->frame_rcvd, fis, + sizeof(struct dev_to_host_fis)); + phy->phy_type |= PORT_TYPE_SATA; + phy->identify.device_type = SAS_SATA_DEV; + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + } else { + u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; + struct sas_identify_frame *id = + (struct sas_identify_frame *)frame_rcvd; + + dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); + for (i = 0; i < 6; i++) { + u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, + RX_IDAF_DWORD0 + (i * 4)); + frame_rcvd[i] = __swab32(idaf); + } + sas_phy->oob_mode = SAS_OOB_MODE; + memcpy(sas_phy->attached_sas_addr, + &id->sas_addr, + SAS_ADDR_SIZE); + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = id->dev_type; + phy->frame_rcvd_size = sizeof(struct sas_identify_frame); + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + } + + phy->port_id = port_id; + phy->phy_attached = 1; + queue_work(hisi_hba->wq, &phy->phyup_ws); + +end: + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_PHY_ENABLE_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); + + return res; +} + +static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + int res = 0; + u32 phy_state, sl_ctrl, txid_auto; + struct device *dev = hisi_hba->dev; + + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); + + phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state); + hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 
1 : 0); + + sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, + sl_ctrl&(~SL_CTA_MSK)); + + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto | CT3_MSK); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); + + return res; +} + +static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_RX_BCST_ACK_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); +} + +static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_msk; + int phy_no = 0; + irqreturn_t res = IRQ_NONE; + + irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) + & 0x11111111; + while (irq_msk) { + if (irq_msk & 1) { + u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + int rdy = phy_state & (1 << phy_no); + + if (rdy) { + if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK) + /* phy up */ + if (phy_up_v3_hw(phy_no, hisi_hba) + == IRQ_HANDLED) + res = IRQ_HANDLED; + if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK) + /* phy bcast */ + phy_bcast_v3_hw(phy_no, hisi_hba); + } else { + if (irq_value & CHL_INT0_NOT_RDY_MSK) + /* phy down */ + if (phy_down_v3_hw(phy_no, hisi_hba) + == IRQ_HANDLED) + res = IRQ_HANDLED; + } + } + irq_msk >>= 4; + phy_no++; + } + + return res; +} + +static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + struct device *dev = hisi_hba->dev; + u32 ent_msk, ent_tmp, irq_msk; + int phy_no = 0; + + ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); + ent_tmp = ent_msk; + ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK; + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk); + + irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) + & 0xeeeeeeee; + + while (irq_msk) { + u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT1); + u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT2); + + if ((irq_msk & (4 << (phy_no * 4))) && + irq_value1) { + if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | + CHL_INT1_DMAC_TX_ECC_ERR_MSK)) + panic("%s: DMAC RX/TX ecc bad error! 
(0x%x)", + dev_name(dev), irq_value1); + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT1, irq_value1); + } + + if (irq_msk & (8 << (phy_no * 4)) && irq_value2) + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT2, irq_value2); + + + if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT0, irq_value0 + & (~CHL_INT0_HOTPLUG_TOUT_MSK) + & (~CHL_INT0_SL_PHY_ENABLE_MSK) + & (~CHL_INT0_NOT_RDY_MSK)); + } + irq_msk &= ~(0xe << (phy_no * 4)); + phy_no++; + } + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp); + + return IRQ_HANDLED; +} + +static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + struct pci_dev *pdev = hisi_hba->pci_dev; + int vectors, rc; + int max_msi = HISI_SAS_MSI_COUNT_V3_HW; + + vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1, + max_msi, PCI_IRQ_MSI); + if (vectors < max_msi) { + dev_err(dev, "could not allocate all msi (%d)\n", vectors); + return -ENOENT; + } + + rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), + int_phy_up_down_bcast_v3_hw, 0, + DRV_NAME " phy", hisi_hba); + if (rc) { + dev_err(dev, "could not request phy interrupt, rc=%d\n", rc); + rc = -ENOENT; + goto free_irq_vectors; + } + + rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), + int_chnl_int_v3_hw, 0, + DRV_NAME " channel", hisi_hba); + if (rc) { + dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc); + rc = -ENOENT; + goto free_phy_irq; + } + + + return 0; + +free_phy_irq: + free_irq(pci_irq_vector(pdev, 1), hisi_hba); +free_irq_vectors: + pci_free_irq_vectors(pdev); + return rc; +} + static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) { int rc; @@ -405,6 +675,10 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) if (rc) return rc; + rc = interrupt_init_v3_hw(hisi_hba); + if (rc) + return rc; + return 0; } @@ -563,6 +837,14 @@ err_out: return rc; } +static void +hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) +{ + free_irq(pci_irq_vector(pdev, 1), hisi_hba); + free_irq(pci_irq_vector(pdev, 2), hisi_hba); + pci_free_irq_vectors(pdev); +} + static void hisi_sas_v3_remove(struct pci_dev *pdev) { struct device *dev = &pdev->dev; @@ -573,6 +855,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) sas_remove_host(sha->core.shost); hisi_sas_free(hisi_hba); + hisi_sas_v3_destroy_irqs(pdev, hisi_hba); pci_release_regions(pdev); pci_disable_device(pdev); } -- cgit From 60b4a5ee90349a50fc7e199d3dd37dfd7e2c51a5 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:24 +0800 Subject: scsi: hisi_sas: add v3 cq interrupt handler Add v3 cq interrupt handler slot_complete_v3_hw(). Note: The slot error handling needs to be further refined in the future to examine all fields in the error record, and handle appropriately, instead of current solution - just report SAS_OPEN_REJECT. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 340 +++++++++++++++++++++++++++++++++ 1 file changed, 340 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 3065252499c4..4869b73e0284 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -157,6 +157,32 @@ #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) +/* Completion header */ +/* dw0 */ +#define CMPLT_HDR_CMPLT_OFF 0 +#define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) +#define CMPLT_HDR_ERROR_PHASE_OFF 2 +#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) +#define CMPLT_HDR_RSPNS_XFRD_OFF 10 +#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) +#define CMPLT_HDR_ERX_OFF 12 +#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) +#define CMPLT_HDR_ABORT_STAT_OFF 13 +#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) +/* abort_stat */ +#define STAT_IO_NOT_VALID 0x1 +#define STAT_IO_NO_DEVICE 0x2 +#define STAT_IO_COMPLETE 0x3 +#define STAT_IO_ABORTED 0x4 +/* dw1 */ +#define CMPLT_HDR_IPTT_OFF 0 +#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) +#define CMPLT_HDR_DEV_ID_OFF 16 +#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) +/* dw3 */ +#define CMPLT_HDR_IO_IN_TARGET_OFF 17 +#define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) + struct hisi_sas_complete_v3_hdr { __le32 dw0; __le32 dw1; @@ -164,6 +190,24 @@ struct hisi_sas_complete_v3_hdr { __le32 dw3; }; +struct hisi_sas_err_record_v3 { + /* dw0 */ + __le32 trans_tx_fail_type; + + /* dw1 */ + __le32 trans_rx_fail_type; + + /* dw2 */ + __le16 dma_tx_err_type; + __le16 sipc_rx_err_type; + + /* dw3 */ + __le32 dma_rx_err_type; +}; + +#define RX_DATA_LEN_UNDERFLOW_OFF 6 +#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF) + #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 #define HISI_SAS_MSI_COUNT_V3_HW 32 @@ -625,11 +669,275 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) return IRQ_HANDLED; } +static void +slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct hisi_sas_complete_v3_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v3_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + struct hisi_sas_err_record_v3 *record = slot->status_buffer; + u32 dma_rx_err_type = record->dma_rx_err_type; + u32 trans_tx_fail_type = record->trans_tx_fail_type; + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; + } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { + ts->stat = SAS_QUEUE_FULL; + slot->abort = 1; + } else { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + } + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; + } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { + ts->stat = SAS_PHY_DOWN; + slot->abort = 1; + } else { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + } + hisi_sas_sata_done(task, slot); + break; + case SAS_PROTOCOL_SMP: + ts->stat = SAM_STAT_CHECK_CONDITION; + break; + 
default: + break; + } +} + +static int +slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_device *sas_dev; + struct device *dev = hisi_hba->dev; + struct task_status_struct *ts; + struct domain_device *device; + enum exec_status sts; + struct hisi_sas_complete_v3_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v3_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + int aborted; + unsigned long flags; + + if (unlikely(!task || !task->lldd_task || !task->dev)) + return -EINVAL; + + ts = &task->task_status; + device = task->dev; + sas_dev = device->lldd_dev; + + spin_lock_irqsave(&task->task_state_lock, flags); + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + spin_unlock_irqrestore(&task->task_state_lock, flags); + + memset(ts, 0, sizeof(*ts)); + ts->resp = SAS_TASK_COMPLETE; + if (unlikely(aborted)) { + ts->stat = SAS_ABORTED_TASK; + hisi_sas_slot_task_free(hisi_hba, task, slot); + return -1; + } + + if (unlikely(!sas_dev)) { + dev_dbg(dev, "slot complete: port has not device\n"); + ts->stat = SAS_PHY_DOWN; + goto out; + } + + /* + * Use SAS+TMF status codes + */ + switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK) + >> CMPLT_HDR_ABORT_STAT_OFF) { + case STAT_IO_ABORTED: + /* this IO has been aborted by abort command */ + ts->stat = SAS_ABORTED_TASK; + goto out; + case STAT_IO_COMPLETE: + /* internal abort command complete */ + ts->stat = TMF_RESP_FUNC_SUCC; + goto out; + case STAT_IO_NO_DEVICE: + ts->stat = TMF_RESP_FUNC_COMPLETE; + goto out; + case STAT_IO_NOT_VALID: + /* + * abort single IO, the controller can't find the IO + */ + ts->stat = TMF_RESP_FUNC_FAILED; + goto out; + default: + break; + } + + /* check for erroneous completion */ + if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { + slot_err_v3_hw(hisi_hba, task, slot); + if (unlikely(slot->abort)) + return ts->stat; + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: { + struct ssp_response_iu *iu = slot->status_buffer + + sizeof(struct hisi_sas_err_record); + + sas_ssp_task_response(dev, task, iu); + break; + } + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + void *to; + + ts->stat = SAM_STAT_GOOD; + to = kmap_atomic(sg_page(sg_resp)); + + dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); + dma_unmap_sg(dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + memcpy(to + sg_resp->offset, + slot->status_buffer + + sizeof(struct hisi_sas_err_record), + sg_dma_len(sg_resp)); + kunmap_atomic(to); + break; + } + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + ts->stat = SAM_STAT_GOOD; + hisi_sas_sata_done(task, slot); + break; + default: + ts->stat = SAM_STAT_CHECK_CONDITION; + break; + } + + if (!slot->port->port_attached) { + dev_err(dev, "slot complete: port %d has removed\n", + slot->port->sas_port.id); + ts->stat = SAS_PHY_DOWN; + } + +out: + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_slot_task_free(hisi_hba, task, slot); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + sts = ts->stat; + + if (task->task_done) + task->task_done(task); + + return sts; +} + +static void cq_tasklet_v3_hw(unsigned long 
val) +{ + struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; + struct hisi_hba *hisi_hba = cq->hisi_hba; + struct hisi_sas_slot *slot; + struct hisi_sas_itct *itct; + struct hisi_sas_complete_v3_hdr *complete_queue; + u32 rd_point = cq->rd_point, wr_point, dev_id; + int queue = cq->id; + struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; + + complete_queue = hisi_hba->complete_hdr[queue]; + + spin_lock(&dq->lock); + wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + + (0x14 * queue)); + + while (rd_point != wr_point) { + struct hisi_sas_complete_v3_hdr *complete_hdr; + int iptt; + + complete_hdr = &complete_queue[rd_point]; + + /* Check for NCQ completion */ + if (complete_hdr->act) { + u32 act_tmp = complete_hdr->act; + int ncq_tag_count = ffs(act_tmp); + + dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >> + CMPLT_HDR_DEV_ID_OFF; + itct = &hisi_hba->itct[dev_id]; + + /* The NCQ tags are held in the itct header */ + while (ncq_tag_count) { + __le64 *ncq_tag = &itct->qw4_15[0]; + + ncq_tag_count -= 1; + iptt = (ncq_tag[ncq_tag_count / 5] + >> (ncq_tag_count % 5) * 12) & 0xfff; + + slot = &hisi_hba->slot_info[iptt]; + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v3_hw(hisi_hba, slot); + + act_tmp &= ~(1 << ncq_tag_count); + ncq_tag_count = ffs(act_tmp); + } + } else { + iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; + slot = &hisi_hba->slot_info[iptt]; + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v3_hw(hisi_hba, slot); + } + + if (++rd_point >= HISI_SAS_QUEUE_SLOTS) + rd_point = 0; + } + + /* update rd_point */ + cq->rd_point = rd_point; + hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + spin_unlock(&dq->lock); +} + +static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + int queue = cq->id; + + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); + + tasklet_schedule(&cq->tasklet); + + return IRQ_HANDLED; +} + static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; struct pci_dev *pdev = hisi_hba->pci_dev; int vectors, rc; + int i, k; int max_msi = HISI_SAS_MSI_COUNT_V3_HW; vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1, @@ -657,9 +965,34 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) goto free_phy_irq; } + /* Init tasklets for cq only */ + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + struct tasklet_struct *t = &cq->tasklet; + + rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16), + cq_interrupt_v3_hw, 0, + DRV_NAME " cq", cq); + if (rc) { + dev_err(dev, + "could not request cq%d interrupt, rc=%d\n", + i, rc); + rc = -ENOENT; + goto free_cq_irqs; + } + + tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq); + } return 0; +free_cq_irqs: + for (k = 0; k < i; k++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[k]; + + free_irq(pci_irq_vector(pdev, k+16), cq); + } + free_irq(pci_irq_vector(pdev, 2), hisi_hba); free_phy_irq: free_irq(pci_irq_vector(pdev, 1), hisi_hba); free_irq_vectors: @@ -840,8 +1173,15 @@ err_out: static void hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) { + int i; + free_irq(pci_irq_vector(pdev, 1), hisi_hba); free_irq(pci_irq_vector(pdev, 2), hisi_hba); + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + + free_irq(pci_irq_vector(pdev, i+16), cq); + } pci_free_irq_vectors(pdev); } -- cgit From 
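(Editor's aside on the completion path just shown, not part of any patch here: for NCQ completions, cq_tasklet_v3_hw() recovers each command's IPTT from the device's ITCT entry, where the 12-bit tags are packed five per 64-bit word of qw4_15[]. A standalone sketch of that extraction follows; the function name is hypothetical.)

#include <linux/types.h>

/* Tag n lives in word n / 5 of qw4_15[], at bit offset (n % 5) * 12. */
static u16 example_ncq_tag_to_iptt(const u64 *qw4_15, unsigned int ncq_tag)
{
	return (qw4_15[ncq_tag / 5] >> ((ncq_tag % 5) * 12)) & 0xfff;
}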
a2204723acefd504561b5dbc53d9c0a1c2528eab Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:25 +0800 Subject: scsi: hisi_sas: add v3 code to send SSP frame Add code to prepare SSP frame and deliver it to hardware. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 208 +++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 4869b73e0284..c869aca273aa 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -157,6 +157,41 @@ #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) +/* HW dma structures */ +/* Delivery queue header */ +/* dw0 */ +#define CMD_HDR_RESP_REPORT_OFF 5 +#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) +#define CMD_HDR_TLR_CTRL_OFF 6 +#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PORT_OFF 18 +#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) +#define CMD_HDR_PRIORITY_OFF 27 +#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) +#define CMD_HDR_CMD_OFF 29 +#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) +/* dw1 */ +#define CMD_HDR_DIR_OFF 5 +#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) +#define CMD_HDR_VDTL_OFF 10 +#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) +#define CMD_HDR_FRAME_TYPE_OFF 11 +#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) +#define CMD_HDR_DEV_ID_OFF 16 +#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) +/* dw2 */ +#define CMD_HDR_CFL_OFF 0 +#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) +#define CMD_HDR_MRFL_OFF 15 +#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) +#define CMD_HDR_SG_MOD_OFF 24 +#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) +/* dw6 */ +#define CMD_HDR_DIF_SGL_LEN_OFF 0 +#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) +#define CMD_HDR_DATA_SGL_LEN_OFF 16 +#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) + /* Completion header */ /* dw0 */ #define CMPLT_HDR_CMPLT_OFF 0 @@ -217,6 +252,11 @@ enum { HISI_SAS_PHY_INT_NR }; +#define DIR_NO_DATA 0 +#define DIR_TO_INI 1 +#define DIR_TO_DEVICE 2 +#define DIR_RESERVED 3 + static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; @@ -224,6 +264,13 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) return readl(regs); } +static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl_relaxed(regs); +} + static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + off; @@ -448,6 +495,163 @@ static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } +/** + * The callpath to this function and upto writing the write + * queue pointer should be safe from interruption. 
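 * (Editor's note, not part of the original patch: the delivery queue is a
 * ring of HISI_SAS_QUEUE_SLOTS entries; "full" below means that advancing
 * the software write pointer would collide with the hardware-maintained
 * read pointer, i.e. (w + 1) % HISI_SAS_QUEUE_SLOTS == r, in which case
 * the function returns -EAGAIN.)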
+ */ +static int +get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) +{ + struct device *dev = hisi_hba->dev; + int queue = dq->id; + u32 r, w; + + w = dq->wr_point; + r = hisi_sas_read32_relaxed(hisi_hba, + DLVRY_Q_0_RD_PTR + (queue * 0x14)); + if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { + dev_warn(dev, "full queue=%d r=%d w=%d\n\n", + queue, r, w); + return -EAGAIN; + } + + return 0; +} + +static void start_delivery_v3_hw(struct hisi_sas_dq *dq) +{ + struct hisi_hba *hisi_hba = dq->hisi_hba; + int dlvry_queue = dq->slot_prep->dlvry_queue; + int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; + + dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), + dq->wr_point); +} + +static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct device *dev = hisi_hba->dev; + struct scatterlist *sg; + int i; + + if (n_elem > HISI_SAS_SGE_PAGE_CNT) { + dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", + n_elem); + return -EINVAL; + } + + slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC, + &slot->sge_page_dma); + if (!slot->sge_page) + return -ENOMEM; + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &slot->sge_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma); + hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); + + return 0; +} + +static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, int is_tmf, + struct hisi_sas_tmf_task *tmf) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_port *port = slot->port; + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + int has_data = 0, rc, priority = is_tmf; + u8 *buf_cmd; + u32 dw1 = 0, dw2 = 0; + + hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | + (2 << CMD_HDR_TLR_CTRL_OFF) | + (port->id << CMD_HDR_PORT_OFF) | + (priority << CMD_HDR_PRIORITY_OFF) | + (1 << CMD_HDR_CMD_OFF)); /* ssp */ + + dw1 = 1 << CMD_HDR_VDTL_OFF; + if (is_tmf) { + dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; + } else { + dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; + switch (scsi_cmnd->sc_data_direction) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + } + + /* map itct entry */ + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + hdr->dw1 = cpu_to_le32(dw1); + + dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + + 3) / 4) << CMD_HDR_CFL_OFF) | + ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | + (2 << CMD_HDR_SG_MOD_OFF); + hdr->dw2 = cpu_to_le32(dw2); + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) { + rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + if (rc) + return rc; + } + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); + hdr->sts_buffer_addr = 
cpu_to_le64(slot->status_buffer_dma); + + buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); + memcpy(buf_cmd, ssp_task->LUN, 8); + + if (!is_tmf) { + buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3); + memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + } else { + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } + + return 0; +} + static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { int i, res = 0; @@ -1020,6 +1224,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), .sl_notify = sl_notify_v3_hw, + .prep_ssp = prep_ssp_v3_hw, + .get_free_slot = get_free_slot_v3_hw, + .start_delivery = start_delivery_v3_hw, + .slot_complete = slot_complete_v3_hw, .phys_init = phys_init_v3_hw, }; -- cgit From fa913de23aa2e7443ff34a61eadab37b6b20797d Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:26 +0800 Subject: scsi: hisi_sas: add v3 code to send SMP frame Add code to prepare SMP frame. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 74 ++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index c869aca273aa..515f50c4032c 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -186,6 +186,9 @@ #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) #define CMD_HDR_SG_MOD_OFF 24 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) +/* dw3 */ +#define CMD_HDR_IPTT_OFF 0 +#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) /* dw6 */ #define CMD_HDR_DIF_SGL_LEN_OFF 0 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) @@ -652,6 +655,76 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, return 0; } +static int prep_smp_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct device *dev = hisi_hba->dev; + struct hisi_sas_port *port = slot->port; + struct scatterlist *sg_req, *sg_resp; + struct hisi_sas_device *sas_dev = device->lldd_dev; + dma_addr_t req_dma_addr; + unsigned int req_len, resp_len; + int elem, rc; + + /* + * DMA-map SMP request, response buffers + */ + /* req */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + req_dma_addr = sg_dma_address(sg_req); + + /* resp */ + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out_req; + } + resp_len = sg_dma_len(sg_resp); + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_resp; + } + + /* create header */ + /* dw0 */ + hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | + (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ + (2 << CMD_HDR_CMD_OFF)); /* smp */ + + /* map itct entry */ + hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | + (1 << CMD_HDR_FRAME_TYPE_OFF) | + (DIR_NO_DATA << CMD_HDR_DIR_OFF)); + + /* dw2 */ + hdr->dw2 = cpu_to_le32((((req_len 
- 4) / 4) << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ / 4 << + CMD_HDR_MRFL_OFF)); + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); + hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + + return 0; + +err_out_resp: + dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out_req: + dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { int i, res = 0; @@ -1225,6 +1298,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), .sl_notify = sl_notify_v3_hw, .prep_ssp = prep_ssp_v3_hw, + .prep_smp = prep_smp_v3_hw, .get_free_slot = get_free_slot_v3_hw, .start_delivery = start_delivery_v3_hw, .slot_complete = slot_complete_v3_hw, -- cgit From ce60689e12ddfee94afaaa23089e1131f892839a Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:27 +0800 Subject: scsi: hisi_sas: add v3 code to send ATA frame Add code to prepare ATA frame for v3 hw Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 106 +++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 515f50c4032c..30c103b20d88 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -171,8 +171,11 @@ #define CMD_HDR_CMD_OFF 29 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) /* dw1 */ +#define CMD_HDR_UNCON_CMD_OFF 3 #define CMD_HDR_DIR_OFF 5 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) +#define CMD_HDR_RESET_OFF 7 +#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) #define CMD_HDR_VDTL_OFF 10 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) #define CMD_HDR_FRAME_TYPE_OFF 11 @@ -182,6 +185,8 @@ /* dw2 */ #define CMD_HDR_CFL_OFF 0 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) +#define CMD_HDR_NCQ_TAG_OFF 10 +#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) #define CMD_HDR_MRFL_OFF 15 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) #define CMD_HDR_SG_MOD_OFF 24 @@ -260,6 +265,11 @@ enum { #define DIR_TO_DEVICE 2 #define DIR_RESERVED 3 +#define CMD_IS_UNCONSTRAINT(cmd) \ + ((cmd == ATA_CMD_READ_LOG_EXT) || \ + (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \ + (cmd == ATA_CMD_DEV_RESET)) + static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; @@ -725,6 +735,101 @@ err_out_req: return rc; } +static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ) { + *tag = qc->tag; + return 1; + } + } + return 0; +} + +static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct domain_device *device = task->dev; + struct domain_device *parent_dev = device->parent; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + u8 *buf_cmd; + int has_data = 0, rc = 0, hdr_tag = 0; + u32 dw1 = 0, dw2 = 0; + + hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); + if (parent_dev && 
DEV_IS_EXPANDER(parent_dev->dev_type)) + hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); + else + hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); + + switch (task->data_dir) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + + if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && + (task->ata_task.fis.control & ATA_SRST)) + dw1 |= 1 << CMD_HDR_RESET_OFF; + + dw1 |= (hisi_sas_get_ata_protocol( + task->ata_task.fis.command, task->data_dir)) + << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + + if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command)) + dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; + + hdr->dw1 = cpu_to_le32(dw1); + + /* dw2 */ + if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; + } + + dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | + 2 << CMD_HDR_SG_MOD_OFF; + hdr->dw2 = cpu_to_le32(dw2); + + /* dw3 */ + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) { + rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + if (rc) + return rc; + } + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); + hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + + buf_cmd = slot->command_table; + + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + + return 0; +} + static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { int i, res = 0; @@ -1299,6 +1404,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .sl_notify = sl_notify_v3_hw, .prep_ssp = prep_ssp_v3_hw, .prep_smp = prep_smp_v3_hw, + .prep_stp = prep_ata_v3_hw, .get_free_slot = get_free_slot_v3_hw, .start_delivery = start_delivery_v3_hw, .slot_complete = slot_complete_v3_hw, -- cgit From 182e7222ebe0e638f8e22a1ad6724e8d128e96f2 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:28 +0800 Subject: scsi: hisi_sas: add v3 code for itct setup and free Add code to itct setup and free for v3 hw. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 114 +++++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 30c103b20d88..b9ab24d2fc57 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -24,6 +24,11 @@ #define PHY_PORT_NUM_MA 0x28 #define PHY_CONN_RATE 0x30 #define AXI_AHB_CLK_CFG 0x3c +#define ITCT_CLR 0x44 +#define ITCT_CLR_EN_OFF 16 +#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) +#define ITCT_DEV_OFF 0 +#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) #define AXI_USER1 0x48 #define AXI_USER2 0x4c #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 @@ -226,6 +231,26 @@ #define CMPLT_HDR_IO_IN_TARGET_OFF 17 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) +/* ITCT header */ +/* qw0 */ +#define ITCT_HDR_DEV_TYPE_OFF 0 +#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) +#define ITCT_HDR_VALID_OFF 2 +#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) +#define ITCT_HDR_MCR_OFF 5 +#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) +#define ITCT_HDR_VLN_OFF 9 +#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) +#define ITCT_HDR_SMP_TIMEOUT_OFF 16 +#define ITCT_HDR_AWT_CONTINUE_OFF 25 +#define ITCT_HDR_PORT_ID_OFF 28 +#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) +/* qw2 */ +#define ITCT_HDR_INLT_OFF 0 +#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) +#define ITCT_HDR_RTOLT_OFF 48 +#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) + struct hisi_sas_complete_v3_hdr { __le32 dw0; __le32 dw1; @@ -460,6 +485,93 @@ static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) __swab32(identify_buffer[5])); } +static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + struct domain_device *device = sas_dev->sas_device; + struct device *dev = hisi_hba->dev; + u64 qw0, device_id = sas_dev->device_id; + struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; + struct domain_device *parent_dev = device->parent; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + + memset(itct, 0, sizeof(*itct)); + + /* qw0 */ + qw0 = 0; + switch (sas_dev->dev_type) { + case SAS_END_DEVICE: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; + break; + case SAS_SATA_DEV: + case SAS_SATA_PENDING: + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; + else + qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; + break; + default: + dev_warn(dev, "setup itct: unsupported dev type (%d)\n", + sas_dev->dev_type); + } + + qw0 |= ((1 << ITCT_HDR_VALID_OFF) | + (device->linkrate << ITCT_HDR_MCR_OFF) | + (1 << ITCT_HDR_VLN_OFF) | + (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) | + (1 << ITCT_HDR_AWT_CONTINUE_OFF) | + (port->id << ITCT_HDR_PORT_ID_OFF)); + itct->qw0 = cpu_to_le64(qw0); + + /* qw1 */ + memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE); + itct->sas_addr = __swab64(itct->sas_addr); + + /* qw2 */ + if (!dev_is_sata(device)) + itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | + (0x1ULL << ITCT_HDR_RTOLT_OFF)); +} + +static void free_device_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + u64 dev_id = sas_dev->device_id; + struct device *dev = hisi_hba->dev; + struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; + u32 reg_val = 
hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + + /* clear the itct interrupt state */ + if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + ENT_INT_SRC3_ITC_INT_MSK); + + /* clear the itct table*/ + reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); + reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); + hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); + + udelay(10); + reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) { + dev_dbg(dev, "got clear ITCT done interrupt\n"); + + /* invalid the itct state*/ + memset(itct, 0, sizeof(struct hisi_sas_itct)); + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + ENT_INT_SRC3_ITC_INT_MSK); + hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED; + hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL; + + /* clear the itct */ + hisi_sas_write32(hisi_hba, ITCT_CLR, 0); + dev_dbg(dev, "clear ITCT ok\n"); + } +} + static int hw_init_v3_hw(struct hisi_hba *hisi_hba) { init_reg_v3_hw(hisi_hba); @@ -1399,8 +1511,10 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) static const struct hisi_sas_hw hisi_sas_v3_hw = { .hw_init = hisi_sas_v3_init, + .setup_itct = setup_itct_v3_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), + .free_device = free_device_v3_hw, .sl_notify = sl_notify_v3_hw, .prep_ssp = prep_ssp_v3_hw, .prep_smp = prep_smp_v3_hw, -- cgit From 4de0ca69e58d26c9038d4d4c290f3ed6a24a897a Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:29 +0800 Subject: scsi: hisi_sas: add v3 code to send internal abort command Add code to prepare internal abort command. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index b9ab24d2fc57..ef5c15817181 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -165,6 +165,10 @@ /* HW dma structures */ /* Delivery queue header */ /* dw0 */ +#define CMD_HDR_ABORT_FLAG_OFF 0 +#define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) +#define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 +#define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) #define CMD_HDR_RESP_REPORT_OFF 5 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 @@ -204,6 +208,11 @@ #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) #define CMD_HDR_DATA_SGL_LEN_OFF 16 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) +/* dw7 */ +#define CMD_HDR_ADDR_MODE_SEL_OFF 15 +#define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF) +#define CMD_HDR_ABORT_IPTT_OFF 16 +#define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) /* Completion header */ /* dw0 */ @@ -942,6 +951,34 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, return 0; } +static int prep_abort_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + int device_id, int abort_flag, int tag_to_abort) +{ + struct sas_task *task = slot->task; + struct domain_device *dev = task->dev; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct hisi_sas_port *port = slot->port; + + /* dw0 */ + hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ + (port->id << CMD_HDR_PORT_OFF) | + ((dev_is_sata(dev) ? 
1:0) + << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | + (abort_flag + << CMD_HDR_ABORT_FLAG_OFF)); + + /* dw1 */ + hdr->dw1 = cpu_to_le32(device_id + << CMD_HDR_DEV_ID_OFF); + + /* dw7 */ + hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF); + hdr->transfer_tags = cpu_to_le32(slot->idx); + + return 0; +} + static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { int i, res = 0; @@ -1519,6 +1556,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .prep_ssp = prep_ssp_v3_hw, .prep_smp = prep_smp_v3_hw, .prep_stp = prep_ata_v3_hw, + .prep_abort = prep_abort_v3_hw, .get_free_slot = get_free_slot_v3_hw, .start_delivery = start_delivery_v3_hw, .slot_complete = slot_complete_v3_hw, -- cgit From f771d3b08fe0e3ee9ac0226cc8f9d7930eb925da Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:30 +0800 Subject: scsi: hisi_sas: add get_wideport_bitmap_v3_hw() Add code for interface get_wideport_bitmap. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index ef5c15817181..3cd4b9a77e22 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -629,6 +629,18 @@ static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } +static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) +{ + int i, bitmap = 0; + u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + + for (i = 0; i < hisi_hba->n_phy; i++) + if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) + bitmap |= 1 << i; + + return bitmap; +} + /** * The callpath to this function and upto writing the write * queue pointer should be safe from interruption. @@ -1550,6 +1562,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .hw_init = hisi_sas_v3_init, .setup_itct = setup_itct_v3_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, + .get_wideport_bitmap = get_wideport_bitmap_v3_hw, .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), .free_device = free_device_v3_hw, .sl_notify = sl_notify_v3_hw, -- cgit From 402cd9f0ae9eefcdf9b6c0c87aadc32faec086ff Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:31 +0800 Subject: scsi: hisi_sas: add v3 code to fill some more hw function pointers Add code to fill the interface of phy_hard_reset, phy_get_max_linkrate, and phy enable/disable. Signed-off-by: John Garry Signed-off-by: Xiang Chen Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 39 ++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 3cd4b9a77e22..cf1eb472c404 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -129,6 +129,8 @@ #define TXID_AUTO (PORT_BASE + 0xb8) #define CT3_OFF 1 #define CT3_MSK (0x1 << CT3_OFF) +#define TX_HARDRST_OFF 2 +#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) @@ -596,6 +598,14 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } +static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { config_id_frame_v3_hw(hisi_hba, phy_no); @@ -603,6 +613,11 @@ static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) enable_phy_v3_hw(hisi_hba, phy_no); } +static void stop_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + disable_phy_v3_hw(hisi_hba, phy_no); +} + static void start_phys_v3_hw(struct hisi_hba *hisi_hba) { int i; @@ -611,6 +626,26 @@ static void start_phys_v3_hw(struct hisi_hba *hisi_hba) start_phy_v3_hw(hisi_hba, i); } +static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + u32 txid_auto; + + stop_phy_v3_hw(hisi_hba, phy_no); + if (phy->identify.device_type == SAS_END_DEVICE) { + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto | TX_HARDRST_MSK); + } + msleep(100); + start_phy_v3_hw(hisi_hba, phy_no); +} + +enum sas_linkrate phy_get_max_linkrate_v3_hw(void) +{ + return SAS_LINK_RATE_12_0_GBPS; +} + static void phys_init_v3_hw(struct hisi_hba *hisi_hba) { start_phys_v3_hw(hisi_hba); @@ -1574,6 +1609,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .start_delivery = start_delivery_v3_hw, .slot_complete = slot_complete_v3_hw, .phys_init = phys_init_v3_hw, + .phy_enable = enable_phy_v3_hw, + .phy_disable = disable_phy_v3_hw, + .phy_hard_reset = phy_hard_reset_v3_hw, + .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, }; static struct Scsi_Host * -- cgit From d30ff2632333dab794d3be14e16ca8c42dfc294d Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 14 Jun 2017 23:33:32 +0800 Subject: scsi: hisi_sas: modify internal abort dev flow for v3 hw There is a change for abort dev for v3 hw: add registers to configure unaborted iptt for a device, and then inform this to logic. Signed-off-by: Xiang Chen Signed-off-by: John Garry Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 2 ++ drivers/scsi/hisi_sas/hisi_sas_main.c | 15 +++++++++++++++ drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 28 ++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index e89f6aec844f..4fc23087a939 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -188,6 +188,8 @@ struct hisi_sas_hw { void (*free_device)(struct hisi_hba *hisi_hba, struct hisi_sas_device *dev); int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); + void (*dereg_device)(struct hisi_hba *hisi_hba, + struct domain_device *device); int (*soft_reset)(struct hisi_hba *hisi_hba); int max_command_entries; int complete_hdr_size; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 124a3ff569fd..e2f8d928e579 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -727,6 +727,13 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) } } +static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + if (hisi_hba->hw->dereg_device) + hisi_hba->hw->dereg_device(hisi_hba, device); +} + static void hisi_sas_dev_gone(struct domain_device *device) { struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -740,6 +747,8 @@ static void hisi_sas_dev_gone(struct domain_device *device) hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); + hisi_sas_dereg_device(hisi_hba, device); + hisi_hba->hw->free_device(hisi_hba, sas_dev); device->lldd_dev = NULL; memset(sas_dev, 0, sizeof(*sas_dev)); @@ -1071,6 +1080,7 @@ static int hisi_sas_abort_task(struct sas_task *task) if (task->dev->dev_type == SAS_SATA_DEV) { hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); + hisi_sas_dereg_device(hisi_hba, device); rc = hisi_sas_softreset_ata_disk(device); } } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { @@ -1137,6 +1147,10 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) return TMF_RESP_FUNC_FAILED; sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + hisi_sas_dereg_device(hisi_hba, device); + rc = hisi_sas_debug_I_T_nexus_reset(device); if (rc == TMF_RESP_FUNC_COMPLETE) { @@ -1164,6 +1178,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) HISI_SAS_INT_ABT_DEV, 0); if (rc == TMF_RESP_FUNC_FAILED) goto out; + hisi_sas_dereg_device(hisi_hba, device); phy = sas_get_local_phy(device); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index cf1eb472c404..c998b81f33de 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -50,6 +50,10 @@ #define CFG_ABT_SET_QUERY_IPTT 0xd4 #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) +#define CFG_SET_ABORTED_EN_OFF 12 +#define CFG_ABT_SET_IPTT_DONE 0xd8 +#define CFG_ABT_SET_IPTT_DONE_OFF 0 +#define HGC_IOMB_PROC1_STATUS 0x104 #define CFG_1US_TIMER_TRSH 0xcc #define CHNL_INT_STATUS 0x148 #define INT_COAL_EN 0x19c @@ -583,6 +587,29 @@ static void free_device_v3_hw(struct hisi_hba *hisi_hba, } } +static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + struct hisi_sas_slot *slot, *slot2; + struct hisi_sas_device *sas_dev = device->lldd_dev; + u32 cfg_abt_set_query_iptt; + + 
cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba, + CFG_ABT_SET_QUERY_IPTT); + list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) { + cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK; + cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) | + (slot->idx << CFG_SET_ABORTED_IPTT_OFF); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, + cfg_abt_set_query_iptt); + } + cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, + cfg_abt_set_query_iptt); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE, + 1 << CFG_ABT_SET_IPTT_DONE_OFF); +} + static int hw_init_v3_hw(struct hisi_hba *hisi_hba) { init_reg_v3_hw(hisi_hba); @@ -1613,6 +1640,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .phy_disable = disable_phy_v3_hw, .phy_hard_reset = phy_hard_reset_v3_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, + .dereg_device = dereg_device_v3_hw, }; static struct Scsi_Host * -- cgit From d41b65bcdc43fed82ea6b3c006a268df6c53e80d Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:41 -0700 Subject: scsi: lpfc: Fix system panic when express lane enabled. There is a null pointer dereference that can happen in the FOF interrupt handler. The driver was not setting up cq->assoc_qp_for sli4_hba->oas_cq. Initialize cq->assoc_qp before accessing it. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 040575adf9c6..4f2cc395597e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13560,6 +13560,9 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) return; } + /* Save EQ associated with this CQ */ + cq->assoc_qp = phba->sli4_hba.fof_eq; + /* Process all the entries to the OAS CQ */ while ((cqe = lpfc_sli4_cq_get(cq))) { workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); -- cgit From b4fd681e8a353b1d492c159d6effa070c3c00c23 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:42 -0700 Subject: scsi: lpfc: Fix nvme_info sysfs output to be consistent First line of nvme_info output is not consistent. There is an Extra colon in the format. First line of output will contain one of the following strings: NVME Initiator Enabled NVME Target Enabled NVME Disabled Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 66269e342c7e..af22602b1058 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -171,7 +171,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, else statep = "INIT"; len += snprintf(buf + len, PAGE_SIZE - len, - "NVME Target: Enabled State %s\n", + "NVME Target Enabled State %s\n", statep); len += snprintf(buf + len, PAGE_SIZE - len, "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", -- cgit From 56bc802842839174befc97749ea01d4004275c14 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:43 -0700 Subject: scsi: lpfc: Vport creation is failing with "Link Down" error Vport creation fails for SLI-3 adapters. Mailbox submission fails because mailbox interrupt is disabled. Mailbox interrupt is disabled during port reset. 
Do reset only for physical port. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_init.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 9d3a12636455..77283705eb8d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3691,14 +3691,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba) LPFC_MBOXQ_t *mboxq; MAILBOX_t *mb; - if (phba->sli_rev < LPFC_SLI_REV4) { - /* Reset the port first */ - lpfc_sli_brdrestart(phba); - rc = lpfc_sli_chipset_init(phba); - if (rc) - return (uint64_t)-1; - } - mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) @@ -3852,8 +3844,19 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) int i; uint64_t wwn; bool use_no_reset_hba = false; + int rc; - wwn = lpfc_get_wwpn(phba); + if (lpfc_no_hba_reset_cnt) { + if (phba->sli_rev < LPFC_SLI_REV4 && + dev == &phba->pcidev->dev) { + /* Reset the port first */ + lpfc_sli_brdrestart(phba); + rc = lpfc_sli_chipset_init(phba); + if (rc) + return NULL; + } + wwn = lpfc_get_wwpn(phba); + } for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { if (wwn == lpfc_no_hba_reset[i]) { -- cgit From 810ffa4789c8453caf0b876619c428bf64fef304 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:44 -0700 Subject: scsi: lpfc: Reduce time spent in IRQ for received NVME commands Removed an unnecessary bzero of the context area; due to the size of the sg list, it added a substantial delay and played havoc on cpu caches. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index dba1bd216be3..431faa0a4f3e 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -205,7 +205,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) sid = sli4_sid_from_fc_hdr(fc_hdr); ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; - memset(ctxp, 0, sizeof(ctxp->ctx)); ctxp->wqeq = NULL; ctxp->txrdy = NULL; ctxp->offset = 0; @@ -1422,7 +1421,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, "6414 NVMET Context corrupt %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); } - memset(ctxp, 0, sizeof(ctxp->ctx)); ctxp->wqeq = NULL; ctxp->txrdy = NULL; ctxp->offset = 0; -- cgit From 966bb5b7119607cf3d9a0d668eb67af67c2bab45 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:45 -0700 Subject: scsi: lpfc: Break up IO ctx list into a separate get and put list Since unsol rcv ISR and command cmpl ISR both access/lock this list, separate get/put lists will reduce contention. Replaced struct list_head lpfc_nvmet_ctx_list; with struct list_head lpfc_nvmet_ctx_get_list; struct list_head lpfc_nvmet_ctx_put_list; and all corresponding locks and counters. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K.
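The get/put split described in this commit message is a generic lock-contention pattern: the receive path consumes from a private "get" list while completions refill a separate "put" list under its own lock, and the two lists are only spliced together when the get list runs dry. The sketch below illustrates that pattern only; ctx_pool, ctx_pool_get() and ctx_pool_put() are made-up names, not the lpfc data structures, and only standard kernel list/spinlock primitives are assumed.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative sketch of the get/put list pattern; not the lpfc code. */
struct ctx_pool {
	spinlock_t get_lock;		/* taken by the receive path        */
	spinlock_t put_lock;		/* taken by the completion path     */
	struct list_head get_list;
	struct list_head put_list;
};

/* Completion side: touches only the put lock, so it rarely blocks the getter. */
static void ctx_pool_put(struct ctx_pool *pool, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->put_lock, flags);
	list_add_tail(node, &pool->put_list);
	spin_unlock_irqrestore(&pool->put_lock, flags);
}

/* Receive side: splice the put list over only when the get list is empty. */
static struct list_head *ctx_pool_get(struct ctx_pool *pool)
{
	struct list_head *node = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->get_lock, flags);
	if (list_empty(&pool->get_list)) {
		spin_lock(&pool->put_lock);
		list_splice_init(&pool->put_list, &pool->get_list);
		spin_unlock(&pool->put_lock);
	}
	if (!list_empty(&pool->get_list)) {
		node = pool->get_list.next;
		list_del_init(node);
	}
	spin_unlock_irqrestore(&pool->get_lock, flags);

	return node;
}

Because the two interrupt contexts normally take different locks, they only collide on the occasional splice rather than on every buffer, which is the contention reduction the patch below implements for the NVMET context buffers.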
Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 11 ++++-- drivers/scsi/lpfc/lpfc_debugfs.c | 11 ++++-- drivers/scsi/lpfc/lpfc_init.c | 16 +++++--- drivers/scsi/lpfc/lpfc_nvmet.c | 82 +++++++++++++++++++++++++++++----------- drivers/scsi/lpfc/lpfc_sli4.h | 9 +++-- 5 files changed, 89 insertions(+), 40 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index af22602b1058..4ed48ed38e79 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -245,15 +245,18 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&tgtp->xmt_abort_rsp), atomic_read(&tgtp->xmt_abort_rsp_error)); - spin_lock(&phba->sli4_hba.nvmet_io_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); tot = phba->sli4_hba.nvmet_xri_cnt - - phba->sli4_hba.nvmet_ctx_cnt; - spin_unlock(&phba->sli4_hba.nvmet_io_lock); + (phba->sli4_hba.nvmet_ctx_get_cnt + + phba->sli4_hba.nvmet_ctx_put_cnt); + spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); + spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); len += snprintf(buf + len, PAGE_SIZE - len, "IO_CTX: %08x WAIT: cur %08x tot %08x\n" "CTX Outstanding %08llx\n", - phba->sli4_hba.nvmet_ctx_cnt, + phba->sli4_hba.nvmet_xri_cnt, phba->sli4_hba.nvmet_io_wait_cnt, phba->sli4_hba.nvmet_io_wait_total, tot); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index cc49850e18a9..ed2850645e70 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -848,15 +848,18 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); } - spin_lock(&phba->sli4_hba.nvmet_io_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); tot = phba->sli4_hba.nvmet_xri_cnt - - phba->sli4_hba.nvmet_ctx_cnt; - spin_unlock(&phba->sli4_hba.nvmet_io_lock); + (phba->sli4_hba.nvmet_ctx_get_cnt + + phba->sli4_hba.nvmet_ctx_put_cnt); + spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); + spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); len += snprintf(buf + len, size - len, "IO_CTX: %08x WAIT: cur %08x tot %08x\n" "CTX Outstanding %08llx\n", - phba->sli4_hba.nvmet_ctx_cnt, + phba->sli4_hba.nvmet_xri_cnt, phba->sli4_hba.nvmet_io_wait_cnt, phba->sli4_hba.nvmet_io_wait_total, tot); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 77283705eb8d..7e73fdc154f7 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1281,10 +1281,13 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) /* Check outstanding IO count */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->nvmet_support) { - spin_lock(&phba->sli4_hba.nvmet_io_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); tot = phba->sli4_hba.nvmet_xri_cnt - - phba->sli4_hba.nvmet_ctx_cnt; - spin_unlock(&phba->sli4_hba.nvmet_io_lock); + (phba->sli4_hba.nvmet_ctx_get_cnt + + phba->sli4_hba.nvmet_ctx_put_cnt); + spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); + spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); } else { tot = atomic_read(&phba->fc4NvmeIoCmpls); data1 = atomic_read( @@ -3487,7 +3490,6 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { /* els xri-sgl expanded */ 
xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; @@ -5935,7 +5937,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); /* Fast-path XRI aborted CQ Event work queue list */ @@ -5944,7 +5947,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* This abort list used by worker thread */ spin_lock_init(&phba->sli4_hba.sgl_list_lock); - spin_lock_init(&phba->sli4_hba.nvmet_io_lock); + spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock); + spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock); spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); /* diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 431faa0a4f3e..5fb29735e236 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -267,11 +267,11 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) } spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); - spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag); + spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag); list_add_tail(&ctx_buf->list, - &phba->sli4_hba.lpfc_nvmet_ctx_list); - phba->sli4_hba.nvmet_ctx_cnt++; - spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag); + &phba->sli4_hba.lpfc_nvmet_ctx_put_list); + phba->sli4_hba.nvmet_ctx_put_cnt++; + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag); #endif } @@ -865,28 +865,46 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; unsigned long flags; - list_for_each_entry_safe( - ctx_buf, next_ctx_buf, - &phba->sli4_hba.lpfc_nvmet_ctx_list, list) { - spin_lock_irqsave( - &phba->sli4_hba.abts_nvme_buf_list_lock, flags); + spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags); + spin_lock_irq(&phba->sli4_hba.nvmet_ctx_put_lock); + list_for_each_entry_safe(ctx_buf, next_ctx_buf, + &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) { + spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); list_del_init(&ctx_buf->list); - spin_unlock_irqrestore( - &phba->sli4_hba.abts_nvme_buf_list_lock, flags); + spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); ctx_buf->sglq->state = SGL_FREED; ctx_buf->sglq->ndlp = NULL; - spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags); + spin_lock_irq(&phba->sli4_hba.sgl_list_lock); list_add_tail(&ctx_buf->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); - spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, - flags); + spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); } + list_for_each_entry_safe(ctx_buf, next_ctx_buf, + &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) { + spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); + list_del_init(&ctx_buf->list); + spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); + __lpfc_clear_active_sglq(phba, + ctx_buf->sglq->sli4_lxritag); + ctx_buf->sglq->state = SGL_FREED; + ctx_buf->sglq->ndlp = NULL; + + spin_lock_irq(&phba->sli4_hba.sgl_list_lock); + list_add_tail(&ctx_buf->sglq->list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + 
spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); + + lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); + kfree(ctx_buf->context); + } + spin_unlock_irq(&phba->sli4_hba.nvmet_ctx_put_lock); + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags); } static int @@ -958,12 +976,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } - spin_lock(&phba->sli4_hba.nvmet_io_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); list_add_tail(&ctx_buf->list, - &phba->sli4_hba.lpfc_nvmet_ctx_list); - spin_unlock(&phba->sli4_hba.nvmet_io_lock); + &phba->sli4_hba.lpfc_nvmet_ctx_get_list); + spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); } - phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt; + phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt; return 0; } @@ -1370,13 +1388,31 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, goto dropit; } - spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag); - if (phba->sli4_hba.nvmet_ctx_cnt) { - list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list, + spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag); + if (phba->sli4_hba.nvmet_ctx_get_cnt) { + list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list, ctx_buf, struct lpfc_nvmet_ctxbuf, list); - phba->sli4_hba.nvmet_ctx_cnt--; + phba->sli4_hba.nvmet_ctx_get_cnt--; + } else { + spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); + if (phba->sli4_hba.nvmet_ctx_put_cnt) { + list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list, + &phba->sli4_hba.lpfc_nvmet_ctx_get_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list); + phba->sli4_hba.nvmet_ctx_get_cnt = + phba->sli4_hba.nvmet_ctx_put_cnt; + phba->sli4_hba.nvmet_ctx_put_cnt = 0; + spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); + + list_remove_head( + &phba->sli4_hba.lpfc_nvmet_ctx_get_list, + ctx_buf, struct lpfc_nvmet_ctxbuf, list); + phba->sli4_hba.nvmet_ctx_get_cnt--; + } else { + spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); + } } - spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag); + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag); fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); oxid = be16_to_cpu(fc_hdr->fh_ox_id); diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 830dc83b9c21..7a1d74e9e877 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -621,7 +621,8 @@ struct lpfc_sli4_hba { uint16_t scsi_xri_start; uint16_t els_xri_cnt; uint16_t nvmet_xri_cnt; - uint16_t nvmet_ctx_cnt; + uint16_t nvmet_ctx_get_cnt; + uint16_t nvmet_ctx_put_cnt; uint16_t nvmet_io_wait_cnt; uint16_t nvmet_io_wait_total; struct list_head lpfc_els_sgl_list; @@ -630,7 +631,8 @@ struct lpfc_sli4_hba { struct list_head lpfc_abts_nvmet_ctx_list; struct list_head lpfc_abts_scsi_buf_list; struct list_head lpfc_abts_nvme_buf_list; - struct list_head lpfc_nvmet_ctx_list; + struct list_head lpfc_nvmet_ctx_get_list; + struct list_head lpfc_nvmet_ctx_put_list; struct list_head lpfc_nvmet_io_wait_list; struct lpfc_sglq **lpfc_sglq_active_list; struct list_head lpfc_rpi_hdr_list; @@ -662,7 +664,8 @@ struct lpfc_sli4_hba { spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t sgl_list_lock; /* list of aborted els IOs */ - spinlock_t nvmet_io_lock; + spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */ + spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */ spinlock_t nvmet_io_wait_lock; /* 
IOs waiting for ctx resources */ uint32_t physical_port; -- cgit From 09559e81121f9663e8f1f6ed05672c49e31be3db Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:46 -0700 Subject: scsi: lpfc: Fix SLI3 drivers attempting NVME ELS commands. In a server with an 8G adapter and a 32G adapter, running NVME and FCP, the server would crash with the following stack. RIP: 0010: ... lpfc_nvme_register_port+0x38/0x420 [lpfc] lpfc_nlp_state_cleanup+0x154/0x4f0 [lpfc] lpfc_nlp_set_state+0x9d/0x1a0 [lpfc] lpfc_cmpl_prli_prli_issue+0x35f/0x440 [lpfc] lpfc_disc_state_machine+0x78/0x1c0 [lpfc] lpfc_cmpl_els_prli+0x17c/0x1f0 [lpfc] lpfc_sli_sp_handle_rspiocb+0x39b/0x6b0 [lpfc] lpfc_sli_handle_slow_ring_event_s3+0x134/0x2d0 [lpfc] lpfc_work_done+0x8ac/0x13b0 [lpfc] lpfc_do_work+0xf1/0x1b0 [lpfc] Crash, on the 8G adapter, is due to a vport which does not have a nvme local port structure. It's not supposed to have one. NVME is not supported on the 8G adapter, so the NVME PRLI, which started this flow shouldn't have been sent in the first place. Correct discovery engine to recognize when on an SLI3 rport, which doesn't support SLI3, if the rport supports only NVME, don't send a NVME PRLI. Instead, as no FC4 will be used, a LOGO is sent. If rport is FCP and NVME, only execute the SCSI PRLI. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_els.c | 16 +++++++++++++++- drivers/scsi/lpfc/lpfc_hbadisc.c | 3 ++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index a140318d6159..54de984d695f 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2168,6 +2168,19 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_fc4_type, ndlp->nlp_DID); return 1; } + + /* SLI3 ports don't support NVME. If this rport is a strict NVME + * FC4 type, implicitly LOGO. + */ + if (phba->sli_rev == LPFC_SLI_REV3 && + ndlp->nlp_fc4_type == NLP_FC4_NVME) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", + ndlp->nlp_type); + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + return 1; + } + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, elscmd); if (!elsiocb) @@ -2268,7 +2281,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* The driver supports 2 FC4 types. Make sure * a PRLI is issued for all types before exiting. */ - if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) + if (phba->sli_rev == LPFC_SLI_REV4 && + local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) goto send_next_prli; return 0; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index db2d0e692ddf..aa5e5ff56dfb 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4194,7 +4194,8 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_register_remote_port(vport, ndlp); } /* Notify the NVME transport of this new rport. */ - if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { + if (vport->phba->sli_rev >= LPFC_SLI_REV4 && + ndlp->nlp_fc4_type & NLP_FC4_NVME) { if (vport->phba->nvmet_support == 0) { /* Register this rport with the transport. 
* Initiators take the NDLP ref count in -- cgit From 569dbe84a3e769009aa4a5d1030d000168889580 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:47 -0700 Subject: scsi: lpfc: Fix crash after firmware flash when IO is running. OS crashes after the completion of firmware download. Failure in posting SCSI SGL buffers because number of SGL buffers is less than total count. Some of the pending IOs are not completed by driver. SGL buffers for these IOs are not added back to the list. Pending IOs are not completed because lpfc_wq_list list is initialized before completion of pending IOs. Postpone lpfc_wq_list reinitialization by moving lpfc_sli4_queue_destroy() after lpfc_hba_down_post(). Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4f2cc395597e..8de70b9d79dd 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4303,7 +4303,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) /* Perform FCoE PCI function reset before freeing queue memory */ rc = lpfc_pci_function_reset(phba); - lpfc_sli4_queue_destroy(phba); /* Restore PCI cmd register */ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); @@ -4428,6 +4427,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) pci_disable_pcie_error_reporting(phba->pcidev); lpfc_hba_down_post(phba); + lpfc_sli4_queue_destroy(phba); return rc; } -- cgit From 11e644e2a2afa34a4d0ca896cf722572317b21ed Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:48 -0700 Subject: scsi: lpfc: Fix crash doing IO with resets During every reset, IOCBs are allocated. So, at one point, number of allocated IOCBs reaches maximum limit and lpfc_sli_next_iotag fails. Allocate IOCBs only during initialization. Reuse them after every reset instead of allocating new set of IOCBs. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
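The fix amounts to lazy one-time initialization: the IOCB pool is built the first time setup runs and then reused across resets, so iotags are never handed out twice (the patch below keys this off phba->sli.iocbq_lookup). A distilled sketch of the guard, using hypothetical names rather than the lpfc ones:

#include <linux/slab.h>
#include <linux/types.h>

struct example_iocb {
	u16 iotag;
};

struct example_hba {
	struct example_iocb *iocb_pool;	/* allocated once, reused on reset */
	unsigned int iocb_cnt;
};

static int example_hba_setup(struct example_hba *hba)
{
	if (!hba->iocb_pool) {
		/* First initialization only: allocate and tag the pool once. */
		hba->iocb_pool = kcalloc(hba->iocb_cnt,
					 sizeof(*hba->iocb_pool), GFP_KERNEL);
		if (!hba->iocb_pool)
			return -ENOMEM;
	}

	/* The reset path falls through here and reuses the existing pool. */
	return 0;
}

Re-allocating in the setup path on every reset, by contrast, keeps consuming new iotags until lpfc_sli_next_iotag hits its ceiling, which is the failure the commit message describes.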
Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8de70b9d79dd..e948ea05fd33 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -6927,18 +6927,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) cnt = phba->cfg_iocb_cnt * 1024; /* We need 1 iocbq for every SGL, for IO processing */ cnt += phba->sli4_hba.nvmet_xri_cnt; - /* Initialize and populate the iocb list per host */ - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2821 initialize iocb list %d total %d\n", - phba->cfg_iocb_cnt, cnt); - rc = lpfc_init_iocb_list(phba, cnt); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1413 Failed to init iocb list.\n"); - goto out_destroy_queue; - } - - lpfc_nvmet_create_targetport(phba); } else { /* update host scsi xri-sgl sizes and mappings */ rc = lpfc_sli4_scsi_sgl_update(phba); @@ -6959,18 +6947,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) } cnt = phba->cfg_iocb_cnt * 1024; + } + + if (!phba->sli.iocbq_lookup) { /* Initialize and populate the iocb list per host */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2820 initialize iocb list %d total %d\n", + "2821 initialize iocb list %d total %d\n", phba->cfg_iocb_cnt, cnt); rc = lpfc_init_iocb_list(phba, cnt); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6301 Failed to init iocb list.\n"); + "1413 Failed to init iocb list.\n"); goto out_destroy_queue; } } + if (phba->nvmet_support) + lpfc_nvmet_create_targetport(phba); + if (phba->nvmet_support && phba->cfg_nvmet_mrq) { /* Post initial buffers to all RQs created */ for (i = 0; i < phba->cfg_nvmet_mrq; i++) { -- cgit From 4550f9c75e6abdc1f80170adf74d7610d059afd7 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:49 -0700 Subject: scsi: lpfc: Fix crash in lpfc_sli_ringtxcmpl_put when nvmet gets an abort request. When running nvme detach-ns /dev/nvme0n1 -n 1 command, the nvmet lpfc driver crashes with this stack dump: kernel BUG at /root/NVME/lpfc_8.4/lpfc_sli.c:1393! invalid opcode: 0000 [#1] SMP Workqueue: nvmet-fc-cpu0 nvmet_fc_do_work_on_cpu [nvmet_fc] lpfc_sli4_issue_wqe+0x357/0x440 [lpfc] lpfc_nvmet_xmt_fcp_abort+0x36b/0x5c0 [lpfc] nvmet_fc_abort_op+0x30/0x50 [nvmet_fc] nvmet_fc_do_work_on_cpu+0xd9/0x130 [nvmet_fc] process_one_work+0x14e/0x410 worker_thread+0x116/0x490 kthread+0xc7/0xe0 ret_from_fork+0x3f/0x70 Crash is due to an uninitialized iocbq->vport pointer. Explicitly set the iocbq->vport field to phba->pport in lpfc_nvmet_sol_fcp_issue_abort as it does all abort iocbq initialization in the routine. Using phba->pport is ok because target does not support NPIV instances. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 5fb29735e236..7dc061a14f95 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2523,6 +2523,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, abts_wqeq->iocb_cmpl = 0; abts_wqeq->iocb_flag |= LPFC_IO_NVME; abts_wqeq->context2 = ctxp; + abts_wqeq->vport = phba->pport; rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { -- cgit From d6564e52605c55e9d63441782b634b6f131b73c9 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:50 -0700 Subject: scsi: lpfc: Driver responds LS_RJT to Beacon Off ELS - Linux Beacon OFF from switch is rejected by driver. Driver fails Beacon OFF if frequency is set to 0. As per fc-ls spec, status, capability, frequency and duration fields are only applicable for Beacon ON. Remove frequency and type checks. Reject Beacon ON if duration is non zero. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_els.c | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 54de984d695f..6d1d6f691df4 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -5703,27 +5703,13 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, rjt_err = LSRJT_CMD_UNSUPPORTED; goto rjt; } - if (beacon->lcb_frequency == 0) { + if (beacon->lcb_sub_command != LPFC_LCB_ON && + beacon->lcb_sub_command != LPFC_LCB_OFF) { rjt_err = LSRJT_CMD_UNSUPPORTED; goto rjt; } - if ((beacon->lcb_type != LPFC_LCB_GREEN) && - (beacon->lcb_type != LPFC_LCB_AMBER)) { - rjt_err = LSRJT_CMD_UNSUPPORTED; - goto rjt; - } - if ((beacon->lcb_sub_command != LPFC_LCB_ON) && - (beacon->lcb_sub_command != LPFC_LCB_OFF)) { - rjt_err = LSRJT_CMD_UNSUPPORTED; - goto rjt; - } - if ((beacon->lcb_sub_command == LPFC_LCB_ON) && - (beacon->lcb_type != LPFC_LCB_GREEN) && - (beacon->lcb_type != LPFC_LCB_AMBER)) { - rjt_err = LSRJT_CMD_UNSUPPORTED; - goto rjt; - } - if (be16_to_cpu(beacon->lcb_duration) != 0) { + if (beacon->lcb_sub_command == LPFC_LCB_ON && + be16_to_cpu(beacon->lcb_duration) != 0) { rjt_err = LSRJT_CMD_UNSUPPORTED; goto rjt; } -- cgit From b609b1c753364c6bf6f8666c82047d239fba6889 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 15 Jun 2017 22:56:51 -0700 Subject: scsi: lpfc: update to revision to 11.4.0.1 Set lpfc driver revision to 11.4.0.1 Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 067c9e8a4b2d..c6a24c3e2d5e 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.4.0.0" +#define LPFC_DRIVER_VERSION "11.4.0.1" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ -- cgit From 5cc973f09e21b5a2f746307641879bc9f1da623b Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 15 May 2017 15:56:05 +0100 Subject: scsi: aacraid: fix leak of data from stack back to userspace The fields sense_data_size and sense_data are uninitialized garbage from the stack and are being copied back to userspace. Fix this leak of stack information by ensuring they are zero'd. Detected by CoverityScan, CID#1435473 ("Uninitialized scalar variable") Fixes: 423400e64d377 ("scsi: aacraid: Include HBA direct interface") Signed-off-by: Colin Ian King Acked-by: Dave Carroll Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/commctrl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 106b9332f718..6bb6ed48e31d 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -955,6 +955,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) reply.srb_status = SRB_STATUS_SUCCESS; reply.scsi_status = 0; reply.data_xfer_length = byte_count; + reply.sense_data_size = 0; + memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE); } else { reply.srb_status = err->service_response; reply.scsi_status = err->status; -- cgit From 3930d7309807ba0bfa460dfa9ed68d5560347dd2 Mon Sep 17 00:00:00 2001 From: Long Li Date: Tue, 13 Jun 2017 14:34:05 -0700 Subject: scsi: storvsc: use default I/O timeout handler for FC devices FC disks issue I/O directly to the host storage port driver; this is different from VHD disks, where I/O is virtualized and the timeout is handled by the host VSP (Virtualization Service Provider). FC disks are usually set up in a multipath system, and multipath does not want the timer reset on an I/O timeout: a timeout is taken by multipath as a good time to fail over and recover. Signed-off-by: Long Li Signed-off-by: Martin K. Petersen --- drivers/scsi/storvsc_drv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 8d955db6424f..3cc8d67783a1 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1495,6 +1495,10 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) */ static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) { +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (scmnd->device->host->transportt == fc_transport_template) + return fc_eh_timed_out(scmnd); +#endif return BLK_EH_RESET_TIMER; } -- cgit From 496c91bbc9109ff99907d2a94fccb7d8d4349010 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Jun 2017 14:27:23 +0200 Subject: scsi: remove various unused blist flags Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Martin K.
Petersen --- drivers/scsi/scsi_scan.c | 25 ++----------------------- include/scsi/scsi_devinfo.h | 9 --------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index e6de4eee97a3..3c4403210a1a 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -656,8 +656,6 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, if (pass == 1) { if (BLIST_INQUIRY_36 & *bflags) next_inquiry_len = 36; - else if (BLIST_INQUIRY_58 & *bflags) - next_inquiry_len = 58; else if (sdev->inquiry_len) next_inquiry_len = sdev->inquiry_len; else @@ -927,15 +925,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->use_10_for_rw = 1; - if (*bflags & BLIST_MS_SKIP_PAGE_08) - sdev->skip_ms_page_8 = 1; - - if (*bflags & BLIST_MS_SKIP_PAGE_3F) - sdev->skip_ms_page_3f = 1; - - if (*bflags & BLIST_USE_10_BYTE_MS) - sdev->use_10_for_ms = 1; - /* some devices don't like REPORT SUPPORTED OPERATION CODES * and will simply timeout causing sd_mod init to take a very * very long time */ @@ -957,9 +946,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, return SCSI_SCAN_NO_RESPONSE; } - if (*bflags & BLIST_MS_192_BYTES_FOR_3F) - sdev->use_192_bytes_for_3f = 1; - if (*bflags & BLIST_NOT_LOCKABLE) sdev->lockable = 0; @@ -969,9 +955,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, if (*bflags & BLIST_NO_DIF) sdev->no_dif = 1; - if (*bflags & BLIST_SYNC_ALUA) - sdev->synchronous_alua = 1; - sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; if (*bflags & BLIST_TRY_VPD_PAGES) @@ -1109,7 +1092,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, /* * result contains valid SCSI INQUIRY data. */ - if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) { + if ((result[0] >> 5) == 3) { /* * For a Peripheral qualifier 3 (011b), the SCSI * spec says: The device server is not capable of @@ -1267,11 +1250,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, */ if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN)) max_dev_lun = min(8U, max_dev_lun); - - /* - * Stop scanning at 255 unless BLIST_SCSI3LUN - */ - if (!(bflags & BLIST_SCSI3LUN)) + else max_dev_lun = min(256U, max_dev_lun); /* diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 9f750cb63b03..9592570e092a 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h @@ -15,12 +15,7 @@ #define BLIST_ISROM 0x100 /* Treat as (removable) CD-ROM */ #define BLIST_LARGELUN 0x200 /* LUNs past 7 on a SCSI-2 device */ #define BLIST_INQUIRY_36 0x400 /* override additional length field */ -#define BLIST_INQUIRY_58 0x800 /* ... 
for broken inquiry responses */ #define BLIST_NOSTARTONADD 0x1000 /* do not do automatic start on add */ -#define BLIST_MS_SKIP_PAGE_08 0x2000 /* do not send ms page 0x08 */ -#define BLIST_MS_SKIP_PAGE_3F 0x4000 /* do not send ms page 0x3f */ -#define BLIST_USE_10_BYTE_MS 0x8000 /* use 10 byte ms before 6 byte ms */ -#define BLIST_MS_192_BYTES_FOR_3F 0x10000 /* 192 byte ms page 0x3f request */ #define BLIST_REPORTLUN2 0x20000 /* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */ #define BLIST_NOREPORTLUN 0x40000 /* don't try REPORT_LUNS scan (SCSI-3 devs) */ @@ -29,14 +24,10 @@ #define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */ #define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */ #define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */ -#define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */ #define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */ #define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */ -#define BLIST_SCSI3LUN 0x8000000 /* Scan more than 256 LUNs - for sequential scan */ #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ -#define BLIST_SYNC_ALUA 0x80000000 /* Synchronous ALUA commands */ #endif -- cgit From 6934be4f016311cf88c316dad5c75e4ccf8a7fc0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Jun 2017 14:27:24 +0200 Subject: scsi: scsi_dh_alua: remove synchronous STPG support Since 9c58b395 ("scsi: scsi_devinfo: remove synchronous ALUA for NETAPP devices") this code is unused. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/device_handler/scsi_dh_alua.c | 27 +++++---------------------- include/scsi/scsi_device.h | 1 - 2 files changed, 5 insertions(+), 23 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index c01b47e5b55a..0962fd544401 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -57,7 +57,6 @@ /* device handler flags */ #define ALUA_OPTIMIZE_STPG 0x01 #define ALUA_RTPG_EXT_HDR_UNSUPP 0x02 -#define ALUA_SYNC_STPG 0x04 /* State machine flags */ #define ALUA_PG_RUN_RTPG 0x10 #define ALUA_PG_RUN_STPG 0x20 @@ -70,7 +69,6 @@ MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than static LIST_HEAD(port_group_list); static DEFINE_SPINLOCK(port_group_lock); static struct workqueue_struct *kaluad_wq; -static struct workqueue_struct *kaluad_sync_wq; struct alua_port_group { struct kref kref; @@ -380,8 +378,6 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, } spin_lock_irqsave(&pg->lock, flags); - if (sdev->synchronous_alua) - pg->flags |= ALUA_SYNC_STPG; if (pg_updated) list_add_rcu(&h->node, &pg->dh_list); spin_unlock_irqrestore(&pg->lock, flags); @@ -785,7 +781,6 @@ static void alua_rtpg_work(struct work_struct *work) int err = SCSI_DH_OK; struct alua_queue_data *qdata, *tmp; unsigned long flags; - struct workqueue_struct *alua_wq = kaluad_wq; spin_lock_irqsave(&pg->lock, flags); sdev = pg->rtpg_sdev; @@ -796,8 +791,6 @@ static void alua_rtpg_work(struct work_struct *work) kref_put(&pg->kref, release_port_group); return; } - if (pg->flags & ALUA_SYNC_STPG) - alua_wq = kaluad_sync_wq; pg->flags |= ALUA_PG_RUNNING; if (pg->flags & ALUA_PG_RUN_RTPG) { int state = pg->state; @@ -810,7 +803,7 @@ 
static void alua_rtpg_work(struct work_struct *work) pg->flags &= ~ALUA_PG_RUNNING; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); - queue_delayed_work(alua_wq, &pg->rtpg_work, + queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); return; } @@ -822,7 +815,7 @@ static void alua_rtpg_work(struct work_struct *work) pg->flags &= ~ALUA_PG_RUNNING; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); - queue_delayed_work(alua_wq, &pg->rtpg_work, + queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); return; } @@ -839,7 +832,7 @@ static void alua_rtpg_work(struct work_struct *work) pg->interval = 0; pg->flags &= ~ALUA_PG_RUNNING; spin_unlock_irqrestore(&pg->lock, flags); - queue_delayed_work(alua_wq, &pg->rtpg_work, + queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); return; } @@ -874,8 +867,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, { int start_queue = 0; unsigned long flags; - struct workqueue_struct *alua_wq = kaluad_wq; - if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) return false; @@ -900,12 +891,10 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, } } - if (pg->flags & ALUA_SYNC_STPG) - alua_wq = kaluad_sync_wq; spin_unlock_irqrestore(&pg->lock, flags); if (start_queue) { - if (queue_delayed_work(alua_wq, &pg->rtpg_work, + if (queue_delayed_work(kaluad_wq, &pg->rtpg_work, msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) sdev = NULL; else @@ -1166,16 +1155,11 @@ static int __init alua_init(void) /* Temporary failure, bypass */ return SCSI_DH_DEV_TEMP_BUSY; } - kaluad_sync_wq = create_workqueue("kaluad_sync"); - if (!kaluad_sync_wq) { - destroy_workqueue(kaluad_wq); - return SCSI_DH_DEV_TEMP_BUSY; - } + r = scsi_register_device_handler(&alua_dh); if (r != 0) { printk(KERN_ERR "%s: Failed to register scsi device handler", ALUA_DH_NAME); - destroy_workqueue(kaluad_sync_wq); destroy_workqueue(kaluad_wq); } return r; @@ -1184,7 +1168,6 @@ static int __init alua_init(void) static void __exit alua_exit(void) { scsi_unregister_device_handler(&alua_dh); - destroy_workqueue(kaluad_sync_wq); destroy_workqueue(kaluad_wq); } diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index d13bc80825b1..d3fb98f72a03 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -181,7 +181,6 @@ struct scsi_device { unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ unsigned broken_fua:1; /* Don't set FUA bit */ unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ - unsigned synchronous_alua:1; /* Synchronous ALUA commands */ atomic_t disk_events_disable_depth; /* disable depth for disk events */ -- cgit From 4c11712ad0e1aa38154854a2a3efb1f4a90a059d Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Thu, 25 May 2017 09:34:30 -0400 Subject: scsi: sd: Use sysfs_match_string() Avoid unnecessary snprintf() when formatting variables for display in sysfs and switch to sysfs_match_string() for validating user input. Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
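sysfs_match_string() compares the user buffer against every entry of a dense string array (it stops at the first NULL entry, which is why the patch adds the "requires dense arrays" comments), tolerates the trailing newline users typically echo in, and returns the matching index or -EINVAL. A hedged sketch of the resulting store/show shape, using an invented attribute and private struct rather than the actual sd.c code:

#include <linux/device.h>
#include <linux/string.h>
#include <linux/sysfs.h>

/* Illustrative only; example_modes/example_priv are not part of sd.c. */
static const char * const example_modes[] = {
	[0] = "off",
	[1] = "on",
	[2] = "auto",
};

struct example_priv {
	int mode;
};

static ssize_t example_mode_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct example_priv *priv = dev_get_drvdata(dev);
	int mode;

	mode = sysfs_match_string(example_modes, buf);	/* index or -EINVAL */
	if (mode < 0)
		return -EINVAL;

	priv->mode = mode;
	return count;
}

static ssize_t example_mode_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", example_modes[priv->mode]);
}

This is the shape the patch gives cache_type_store(), provisioning_mode_store() and zeroing_mode_store(): the hand-rolled strncmp() loops disappear, and the show side can use plain sprintf() since the emitted strings are known to fit in the sysfs page.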
Petersen --- drivers/scsi/sd.c | 71 +++++++++++++++++++++++-------------------------------- 1 file changed, 29 insertions(+), 42 deletions(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 53b6d72c214d..7dc8cb8f1818 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -155,7 +155,7 @@ static ssize_t cache_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int i, ct = -1, rcd, wce, sp; + int ct, rcd, wce, sp; struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; char buffer[64]; @@ -178,16 +178,10 @@ cache_type_store(struct device *dev, struct device_attribute *attr, sdkp->cache_override = 0; } - for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { - len = strlen(sd_cache_types[i]); - if (strncmp(sd_cache_types[i], buf, len) == 0 && - buf[len] == '\n') { - ct = i; - break; - } - } + ct = sysfs_match_string(sd_cache_types, buf); if (ct < 0) return -EINVAL; + rcd = ct & 0x01 ? 1 : 0; wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; @@ -227,7 +221,7 @@ manage_start_stop_show(struct device *dev, struct device_attribute *attr, struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; - return snprintf(buf, 20, "%u\n", sdp->manage_start_stop); + return sprintf(buf, "%u\n", sdp->manage_start_stop); } static ssize_t @@ -251,7 +245,7 @@ allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); + return sprintf(buf, "%u\n", sdkp->device->allow_restart); } static ssize_t @@ -279,7 +273,7 @@ cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) struct scsi_disk *sdkp = to_scsi_disk(dev); int ct = sdkp->RCD + 2*sdkp->WCE; - return snprintf(buf, 40, "%s\n", sd_cache_types[ct]); + return sprintf(buf, "%s\n", sd_cache_types[ct]); } static DEVICE_ATTR_RW(cache_type); @@ -288,7 +282,7 @@ FUA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); + return sprintf(buf, "%u\n", sdkp->DPOFUA); } static DEVICE_ATTR_RO(FUA); @@ -298,7 +292,7 @@ protection_type_show(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->protection_type); + return sprintf(buf, "%u\n", sdkp->protection_type); } static ssize_t @@ -341,9 +335,9 @@ protection_mode_show(struct device *dev, struct device_attribute *attr, } if (!dif && !dix) - return snprintf(buf, 20, "none\n"); + return sprintf(buf, "none\n"); - return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif); + return sprintf(buf, "%s%u\n", dix ? 
"dix" : "dif", dif); } static DEVICE_ATTR_RO(protection_mode); @@ -352,7 +346,7 @@ app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->ATO); + return sprintf(buf, "%u\n", sdkp->ATO); } static DEVICE_ATTR_RO(app_tag_own); @@ -362,10 +356,11 @@ thin_provisioning_show(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->lbpme); + return sprintf(buf, "%u\n", sdkp->lbpme); } static DEVICE_ATTR_RO(thin_provisioning); +/* sysfs_match_string() requires dense arrays */ static const char *lbp_mode[] = { [SD_LBP_FULL] = "full", [SD_LBP_UNMAP] = "unmap", @@ -381,7 +376,7 @@ provisioning_mode_show(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]); + return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]); } static ssize_t @@ -390,6 +385,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; + int mode; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -402,23 +398,17 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr, if (sdp->type != TYPE_DISK) return -EINVAL; - if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20)) - sd_config_discard(sdkp, SD_LBP_UNMAP); - else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20)) - sd_config_discard(sdkp, SD_LBP_WS16); - else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20)) - sd_config_discard(sdkp, SD_LBP_WS10); - else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20)) - sd_config_discard(sdkp, SD_LBP_ZERO); - else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20)) - sd_config_discard(sdkp, SD_LBP_DISABLE); - else + mode = sysfs_match_string(lbp_mode, buf); + if (mode < 0) return -EINVAL; + sd_config_discard(sdkp, mode); + return count; } static DEVICE_ATTR_RW(provisioning_mode); +/* sysfs_match_string() requires dense arrays */ static const char *zeroing_mode[] = { [SD_ZERO_WRITE] = "write", [SD_ZERO_WS] = "writesame", @@ -432,7 +422,7 @@ zeroing_mode_show(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%s\n", zeroing_mode[sdkp->zeroing_mode]); + return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]); } static ssize_t @@ -440,21 +430,17 @@ zeroing_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); + int mode; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (!strncmp(buf, zeroing_mode[SD_ZERO_WRITE], 20)) - sdkp->zeroing_mode = SD_ZERO_WRITE; - else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS], 20)) - sdkp->zeroing_mode = SD_ZERO_WS; - else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS16_UNMAP], 20)) - sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP; - else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS10_UNMAP], 20)) - sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP; - else + mode = sysfs_match_string(zeroing_mode, buf); + if (mode < 0) return -EINVAL; + sdkp->zeroing_mode = mode; + return count; } static DEVICE_ATTR_RW(zeroing_mode); @@ -465,7 +451,7 @@ max_medium_access_timeouts_show(struct device *dev, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts); + return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts); } static 
ssize_t @@ -491,7 +477,7 @@ max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks); + return sprintf(buf, "%u\n", sdkp->max_ws_blocks); } static ssize_t @@ -696,6 +682,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) switch (mode) { + case SD_LBP_FULL: case SD_LBP_DISABLE: blk_queue_max_discard_sectors(q, 0); queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); -- cgit From 054c62f5b7a45a7142982f68e5d05be158775fbf Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Tue, 20 Jun 2017 19:41:21 +0530 Subject: scsi: csiostor: update module version Add -ko to the module version similar to module version of other Chelsio drivers. Signed-off-by: Varun Prakash Signed-off-by: Martin K. Petersen --- drivers/scsi/csiostor/csio_init.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h index 5cc5d317a442..96b31e5af91e 100644 --- a/drivers/scsi/csiostor/csio_init.h +++ b/drivers/scsi/csiostor/csio_init.h @@ -50,7 +50,7 @@ #define CSIO_DRV_AUTHOR "Chelsio Communications" #define CSIO_DRV_LICENSE "Dual BSD/GPL" #define CSIO_DRV_DESC "Chelsio FCoE driver" -#define CSIO_DRV_VERSION "1.0.0" +#define CSIO_DRV_VERSION "1.0.0-ko" extern struct fc_function_template csio_fc_transport_funcs; extern struct fc_function_template csio_fc_transport_vport_funcs; -- cgit From d821bbdae4f3187ffd52877265f8eab25e21d3ba Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 21 Jun 2017 10:48:30 +0800 Subject: scsi: fc: drop residual tsk_mgmt_response and it_nexus_response After commit 556e26a70b64 ("scsi: remove tsk_mgmt_response and it_nexus_response transport methods"), the target driver support was removed totally. Drop the residual. Signed-off-by: Kefeng Wang Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- include/scsi/scsi_transport_fc.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 6e208bb32c78..e308cd59e556 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -658,10 +658,6 @@ struct fc_function_template { int (*vport_disable)(struct fc_vport *, bool); int (*vport_delete)(struct fc_vport *); - /* target-mode drivers' functions */ - int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int); - int (* it_nexus_response)(struct Scsi_Host *, u64, int); - /* bsg support */ int (*bsg_request)(struct bsg_job *); int (*bsg_timeout)(struct bsg_job *); -- cgit From 49541a04b12ff0f4086c41d71ee5d3000d64a387 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 21 Jun 2017 13:40:05 +0200 Subject: scsi: qla2xxx: don't include There's no need to use the static UTS_RELEASE string, since utsname()->release contains the same. This avoids rebuilding this file for every change of the release string. Signed-off-by: Johannes Berg Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 238abad4b481..00ae1f4090e2 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -25,7 +25,6 @@ #include #include -#include #include #include #include @@ -1883,9 +1882,9 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, char *page) { return sprintf(page, - "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " - UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, - utsname()->machine); + "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n", + QLA2XXX_VERSION, utsname()->sysname, + utsname()->machine, utsname()->release); } CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version); @@ -1989,9 +1988,9 @@ static int tcm_qla2xxx_register_configfs(void) { int ret; - pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " - UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, - utsname()->machine); + pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n", + QLA2XXX_VERSION, utsname()->sysname, + utsname()->machine, utsname()->release); ret = target_register_template(&tcm_qla2xxx_ops); if (ret) -- cgit From e72c9a2a67a6400c8ef3d01d4c461dbbbfa0e1f0 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 21 Jun 2017 16:35:46 +0200 Subject: scsi: virtio_scsi: let host do exception handling virtio_scsi tries to do exception handling after the default 30 seconds timeout expires. However, it's better to let the host control the timeout, otherwise with a heavy I/O load it is likely that an abort will also timeout. This leads to fatal errors like filesystems going offline. Disable the 'sd' timeout and allow the host to do exception handling, following the precedent of the storvsc driver. Hannes has a proposal to introduce timeouts in virtio, but this provides an immediate solution for stable kernels too. [mkp: fixed typo] Reported-by: Douglas Miller Cc: "James E.J. Bottomley" Cc: "Martin K. Petersen" Cc: Hannes Reinecke Cc: linux-scsi@vger.kernel.org Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini Signed-off-by: Martin K. Petersen --- drivers/scsi/virtio_scsi.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index dc2e97c543a5..8b93197daefe 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -795,6 +795,16 @@ static int virtscsi_map_queues(struct Scsi_Host *shost) return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2); } +/* + * The host guarantees to respond to each command, although I/O + * latencies might be higher than on bare metal. Reset the timer + * unconditionally to give the host a chance to perform EH. 
+ */ +static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd) +{ + return BLK_EH_RESET_TIMER; +} + static struct scsi_host_template virtscsi_host_template_single = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", @@ -805,6 +815,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .eh_timed_out = virtscsi_eh_timed_out, .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, @@ -825,6 +836,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .eh_timed_out = virtscsi_eh_timed_out, .can_queue = 1024, .dma_boundary = UINT_MAX, -- cgit From 00cefeb964f525672b5344446e5bb72188b03e38 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 21 Jun 2017 10:37:58 -0700 Subject: scsi: lpfc: Fix nvme io stoppage after link bounce On link down, transport is calling driver to abort outstanding ios. Driver erroneously rejects the abort if the port indicates it isn't logged in - which will be the case after the link down. Thus, the io can't clean up. This prevents reconnection at the transport level. Fix by allowing abort to proceed. Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_nvme.c | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 8206aa5493e3..0a0a1b92d01d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1427,7 +1427,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; struct lpfc_hba *phba; - struct lpfc_nodelist *ndlp; struct lpfc_nvme_rport *rport; struct lpfc_nvme_buf *lpfc_nbuf; struct lpfc_iocbq *abts_buf; @@ -1449,38 +1448,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, pnvme_rport->port_id, pnvme_fcreq); - /* - * Catch race where our node has transitioned, but the - * transport is still transitioning. - */ - ndlp = rport->ndlp; - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, - "6054 rport %p, ndlp %p, DID x%06x ndlp " - " not ready.\n", - rport, ndlp, pnvme_rport->port_id); - - ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); - if (!ndlp) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, - "6055 Could not find node for " - "DID %x\n", - pnvme_rport->port_id); - return; - } - } - - /* The remote node has to be ready to send an abort. */ - if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && - (ndlp->nlp_type & NLP_NVME_TARGET)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, - "6048 rport %p, DID x%06x not ready for " - "IO. State x%x, Type x%x\n", - rport, pnvme_rport->port_id, - ndlp->nlp_state, ndlp->nlp_type); - return; - } - /* If the hba is getting reset, this flag is set. It is * cleared when the reset is complete and rings reestablished. 
*/ @@ -1541,7 +1508,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", nvmereq_wqe->sli4_xritag, - nvmereq_wqe->hba_wqidx, ndlp->nlp_DID); + nvmereq_wqe->hba_wqidx, pnvme_rport->port_id); /* Outstanding abort is in progress */ if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { -- cgit From cb45e5295a21eba23edc64839cd60122805f37f3 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 21 Jun 2017 10:51:23 -0700 Subject: scsi: lpfc: fix refcount error on node list A change in remote port removal introduced a spurious put which can cause a premature structure teardown. The effects were most notable when the driver attempted to unload as a null pointer would be hit. Fix by removing the unnecessary put. Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_init.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 7e73fdc154f7..491aa95eb0f6 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2805,13 +2805,6 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); - if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { - /* Remove the NVME transport reference now and - * continue to remove the node. - */ - lpfc_nlp_put(ndlp); - } - lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } -- cgit From 134699d2a896c68781da7416904787011988738b Mon Sep 17 00:00:00 2001 From: Tyrel Datwyler Date: Wed, 21 Jun 2017 16:24:39 -0400 Subject: scsi: fix typos and grammar in comments of scsi_transport_fc.c Signed-off-by: Tyrel Datwyler Reviewed-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_transport_fc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index d4cf32d55546..6dd0922a499d 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -1692,7 +1692,7 @@ fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); * Host Statistics Management */ -/* Show a given an attribute in the statistics group */ +/* Show a given attribute in the statistics group */ static ssize_t fc_stat_show(const struct device *dev, char *buf, unsigned long offset) { @@ -2923,7 +2923,7 @@ EXPORT_SYMBOL(fc_remote_port_add); * attached to it. However, we want to semi-persist the target id assigned * to that port if it eventually does exist. The port structure will * remain (although with minimal information) so that the target id - * bindings remails. + * bindings also remain. * * If the remote port is not an FCP Target, it will be fully torn down * and deallocated, including the fc_remote_port class device. @@ -2937,7 +2937,7 @@ EXPORT_SYMBOL(fc_remote_port_add); * If the remote port does not return (signaled by a LLDD call to * fc_remote_port_add()) within the dev_loss_tmo timeout, then the * scsi target is removed - killing all outstanding i/o and removing the - * scsi devices attached ot it. The port structure will be marked Not + * scsi devices attached to it. The port structure will be marked Not * Present and be partially cleared, leaving only enough information to * recognize the remote port relative to the scsi target id binding if * it later appears.
The port will remain as long as there is a valid @@ -3056,7 +3056,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) * There may have been a delete timer running on the * port. Ensure that it is cancelled as we now know * the port is an FCP Target. - * Note: we know the rport is exists and in an online + * Note: we know the rport exists and is in an online * state as the LLDD would not have had an rport * reference to pass us. * @@ -3317,7 +3317,7 @@ EXPORT_SYMBOL(fc_block_scsi_eh); * @ret_vport: The pointer to the created vport. * * Allocates and creates the vport structure, calls the parent host - * to instantiate the vport, the completes w/ class and sysfs creation. + * to instantiate the vport, this completes w/ class and sysfs creation. * * Notes: * This routine assumes no locks are held on entry. @@ -3397,7 +3397,7 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, /* * if the parent isn't the physical adapter's Scsi_Host, ensure - * the Scsi_Host at least contains ia symlink to the vport. + * the Scsi_Host at least contains a symlink to the vport. */ if (pdev != &shost->shost_gendev) { error = sysfs_create_link(&shost->shost_gendev.kobj, -- cgit From 6c9e27703594131d0f170f27ddb1c12a4eb599c6 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Thu, 22 Jun 2017 15:47:49 +0530 Subject: scsi: cxgb4i: assign rxqs in round robin mode Assign rxq to TCP connections in round robin mode to use all available rxqs. Signed-off-by: Varun Prakash Signed-off-by: Martin K. Petersen --- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 5 ++++- drivers/scsi/cxgbi/libcxgbi.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 0aae094ab91c..fa994713e718 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1606,6 +1606,7 @@ static int init_act_open(struct cxgbi_sock *csk) struct neighbour *n = NULL; void *daddr; unsigned int step; + unsigned int rxq_idx; unsigned int size, size6; unsigned int linkspeed; unsigned int rcv_winf, snd_winf; @@ -1684,7 +1685,9 @@ static int init_act_open(struct cxgbi_sock *csk) step = lldi->ntxq / lldi->nchan; csk->txq_idx = cxgb4_port_idx(ndev) * step; step = lldi->nrxq / lldi->nchan; - csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; + rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step); + cdev->rxq_idx_cntr++; + csk->rss_qid = lldi->rxq_ids[rxq_idx]; linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed; csk->snd_win = cxgb4i_snd_win; csk->rcv_win = cxgb4i_rcv_win; diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 37f07aaab1e4..31a5816c2e8d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -477,6 +477,7 @@ struct cxgbi_device { unsigned int skb_rx_extra; /* for msg coalesced mode */ unsigned int tx_max_size; unsigned int rx_max_size; + unsigned int rxq_idx_cntr; struct cxgbi_ports_map pmap; void (*dev_ddp_cleanup)(struct cxgbi_device *); -- cgit From 06d9eb4e71c316f231dc079e66b58592d733c737 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 22 Jun 2017 16:52:19 +0100 Subject: scsi: hptiop: make function hptiop_iop_request_callback_itl static The function hptiop_iop_request_callback_itl does not need to be in global scope, so make it static. Cleans up sparse warning: "symbol 'hptiop_iop_request_callback_itl' was not declared. Should it be static?" Signed-off-by: Colin Ian King Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hptiop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index db17ad15b0c1..7226226f7383 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -800,7 +800,7 @@ static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag) hptiop_finish_scsi_req(hba, tag, req); } -void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag) +static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag) { struct hpt_iop_request_header __iomem *req; struct hpt_iop_request_ioctl_command __iomem *p; -- cgit From 1d32a62c74b3bcb69822b0f4745af5410cfec3a7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 23 Jun 2017 10:02:00 +0300 Subject: scsi: bnx2i: missing error code in bnx2i_ep_connect() If bnx2i_map_ep_dbell_regs() fails then we accidentally return NULL instead of an error pointer. It results in a NULL dereference in iscsi_if_ep_connect(). Fixes: cf4e6363859d ("[SCSI] bnx2i: Add bnx2i iSCSI driver.") Signed-off-by: Dan Carpenter Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f32a66f89d25..03c104b47f31 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1909,7 +1909,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, bnx2i_ep_active_list_add(hba, bnx2i_ep); - if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) + rc = bnx2i_map_ep_dbell_regs(bnx2i_ep); + if (rc) goto del_active_ep; mutex_unlock(&hba->net_dev_lock); -- cgit From 342ffc26693b528648bdc9377e51e4f2450b4860 Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Fri, 23 Jun 2017 09:04:22 -0500 Subject: scsi: aacraid: Don't copy uninitialized stack memory to userspace Both aac_send_raw_srb() and aac_get_hba_info() may copy stack allocated structs to userspace without initializing all members of these structs. Clear out this memory to prevent information leaks. Fixes: 423400e64d377 ("scsi: aacraid: Include HBA direct interface") Fixes: c799d519bf088 ("scsi: aacraid: Retrieve HBA host information ioctl") Signed-off-by: Seth Forshee Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/commctrl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 6bb6ed48e31d..9ab0fa959d83 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -949,6 +949,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err; struct aac_srb_reply reply; + memset(&reply, 0, sizeof(reply)); reply.status = ST_OK; if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) { /* fast response */ @@ -1020,6 +1021,7 @@ static int aac_get_hba_info(struct aac_dev *dev, void __user *arg) { struct aac_hba_info hbainfo; + memset(&hbainfo, 0, sizeof(hbainfo)); hbainfo.adapter_number = (u8) dev->id; hbainfo.system_io_bus_number = dev->pdev->bus->number; hbainfo.device_number = (dev->pdev->devfn >> 3); -- cgit From 6e9017feaaaaaaa02069cfb267b13f625f9cf6d7 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 23 Jun 2017 10:32:39 -0700 Subject: scsi: Remove the definition of VLC_SA_RECEIVE_CREDENTIAL The symbolic name VLC_SA_RECEIVE_CREDENTIAL is not used anywhere in the kernel.
Additionally, since SPC 5 the RECEIVE CREDENTIAL command is obsolete. The VLC_SA_RECEIVE_CREDENTIAL definition is misleading since it occurs outside the list of other variable length CDB service action codes (READ_32, WRITE_32, ...). Hence remove this definition. References: commit e9ccc998b70f ("[SCSI] Add missing SPC-4 CDB and MAINTENANCE_[IN,OUT] service action definitions") Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Johannes Thumshirn Reviewed-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- include/scsi/scsi_proto.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index ce78ec8e367d..78215fd6cdfd 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -125,9 +125,6 @@ #define SAI_READ_CAPACITY_16 0x10 #define SAI_GET_LBA_STATUS 0x12 #define SAI_REPORT_REFERRALS 0x13 -/* values for VARIABLE_LENGTH_CMD service action codes - * see spc4r17 Section D.3.5, table D.7 and D.8 */ -#define VLC_SA_RECEIVE_CREDENTIAL 0x1800 /* values for maintenance in */ #define MI_REPORT_IDENTIFYING_INFORMATION 0x05 #define MI_REPORT_TARGET_PGS 0x0a -- cgit From d0ef10c9a42ee2008a5106d04ed34a53b9c6e6c6 Mon Sep 17 00:00:00 2001 From: John Garry Date: Mon, 26 Jun 2017 18:27:28 +0800 Subject: scsi: hisi_sas: redefine hisi_sas_phy.phy_type as u32 Element phy_type is a bitmask and it only ever has 2 bits possibly set, and it is overkill to define as a u64, so redefine as a u32. This change resolves static code check complaint that "phy->phy_type &= ~PORT_TYPE_SAS;" would unintentionally clear the high 32 bits as well. Structure hisi_sas_phy is also reordered to ensure packing efficiency. Reported-by: Dan Carpenter Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 4fc23087a939..22dd48bc2fa0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -85,11 +85,11 @@ struct hisi_sas_phy { struct work_struct phyup_ws; u64 port_id; /* from hw */ u64 dev_sas_addr; - u64 phy_type; u64 frame_rcvd_size; u8 frame_rcvd[32]; u8 phy_attached; u8 reserved[3]; + u32 phy_type; enum sas_linkrate minimum_linkrate; enum sas_linkrate maximum_linkrate; }; -- cgit From ba82d91b7567774242534460910530289192d212 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 26 Jun 2017 14:31:10 +0100 Subject: scsi: hpsa: fix spelling mistake: "encrypytion" -> "encryption" Trivial fix to spelling mistake in dev_info message Signed-off-by: Colin Ian King Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9934947073e6..8914eab84337 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3127,7 +3127,7 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, le16_to_cpu(map_buff->layout_map_count)); dev_info(&h->pdev->dev, "flags = 0x%x\n", le16_to_cpu(map_buff->flags)); - dev_info(&h->pdev->dev, "encrypytion = %s\n", + dev_info(&h->pdev->dev, "encryption = %s\n", le16_to_cpu(map_buff->flags) & RAID_MAP_FLAG_ENCRYPT_ON ? 
"ON" : "OFF"); dev_info(&h->pdev->dev, "dekindex = %u\n", -- cgit From e365cab14fc8c15ad754039cde2f4e574453a61b Mon Sep 17 00:00:00 2001 From: Christos Gkekas Date: Sat, 24 Jun 2017 17:24:45 +0100 Subject: scsi: qedi: Remove comparison of u16 idx with zero. Variable idx is defined as u16 thus statement (idx < 0) is always false and should be removed. Signed-off-by: Christos Gkekas Acked-by: Manish Rangankar Signed-off-by: Martin K. Petersen --- drivers/scsi/qedi/qedi_fw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 8bc7ee1a8ca8..d0dd92f6a365 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -333,7 +333,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi, /* Obtain buffer address from rqe_opaque */ idx = cqe->rqe_opaque.lo; - if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) { + if (idx > (QEDI_BDQ_NUM - 1)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "wrong idx %d returned by FW, dropping the unsolicited pkt\n", idx); @@ -370,7 +370,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi, /* Obtain buffer address from rqe_opaque */ idx = cqe->rqe_opaque.lo; - if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) { + if (idx > (QEDI_BDQ_NUM - 1)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "wrong idx %d returned by FW, dropping the unsolicited pkt\n", idx); -- cgit From 106e97b92e644b7f90ce84ba02998c86b4ec747f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jun 2017 09:17:12 +0200 Subject: scsi: 53c700: switch to dma_alloc_attrs Use dma_alloc_attrs directly instead of the dma_alloc_noncoherent wrapper. Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. Petersen --- drivers/scsi/53c700.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 95e32a47face..4b3b08025ef6 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -296,8 +296,8 @@ NCR_700_detect(struct scsi_host_template *tpnt, if(tpnt->sdev_attrs == NULL) tpnt->sdev_attrs = NCR_700_dev_attrs; - memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE, - &pScript, GFP_KERNEL); + memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); if(memory == NULL) { printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); return NULL; @@ -410,8 +410,8 @@ NCR_700_release(struct Scsi_Host *host) struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; - dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE, - hostdata->script, hostdata->pScript); + dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script, + hostdata->pScript, DMA_ATTR_NON_CONSISTENT); return 1; } -- cgit From d31b77911505bbd0cd06403bd5eff26d691ad62d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jun 2017 09:17:11 +0200 Subject: scsi: sgiwd93: switch to dma_alloc_attrs Use dma_alloc_attrs directly instead of the dma_alloc_noncoherent wrapper. [mkp: fixed driver name] Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. 
Petersen --- drivers/scsi/sgiwd93.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index 71b4b91d2215..80cfa93e407c 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c @@ -249,8 +249,8 @@ static int sgiwd93_probe(struct platform_device *pdev) hdata = host_to_hostdata(host); hdata->dev = &pdev->dev; - hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE, - &hdata->dma, GFP_KERNEL); + hdata->cpu = dma_alloc_attrs(&pdev->dev, HPC_DMA_SIZE, &hdata->dma, + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); if (!hdata->cpu) { printk(KERN_WARNING "sgiwd93: Could not allocate memory for " "host %d buffer.\n", unit); @@ -289,7 +289,8 @@ static int sgiwd93_probe(struct platform_device *pdev) out_irq: free_irq(irq, host); out_free: - dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma); + dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma, + DMA_ATTR_NON_CONSISTENT); out_put: scsi_host_put(host); out: @@ -305,7 +306,8 @@ static int sgiwd93_remove(struct platform_device *pdev) scsi_remove_host(host); free_irq(pd->irq, host); - dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma); + dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma, + DMA_ATTR_NON_CONSISTENT); scsi_host_put(host); return 0; } -- cgit From 66ea9bcc392017b6df465b6f5847f6eac966a801 Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:13:32 -0500 Subject: scsi: cxlflash: Combine the send queue locks Currently there are separate spin locks for the two supported I/O queueing models. This makes it difficult to serialize with paths outside the enqueue path. As a design simplification and to support serialization with enqueue operations, move to only a single lock that is used for enqueueing regardless of the queueing model. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 3 +-- drivers/scsi/cxlflash/main.c | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 256af819377d..6fc32cfc6026 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -193,7 +193,7 @@ struct hwq { u32 index; /* Index of this hwq */ atomic_t hsq_credits; - spinlock_t hsq_slock; + spinlock_t hsq_slock; /* Hardware send queue lock */ struct sisl_ioarcb *hsq_start; struct sisl_ioarcb *hsq_end; struct sisl_ioarcb *hsq_curr; @@ -204,7 +204,6 @@ struct hwq { bool toggle; s64 room; - spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */ struct irq_poll irqpoll; } __aligned(cache_line_size()); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index a7d57c343492..64ea597ca98e 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -261,7 +261,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) * To avoid the performance penalty of MMIO, spread the update of * 'room' over multiple commands. 
*/ - spin_lock_irqsave(&hwq->rrin_slock, lock_flags); + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); if (--hwq->room < 0) { room = readq_be(&hwq->host_map->cmd_room); if (room <= 0) { @@ -277,7 +277,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin); out: - spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc); return rc; @@ -1722,7 +1722,10 @@ static int start_afu(struct cxlflash_cfg *cfg) hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; hwq->hrrq_curr = hwq->hrrq_start; hwq->toggle = 1; + + /* Initialize spin locks */ spin_lock_init(&hwq->hrrq_slock); + spin_lock_init(&hwq->hsq_slock); /* Initialize SQ */ if (afu_is_sq_cmd_mode(afu)) { @@ -1731,7 +1734,6 @@ static int start_afu(struct cxlflash_cfg *cfg) hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; hwq->hsq_curr = hwq->hsq_start; - spin_lock_init(&hwq->hsq_slock); atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); } @@ -1984,7 +1986,6 @@ static int init_afu(struct cxlflash_cfg *cfg) for (i = 0; i < afu->num_hwqs; i++) { hwq = get_hwq(afu, i); - spin_lock_init(&hwq->rrin_slock); hwq->room = readq_be(&hwq->host_map->cmd_room); } -- cgit From 539d890cecee6b5d7304914afc51b7f53150163d Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:13:48 -0500 Subject: scsi: cxlflash: Update cxlflash_afu_sync() to return errno The cxlflash_afu_sync() routine returns a negative one to indicate any kind of failure. This makes it impossible to establish why the error occurred. Update the return codes to clearly indicate the failure cause to the caller. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/main.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 64ea597ca98e..815d04b3e7aa 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -2022,8 +2022,7 @@ err1: * going away). * * Return: - * 0 on success - * -1 on failure + * 0 on success, -errno on failure */ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, res_hndl_t res_hndl_u, u8 mode) @@ -2047,7 +2046,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); if (unlikely(!buf)) { dev_err(dev, "%s: no memory for command\n", __func__); - rc = -1; + rc = -ENOMEM; goto out; } @@ -2071,12 +2070,14 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); rc = afu->send_cmd(afu, cmd); - if (unlikely(rc)) + if (unlikely(rc)) { + rc = -ENOBUFS; goto out; + } rc = wait_resp(afu, cmd); if (unlikely(rc)) - rc = -1; + rc = -EIO; out: atomic_dec(&afu->cmds_active); mutex_unlock(&sync_active); -- cgit From a96851d3372bf8ee7023712163ad3da9a3e30a29 Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:14:02 -0500 Subject: scsi: cxlflash: Reset hardware queue context via specified register Per the SISLite specification, context_reset() writes 0x1 to the LSB of the reset register. When the AFU processes this reset request, it is expected to clear the bit after reset is complete. The current implementation simply checks that the entire value read back is not 1, instead of masking off the LSB and evaluating it for a change to 0. 
Should the AFU manipulate other bits during the reset (reading back a value of 0xF for example), successful completion will be prematurely indicated given the existing logic. Additionally, in the event that the context reset operation fails, there does not currently exist a way to provide feedback to the initiator of the reset. This poses a problem for the rare case that a context reset fails as the caller will proceed on the assumption that all is well. To remedy these issues, refactor the context reset routine to only mask off the LSB when evaluating for success and return status to the caller. Also update the context reset handler parameters to pass a hardware queue reference instead of a single command to better reflect that the entire queue associated with the context is impacted by the reset. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 2 +- drivers/scsi/cxlflash/main.c | 83 +++++++++++++++++++++++------------------- 2 files changed, 47 insertions(+), 38 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 6fc32cfc6026..75decf671743 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -211,7 +211,7 @@ struct hwq { struct afu { struct hwq hwqs[CXLFLASH_MAX_HWQS]; int (*send_cmd)(struct afu *, struct afu_cmd *); - void (*context_reset)(struct afu_cmd *); + int (*context_reset)(struct hwq *); /* AFU HW */ struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */ diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 815d04b3e7aa..b8dc379227f0 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -189,55 +189,59 @@ static void cmd_complete(struct afu_cmd *cmd) } /** - * context_reset() - reset command owner context via specified register - * @cmd: AFU command that timed out. + * context_reset() - reset context via specified register + * @hwq: Hardware queue owning the context to be reset. * @reset_reg: MMIO register to perform reset. + * + * Return: 0 on success, -errno on failure */ -static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg) +static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg) { - int nretry = 0; - u64 rrin = 0x1; - struct afu *afu = cmd->parent; - struct cxlflash_cfg *cfg = afu->parent; + struct cxlflash_cfg *cfg = hwq->afu->parent; struct device *dev = &cfg->dev->dev; + int rc = -ETIMEDOUT; + int nretry = 0; + u64 val = 0x1; - dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd); + dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq); - writeq_be(rrin, reset_reg); + writeq_be(val, reset_reg); do { - rrin = readq_be(reset_reg); - if (rrin != 0x1) + val = readq_be(reset_reg); + if ((val & 0x1) == 0x0) { + rc = 0; break; + } + /* Double delay each time */ udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); - dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n", - __func__, rrin, nretry); + dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n", + __func__, rc, val, nretry); + return rc; } /** - * context_reset_ioarrin() - reset command owner context via IOARRIN register - * @cmd: AFU command that timed out. + * context_reset_ioarrin() - reset context via IOARRIN register + * @hwq: Hardware queue owning the context to be reset. 
+ * + * Return: 0 on success, -errno on failure */ -static void context_reset_ioarrin(struct afu_cmd *cmd) +static int context_reset_ioarrin(struct hwq *hwq) { - struct afu *afu = cmd->parent; - struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - - context_reset(cmd, &hwq->host_map->ioarrin); + return context_reset(hwq, &hwq->host_map->ioarrin); } /** - * context_reset_sq() - reset command owner context w/ SQ Context Reset register - * @cmd: AFU command that timed out. + * context_reset_sq() - reset context via SQ_CONTEXT_RESET register + * @hwq: Hardware queue owning the context to be reset. + * + * Return: 0 on success, -errno on failure */ -static void context_reset_sq(struct afu_cmd *cmd) +static int context_reset_sq(struct hwq *hwq) { - struct afu *afu = cmd->parent; - struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - - context_reset(cmd, &hwq->host_map->sq_ctx_reset); + return context_reset(hwq, &hwq->host_map->sq_ctx_reset); } /** @@ -332,8 +336,7 @@ out: * @afu: AFU associated with the host. * @cmd: AFU command that was sent. * - * Return: - * 0 on success, -1 on timeout/error + * Return: 0 on success, -errno on failure */ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) { @@ -343,15 +346,13 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000); timeout = wait_for_completion_timeout(&cmd->cevent, timeout); - if (!timeout) { - afu->context_reset(cmd); - rc = -1; - } + if (!timeout) + rc = -ETIMEDOUT; if (unlikely(cmd->sa.ioasc != 0)) { dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n", __func__, cmd->rcb.cdb[0], cmd->sa.ioasc); - rc = -1; + rc = -EIO; } return rc; @@ -2033,6 +2034,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); char *buf = NULL; int rc = 0; + int nretry = 0; static DEFINE_MUTEX(sync_active); if (cfg->state != STATE_NORMAL) { @@ -2051,11 +2053,14 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, } cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); + +retry: init_completion(&cmd->cevent); cmd->parent = afu; cmd->hwq_index = hwq->index; - dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); + dev_dbg(dev, "%s: afu=%p cmd=%p ctx=%d nretry=%d\n", + __func__, afu, cmd, ctx_hndl_u, nretry); cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; cmd->rcb.ctx_id = hwq->ctx_hndl; @@ -2076,8 +2081,12 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, } rc = wait_resp(afu, cmd); - if (unlikely(rc)) - rc = -EIO; + if (rc == -ETIMEDOUT) { + rc = afu->context_reset(hwq); + if (!rc && ++nretry < 2) + goto retry; + } + out: atomic_dec(&afu->cmds_active); mutex_unlock(&sync_active); -- cgit From 0b09e711189952ff9d411593a8d74ec12a956c57 Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:14:17 -0500 Subject: scsi: cxlflash: Schedule asynchronous reset of the host A context reset failure indicates the AFU is in a bad state. At present, when such a situation occurs, no further action is taken. This leaves the adapter in an unusable state with no recoverable actions. To avoid this situation, context reset failures will be escalated to a host reset operation. This will be done asynchronously to allow the acting thread to return to the user with a failure. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. 
Petersen --- drivers/scsi/cxlflash/common.h | 2 + drivers/scsi/cxlflash/main.c | 137 ++++++++++++++++++++++++++++++----------- 2 files changed, 104 insertions(+), 35 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 75decf671743..e9b61087b72b 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -15,6 +15,7 @@ #ifndef _CXLFLASH_COMMON_H #define _CXLFLASH_COMMON_H +#include #include #include #include @@ -144,6 +145,7 @@ struct cxlflash_cfg { bool tmf_active; wait_queue_head_t reset_waitq; enum cxlflash_state state; + async_cookie_t async_reset_cookie; }; struct afu_cmd { diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index b8dc379227f0..20c2c5e111b4 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -585,6 +585,20 @@ static void free_mem(struct cxlflash_cfg *cfg) } } +/** + * cxlflash_reset_sync() - synchronizing point for asynchronous resets + * @cfg: Internal structure associated with the host. + */ +static void cxlflash_reset_sync(struct cxlflash_cfg *cfg) +{ + if (cfg->async_reset_cookie == 0) + return; + + /* Wait until all async calls prior to this cookie have completed */ + async_synchronize_cookie(cfg->async_reset_cookie + 1); + cfg->async_reset_cookie = 0; +} + /** * stop_afu() - stops the AFU command timers and unmaps the MMIO space * @cfg: Internal structure associated with the host. @@ -601,6 +615,8 @@ static void stop_afu(struct cxlflash_cfg *cfg) int i; cancel_work_sync(&cfg->work_q); + if (!current_is_async()) + cxlflash_reset_sync(cfg); if (likely(afu)) { while (atomic_read(&afu->cmds_active)) @@ -2004,6 +2020,91 @@ err1: goto out; } +/** + * afu_reset() - resets the AFU + * @cfg: Internal structure associated with the host. + * + * Return: 0 on success, -errno on failure + */ +static int afu_reset(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + int rc = 0; + + /* Stop the context before the reset. Since the context is + * no longer available restart it after the reset is complete + */ + term_afu(cfg); + + rc = init_afu(cfg); + + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * drain_ioctls() - wait until all currently executing ioctls have completed + * @cfg: Internal structure associated with the host. + * + * Obtain write access to read/write semaphore that wraps ioctl + * handling to 'drain' ioctls currently executing. + */ +static void drain_ioctls(struct cxlflash_cfg *cfg) +{ + down_write(&cfg->ioctl_rwsem); + up_write(&cfg->ioctl_rwsem); +} + +/** + * cxlflash_async_reset_host() - asynchronous host reset handler + * @data: Private data provided while scheduling reset. + * @cookie: Cookie that can be used for checkpointing. + */ +static void cxlflash_async_reset_host(void *data, async_cookie_t cookie) +{ + struct cxlflash_cfg *cfg = data; + struct device *dev = &cfg->dev->dev; + int rc = 0; + + if (cfg->state != STATE_RESET) { + dev_dbg(dev, "%s: Not performing a reset, state=%d\n", + __func__, cfg->state); + goto out; + } + + drain_ioctls(cfg); + cxlflash_mark_contexts_error(cfg); + rc = afu_reset(cfg); + if (rc) + cfg->state = STATE_FAILTERM; + else + cfg->state = STATE_NORMAL; + wake_up_all(&cfg->reset_waitq); + +out: + scsi_unblock_requests(cfg->host); +} + +/** + * cxlflash_schedule_async_reset() - schedule an asynchronous host reset + * @cfg: Internal structure associated with the host. 
+ */ +static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + + if (cfg->state != STATE_NORMAL) { + dev_dbg(dev, "%s: Not performing reset state=%d\n", + __func__, cfg->state); + return; + } + + cfg->state = STATE_RESET; + scsi_block_requests(cfg->host); + cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host, + cfg); +} + /** * cxlflash_afu_sync() - builds and sends an AFU sync command * @afu: AFU associated with the host. @@ -2085,6 +2186,7 @@ retry: rc = afu->context_reset(hwq); if (!rc && ++nretry < 2) goto retry; + cxlflash_schedule_async_reset(cfg); } out: @@ -2095,41 +2197,6 @@ out: return rc; } -/** - * afu_reset() - resets the AFU - * @cfg: Internal structure associated with the host. - * - * Return: 0 on success, -errno on failure - */ -static int afu_reset(struct cxlflash_cfg *cfg) -{ - struct device *dev = &cfg->dev->dev; - int rc = 0; - - /* Stop the context before the reset. Since the context is - * no longer available restart it after the reset is complete - */ - term_afu(cfg); - - rc = init_afu(cfg); - - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * drain_ioctls() - wait until all currently executing ioctls have completed - * @cfg: Internal structure associated with the host. - * - * Obtain write access to read/write semaphore that wraps ioctl - * handling to 'drain' ioctls currently executing. - */ -static void drain_ioctls(struct cxlflash_cfg *cfg) -{ - down_write(&cfg->ioctl_rwsem); - up_write(&cfg->ioctl_rwsem); -} - /** * cxlflash_eh_device_reset_handler() - reset a single LUN * @scp: SCSI command to send. -- cgit From c2c292f45029a6850cd14c7c2fa4fc479b8f74aa Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:14:30 -0500 Subject: scsi: cxlflash: Handle AFU sync failures AFU sync operations are not currently evaluated for failure. This is acceptable for paths where there is not a dependency on the AFU being consistent with the host. Examples include link reset events and LUN cleanup operations. On paths where there is a dependency, such as a LUN open, a sync failure should be acted upon. In the event of AFU sync failures, either log or cleanup as appropriate for operations that are dependent on a successful sync completion. Update documentation to reflect behavior in the event of an AFU sync failure. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- Documentation/powerpc/cxlflash.txt | 12 ++++++ drivers/scsi/cxlflash/superpipe.c | 34 +++++++++++++-- drivers/scsi/cxlflash/vlun.c | 88 +++++++++++++++++++++++++++----------- 3 files changed, 107 insertions(+), 27 deletions(-) diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt index 66b4496d6619..f9036cb0170d 100644 --- a/Documentation/powerpc/cxlflash.txt +++ b/Documentation/powerpc/cxlflash.txt @@ -257,6 +257,12 @@ DK_CXLFLASH_VLUN_RESIZE operating in the virtual mode and used to program a LUN translation table that the AFU references when provided with a resource handle. + This ioctl can return -EAGAIN if an AFU sync operation takes too long. + In addition to returning a failure to user, cxlflash will also schedule + an asynchronous AFU reset. Should the user choose to retry the operation, + it is expected to succeed. If this ioctl fails with -EAGAIN, the user + can either retry the operation or treat it as a failure. 
+ DK_CXLFLASH_RELEASE ------------------- This ioctl is responsible for releasing a previously obtained @@ -309,6 +315,12 @@ DK_CXLFLASH_VLUN_CLONE clone. This is to avoid a stale entry in the file descriptor table of the child process. + This ioctl can return -EAGAIN if an AFU sync operation takes too long. + In addition to returning a failure to user, cxlflash will also schedule + an asynchronous AFU reset. Should the user choose to retry the operation, + it is expected to succeed. If this ioctl fails with -EAGAIN, the user + can either retry the operation or treat it as a failure. + DK_CXLFLASH_VERIFY ------------------ This ioctl is used to detect various changes such as the capacity of diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index fe9f17a6268b..ad0f9968ccfb 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -56,6 +56,19 @@ static void marshal_det_to_rele(struct dk_cxlflash_detach *detach, release->context_id = detach->context_id; } +/** + * marshal_udir_to_rele() - translate udirect to release structure + * @udirect: Source structure from which to translate/copy. + * @release: Destination structure for the translate/copy. + */ +static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect, + struct dk_cxlflash_release *release) +{ + release->hdr = udirect->hdr; + release->context_id = udirect->context_id; + release->rsrc_handle = udirect->rsrc_handle; +} + /** * cxlflash_free_errpage() - frees resources associated with global error page */ @@ -622,6 +635,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev, res_hndl_t rhndl = release->rsrc_handle; int rc = 0; + int rcr = 0; u64 ctxid = DECODE_CTXID(release->context_id), rctxid = release->context_id; @@ -686,8 +700,12 @@ int _cxlflash_disk_release(struct scsi_device *sdev, rhte_f1->dw = 0; dma_wmb(); /* Make RHT entry bottom-half clearing visible */ - if (!ctxi->err_recovery_active) - cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (!ctxi->err_recovery_active) { + rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (unlikely(rcr)) + dev_dbg(dev, "%s: AFU sync failed rc=%d\n", + __func__, rcr); + } break; default: WARN(1, "Unsupported LUN mode!"); @@ -1929,6 +1947,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) struct afu *afu = cfg->afu; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; + struct dk_cxlflash_release rel = { { 0 }, 0 }; struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg; @@ -1970,13 +1989,18 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) rsrc_handle = (rhte - ctxi->rht_start); rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port); - cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC); last_lba = gli->max_lba; pphys->hdr.return_flags = 0; pphys->last_lba = last_lba; pphys->rsrc_handle = rsrc_handle; + rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC); + if (unlikely(rc)) { + dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc); + goto err2; + } + out: if (likely(ctxi)) put_context(ctxi); @@ -1984,6 +2008,10 @@ out: __func__, rsrc_handle, rc, last_lba); return rc; +err2: + marshal_udir_to_rele(pphys, &rel); + _cxlflash_disk_release(sdev, ctxi, &rel); + goto out; err1: cxlflash_lun_detach(gli); goto out; diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index 90b5c19f81f0..0800bcba5a00 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ 
b/drivers/scsi/cxlflash/vlun.c @@ -594,7 +594,9 @@ static int grow_lxt(struct afu *afu, rhte->lxt_cnt = my_new_size; dma_wmb(); /* Make RHT entry's LXT table size update visible */ - cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); + if (unlikely(rc)) + rc = -EAGAIN; /* free old lxt if reallocated */ if (lxt != lxt_old) @@ -673,8 +675,11 @@ static int shrink_lxt(struct afu *afu, rhte->lxt_start = lxt; dma_wmb(); /* Make RHT entry's LXT table update visible */ - if (needs_sync) - cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (needs_sync) { + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (unlikely(rc)) + rc = -EAGAIN; + } if (needs_ws) { /* @@ -792,6 +797,21 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev, rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size); else if (new_size < rhte->lxt_cnt) rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size); + else { + /* + * Rare case where there is already sufficient space, just + * need to perform a translation sync with the AFU. This + * scenario likely follows a previous sync failure during + * a resize operation. Accordingly, perform the heavyweight + * form of translation sync as it is unknown which type of + * resize failed previously. + */ + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (unlikely(rc)) { + rc = -EAGAIN; + goto out; + } + } resize->hdr.return_flags = 0; resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len); @@ -1084,10 +1104,13 @@ static int clone_lxt(struct afu *afu, { struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; - struct sisl_lxt_entry *lxt; + struct sisl_lxt_entry *lxt = NULL; + bool locked = false; u32 ngrps; u64 aun; /* chunk# allocated by block allocator */ - int i, j; + int j; + int i = 0; + int rc = 0; ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt); @@ -1095,33 +1118,29 @@ static int clone_lxt(struct afu *afu, /* allocate new LXTs for clone */ lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps), GFP_KERNEL); - if (unlikely(!lxt)) - return -ENOMEM; + if (unlikely(!lxt)) { + rc = -ENOMEM; + goto out; + } /* copy over */ memcpy(lxt, rhte_src->lxt_start, (sizeof(*lxt) * rhte_src->lxt_cnt)); - /* clone the LBAs in block allocator via ref_cnt */ + /* clone the LBAs in block allocator via ref_cnt, note that the + * block allocator mutex must be held until it is established + * that this routine will complete without the need for a + * cleanup. 
+ */ mutex_lock(&blka->mutex); + locked = true; for (i = 0; i < rhte_src->lxt_cnt; i++) { aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT); if (ba_clone(&blka->ba_lun, aun) == -1ULL) { - /* free the clones already made */ - for (j = 0; j < i; j++) { - aun = (lxt[j].rlba_base >> - MC_CHUNK_SHIFT); - ba_free(&blka->ba_lun, aun); - } - - mutex_unlock(&blka->mutex); - kfree(lxt); - return -EIO; + rc = -EIO; + goto err; } } - mutex_unlock(&blka->mutex); - } else { - lxt = NULL; } /* @@ -1136,10 +1155,31 @@ static int clone_lxt(struct afu *afu, rhte->lxt_cnt = rhte_src->lxt_cnt; dma_wmb(); /* Make RHT entry's LXT table size update visible */ - cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); + if (unlikely(rc)) { + rc = -EAGAIN; + goto err2; + } - dev_dbg(dev, "%s: returning\n", __func__); - return 0; +out: + if (locked) + mutex_unlock(&blka->mutex); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +err2: + /* Reset the RHTE */ + rhte->lxt_cnt = 0; + dma_wmb(); + rhte->lxt_start = NULL; + dma_wmb(); +err: + /* free the clones already made */ + for (j = 0; j < i; j++) { + aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT); + ba_free(&blka->ba_lun, aun); + } + kfree(lxt); + goto out; } /** -- cgit From a002bf830f5df3e622e32fdbde1756bcbb6aedad Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:14:43 -0500 Subject: scsi: cxlflash: Track pending scsi commands in each hardware queue Currently, there is no book keeping of the pending scsi commands in the cxlflash driver. This lack of tracking in-flight requests is too restrictive and requires a heavy-hammer reset each time an adapter error is encountered. Additionally, it does not allow for commands to be properly retried. In order to avoid this problem and to better handle error path command cleanup, introduce a linked list for each hardware queue that tracks pending commands. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 2 ++ drivers/scsi/cxlflash/main.c | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index e9b61087b72b..3eaa3be43d24 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -158,6 +158,7 @@ struct afu_cmd { u32 hwq_index; u8 cmd_tmf:1; + struct list_head list; /* Pending commands link */ /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned. 
* However for performance reasons the IOARCB/IOASA should be @@ -193,6 +194,7 @@ struct hwq { struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */ ctx_hndl_t ctx_hndl; /* master's context handle */ u32 index; /* Index of this hwq */ + struct list_head pending_cmds; /* Commands pending completion */ atomic_t hsq_credits; spinlock_t hsq_slock; /* Hardware send queue lock */ diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 20c2c5e111b4..1446fabe4cf6 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -162,8 +162,13 @@ static void cmd_complete(struct afu_cmd *cmd) struct afu *afu = cmd->parent; struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); bool cmd_is_tmf; + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + list_del(&cmd->list); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); + if (cmd->scp) { scp = cmd->scp; if (unlikely(cmd->sa.ioasc)) @@ -279,6 +284,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) hwq->room = room - 1; } + list_add(&cmd->list, &hwq->pending_cmds); writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin); out: spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); @@ -319,6 +325,8 @@ static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd) hwq->hsq_curr++; else hwq->hsq_curr = hwq->hsq_start; + + list_add(&cmd->list, &hwq->pending_cmds); writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail); spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); @@ -1840,6 +1848,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index) hwq->afu = cfg->afu; hwq->index = index; + INIT_LIST_HEAD(&hwq->pending_cmds); if (index == PRIMARY_HWQ) ctx = cxl_get_context(cfg->dev); -- cgit From a1ea04b3ebd9ae5c1cd5bf48be37aba0d93c1acc Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:14:56 -0500 Subject: scsi: cxlflash: Flush pending commands in cleanup path When the AFU is reset in an error path, pending scsi commands can be silently dropped without completion or a formal abort. This puts the onus on the cxlflash driver to notify mid-layer and indicating that the command can be retried. Once the card has been quiesced, the hardware send queue lock is acquired to prevent any data movement while the pending commands are processed. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 5 +++- drivers/scsi/cxlflash/main.c | 57 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 57 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 3eaa3be43d24..11a5b0a10a34 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -157,7 +157,9 @@ struct afu_cmd { struct list_head queue; u32 hwq_index; - u8 cmd_tmf:1; + u8 cmd_tmf:1, + cmd_aborted:1; + struct list_head list; /* Pending commands link */ /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned. 
@@ -176,6 +178,7 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) struct afu_cmd *afuc = sc_to_afuc(sc); memset(afuc, 0, sizeof(*afuc)); + INIT_LIST_HEAD(&afuc->queue); return afuc; } diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 1446fabe4cf6..0a3de42310ec 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -193,6 +193,36 @@ static void cmd_complete(struct afu_cmd *cmd) complete(&cmd->cevent); } +/** + * flush_pending_cmds() - flush all pending commands on this hardware queue + * @hwq: Hardware queue to flush. + * + * The hardware send queue lock associated with this hardware queue must be + * held when calling this routine. + */ +static void flush_pending_cmds(struct hwq *hwq) +{ + struct afu_cmd *cmd, *tmp; + struct scsi_cmnd *scp; + + list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) { + /* Bypass command when on a doneq, cmd_complete() will handle */ + if (!list_empty(&cmd->queue)) + continue; + + list_del(&cmd->list); + + if (cmd->scp) { + scp = cmd->scp; + scp->result = (DID_IMM_RETRY << 16); + scp->scsi_done(scp); + } else { + cmd->cmd_aborted = true; + complete(&cmd->cevent); + } + } +} + /** * context_reset() - reset context via specified register * @hwq: Hardware queue owning the context to be reset. @@ -357,6 +387,9 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) if (!timeout) rc = -ETIMEDOUT; + if (cmd->cmd_aborted) + rc = -EAGAIN; + if (unlikely(cmd->sa.ioasc != 0)) { dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n", __func__, cmd->rcb.cdb[0], cmd->sa.ioasc); @@ -702,6 +735,7 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index) struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; struct hwq *hwq; + ulong lock_flags; if (!afu) { dev_err(dev, "%s: returning with NULL afu\n", __func__); @@ -719,6 +753,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index) if (index != PRIMARY_HWQ) WARN_ON(cxl_release_context(hwq->ctx)); hwq->ctx = NULL; + + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + flush_pending_cmds(hwq); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); } /** @@ -2155,7 +2193,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, mutex_lock(&sync_active); atomic_inc(&afu->cmds_active); - buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); + buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); if (unlikely(!buf)) { dev_err(dev, "%s: no memory for command\n", __func__); rc = -ENOMEM; @@ -2165,6 +2203,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); retry: + memset(cmd, 0, sizeof(*cmd)); + INIT_LIST_HEAD(&cmd->queue); init_completion(&cmd->cevent); cmd->parent = afu; cmd->hwq_index = hwq->index; @@ -2191,11 +2231,20 @@ retry: } rc = wait_resp(afu, cmd); - if (rc == -ETIMEDOUT) { + switch (rc) { + case -ETIMEDOUT: rc = afu->context_reset(hwq); - if (!rc && ++nretry < 2) + if (rc) { + cxlflash_schedule_async_reset(cfg); + break; + } + /* fall through to retry */ + case -EAGAIN: + if (++nretry < 2) goto retry; - cxlflash_schedule_async_reset(cfg); + /* fall through to exit */ + default: + break; } out: -- cgit From 7c4c41f172b6d5dda1119ce5f59151bef732a058 Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:15:06 -0500 Subject: scsi: cxlflash: Add scsi command abort handler To date, CXL flash devices do not support a single command abort operation. 
Instead, the SISLite specification provides a context reset operation to cleanup all pending commands for a given context. When a context reset is successful, it is guaranteed that the AFU has aborted all currently pending I/O. This sequence is less invasive than a device or host reset and can be executed to support scsi command abort requests. Add eh_abort_handler callback support to process command timeouts and abort requests. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/main.c | 61 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 0a3de42310ec..ceb247b5d1f2 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -228,6 +228,10 @@ static void flush_pending_cmds(struct hwq *hwq) * @hwq: Hardware queue owning the context to be reset. * @reset_reg: MMIO register to perform reset. * + * When the reset is successful, the SISLite specification guarantees that + * the AFU has aborted all currently pending I/O. Accordingly, these commands + * must be flushed. + * * Return: 0 on success, -errno on failure */ static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg) @@ -237,9 +241,12 @@ static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg) int rc = -ETIMEDOUT; int nretry = 0; u64 val = 0x1; + ulong lock_flags; dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq); + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + writeq_be(val, reset_reg); do { val = readq_be(reset_reg); @@ -252,6 +259,11 @@ static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg) udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); + if (!rc) + flush_pending_cmds(hwq); + + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); + dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n", __func__, rc, val, nretry); return rc; @@ -2255,6 +2267,54 @@ out: return rc; } +/** + * cxlflash_eh_abort_handler() - abort a SCSI command + * @scp: SCSI command to abort. + * + * CXL Flash devices do not support a single command abort. Reset the context + * as per SISLite specification. Flush any pending commands in the hardware + * queue before the reset. + * + * Return: SUCCESS/FAILED as defined in scsi/scsi.h + */ +static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp) +{ + int rc = FAILED; + struct Scsi_Host *host = scp->device->host; + struct cxlflash_cfg *cfg = shost_priv(host); + struct afu_cmd *cmd = sc_to_afuc(scp); + struct device *dev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); + + dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " + "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, + scp->device->channel, scp->device->id, scp->device->lun, + get_unaligned_be32(&((u32 *)scp->cmnd)[0]), + get_unaligned_be32(&((u32 *)scp->cmnd)[1]), + get_unaligned_be32(&((u32 *)scp->cmnd)[2]), + get_unaligned_be32(&((u32 *)scp->cmnd)[3])); + + /* When the state is not normal, another reset/reload is in progress. + * Return failed and the mid-layer will invoke host reset handler. 
+ */ + if (cfg->state != STATE_NORMAL) { + dev_dbg(dev, "%s: Invalid state for abort, state=%d\n", + __func__, cfg->state); + goto out; + } + + rc = afu->context_reset(hwq); + if (unlikely(rc)) + goto out; + + rc = SUCCESS; + +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + /** * cxlflash_eh_device_reset_handler() - reset a single LUN * @scp: SCSI command to send. @@ -2969,6 +3029,7 @@ static struct scsi_host_template driver_template = { .ioctl = cxlflash_ioctl, .proc_name = CXLFLASH_NAME, .queuecommand = cxlflash_queuecommand, + .eh_abort_handler = cxlflash_eh_abort_handler, .eh_device_reset_handler = cxlflash_eh_device_reset_handler, .eh_host_reset_handler = cxlflash_eh_host_reset_handler, .change_queue_depth = cxlflash_change_queue_depth, -- cgit From a834a36b57d93b31f683a5d2cf7d87e3e617cb70 Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 21 Jun 2017 21:15:18 -0500 Subject: scsi: cxlflash: Create character device to provide host management interface The cxlflash driver currently lacks host management interface. Future devices supported by cxlflash will provide a variety of host-wide management functions. Examples include LUN provisioning, hardware debug support, and firmware download. In order to provide a way to manage the device, a character device will be created during probe of each adapter. This device will support a set of ioctls defined in the SISLite specification from which administrators can manage the adapter. Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 6 +- drivers/scsi/cxlflash/main.c | 207 ++++++++++++++++++++++++++++++++++++++++- drivers/scsi/cxlflash/main.h | 1 + 3 files changed, 212 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 11a5b0a10a34..cbc0eb70ab47 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -16,6 +16,7 @@ #define _CXLFLASH_COMMON_H #include +#include #include #include #include @@ -86,7 +87,8 @@ enum cxlflash_init_state { INIT_STATE_NONE, INIT_STATE_PCI, INIT_STATE_AFU, - INIT_STATE_SCSI + INIT_STATE_SCSI, + INIT_STATE_CDEV }; enum cxlflash_state { @@ -116,6 +118,8 @@ struct cxlflash_cfg { struct pci_device_id *dev_id; struct Scsi_Host *host; int num_fc_ports; + struct cdev cdev; + struct device *chardev; ulong cxlflash_regs_pci; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index ceb247b5d1f2..0656dd2d3547 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -34,6 +34,10 @@ MODULE_AUTHOR("Manoj N. Kumar "); MODULE_AUTHOR("Matthew R. Ochs "); MODULE_LICENSE("GPL"); +static struct class *cxlflash_class; +static u32 cxlflash_major; +static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS); + /** * process_cmd_err() - command error handler * @cmd: AFU command that experienced the error. @@ -862,6 +866,47 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) } } +/** + * cxlflash_get_minor() - gets the first available minor number + * + * Return: Unique minor number that can be used to create the character device. 
+ */ +static int cxlflash_get_minor(void) +{ + int minor; + long bit; + + bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS); + if (bit >= CXLFLASH_MAX_ADAPTERS) + return -1; + + minor = bit & MINORMASK; + set_bit(minor, cxlflash_minor); + return minor; +} + +/** + * cxlflash_put_minor() - releases the minor number + * @minor: Minor number that is no longer needed. + */ +static void cxlflash_put_minor(int minor) +{ + clear_bit(minor, cxlflash_minor); +} + +/** + * cxlflash_release_chrdev() - release the character device for the host + * @cfg: Internal structure associated with the host. + */ +static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg) +{ + put_device(cfg->chardev); + device_unregister(cfg->chardev); + cfg->chardev = NULL; + cdev_del(&cfg->cdev); + cxlflash_put_minor(MINOR(cfg->cdev.dev)); +} + /** * cxlflash_remove() - PCI entry point to tear down host * @pdev: PCI device associated with the host. @@ -897,6 +942,8 @@ static void cxlflash_remove(struct pci_dev *pdev) cxlflash_stop_term_user_contexts(cfg); switch (cfg->init_state) { + case INIT_STATE_CDEV: + cxlflash_release_chrdev(cfg); case INIT_STATE_SCSI: cxlflash_term_local_luns(cfg); scsi_remove_host(cfg->host); @@ -3119,6 +3166,86 @@ static void cxlflash_worker_thread(struct work_struct *work) scsi_scan_host(cfg->host); } +/** + * cxlflash_chr_open() - character device open handler + * @inode: Device inode associated with this character device. + * @file: File pointer for this device. + * + * Only users with admin privileges are allowed to open the character device. + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_chr_open(struct inode *inode, struct file *file) +{ + struct cxlflash_cfg *cfg; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev); + file->private_data = cfg; + + return 0; +} + +/* + * Character device file operations + */ +static const struct file_operations cxlflash_chr_fops = { + .owner = THIS_MODULE, + .open = cxlflash_chr_open, +}; + +/** + * init_chrdev() - initialize the character device for the host + * @cfg: Internal structure associated with the host. + * + * Return: 0 on success, -errno on failure + */ +static int init_chrdev(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + struct device *char_dev; + dev_t devno; + int minor; + int rc = 0; + + minor = cxlflash_get_minor(); + if (unlikely(minor < 0)) { + dev_err(dev, "%s: Exhausted allowed adapters\n", __func__); + rc = -ENOSPC; + goto out; + } + + devno = MKDEV(cxlflash_major, minor); + cdev_init(&cfg->cdev, &cxlflash_chr_fops); + + rc = cdev_add(&cfg->cdev, devno, 1); + if (rc) { + dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc); + goto err1; + } + + char_dev = device_create(cxlflash_class, NULL, devno, + NULL, "cxlflash%d", minor); + if (IS_ERR(char_dev)) { + rc = PTR_ERR(char_dev); + dev_err(dev, "%s: device_create failed rc=%d\n", + __func__, rc); + goto err2; + } + + cfg->chardev = char_dev; +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +err2: + cdev_del(&cfg->cdev); +err1: + cxlflash_put_minor(minor); + goto out; +} + /** * cxlflash_probe() - PCI entry point to add host * @pdev: PCI device associated with the host. 
@@ -3229,6 +3356,13 @@ static int cxlflash_probe(struct pci_dev *pdev, } cfg->init_state = INIT_STATE_SCSI; + rc = init_chrdev(cfg); + if (rc) { + dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc); + goto out_remove; + } + cfg->init_state = INIT_STATE_CDEV; + if (wq_has_sleeper(&cfg->reset_waitq)) { cfg->state = STATE_PROBED; wake_up_all(&cfg->reset_waitq); @@ -3331,6 +3465,63 @@ static void cxlflash_pci_resume(struct pci_dev *pdev) scsi_unblock_requests(cfg->host); } +/** + * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class + * @dev: Character device. + * @mode: Mode that can be used to verify access. + * + * Return: Allocated string describing the devtmpfs structure. + */ +static char *cxlflash_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev)); +} + +/** + * cxlflash_class_init() - create character device class + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_class_init(void) +{ + dev_t devno; + int rc = 0; + + rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash"); + if (unlikely(rc)) { + pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc); + goto out; + } + + cxlflash_major = MAJOR(devno); + + cxlflash_class = class_create(THIS_MODULE, "cxlflash"); + if (IS_ERR(cxlflash_class)) { + rc = PTR_ERR(cxlflash_class); + pr_err("%s: class_create failed rc=%d\n", __func__, rc); + goto err; + } + + cxlflash_class->devnode = cxlflash_devnode; +out: + pr_debug("%s: returning rc=%d\n", __func__, rc); + return rc; +err: + unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); + goto out; +} + +/** + * cxlflash_class_exit() - destroy character device class + */ +static void cxlflash_class_exit(void) +{ + dev_t devno = MKDEV(cxlflash_major, 0); + + class_destroy(cxlflash_class); + unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); +} + static const struct pci_error_handlers cxlflash_err_handler = { .error_detected = cxlflash_pci_error_detected, .slot_reset = cxlflash_pci_slot_reset, @@ -3356,10 +3547,23 @@ static struct pci_driver cxlflash_driver = { */ static int __init init_cxlflash(void) { + int rc; + check_sizes(); cxlflash_list_init(); + rc = cxlflash_class_init(); + if (unlikely(rc)) + goto out; - return pci_register_driver(&cxlflash_driver); + rc = pci_register_driver(&cxlflash_driver); + if (unlikely(rc)) + goto err; +out: + pr_debug("%s: returning rc=%d\n", __func__, rc); + return rc; +err: + cxlflash_class_exit(); + goto out; } /** @@ -3371,6 +3575,7 @@ static void __exit exit_cxlflash(void) cxlflash_free_errpage(); pci_unregister_driver(&cxlflash_driver); + cxlflash_class_exit(); } module_init(init_cxlflash); diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 49657f1f409e..44c86c2d84f5 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -22,6 +22,7 @@ #define CXLFLASH_NAME "cxlflash" #define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter" +#define CXLFLASH_MAX_ADAPTERS 32 #define PCI_DEVICE_ID_IBM_CORSA 0x04F0 #define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600 -- cgit From cf2430279006e4afa67dfa4cf952ded38c7ed5b4 Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 21 Jun 2017 21:15:31 -0500 Subject: scsi: cxlflash: Separate AFU internal command handling from AFU sync specifics To date the only supported internal AFU command is AFU sync. The logic to send an internal AFU command is embedded in the specific AFU sync handler and would need to be duplicated for new internal AFU commands. 
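As a rough sketch of that duplication (condensed and illustrative only -- the helper name below is hypothetical, while afu->send_cmd(), wait_resp() and the afu_cmd fields are those used by the AFU sync path in the surrounding patches), every new internal command issuer would have to repeat roughly:

	/* Illustrative sketch, not driver code: per-command setup and submission
	 * boilerplate that each internal AFU command would otherwise duplicate.
	 */
	static int issue_internal_cmd_sketch(struct afu *afu, struct hwq *hwq,
					     struct afu_cmd *cmd)
	{
		int rc;

		memset(cmd, 0, sizeof(*cmd));
		INIT_LIST_HEAD(&cmd->queue);
		init_completion(&cmd->cevent);
		cmd->parent = afu;
		cmd->hwq_index = hwq->index;
		cmd->rcb.ctx_id = hwq->ctx_hndl;
		/* caller fills in command-specific req_flags, timeout and CDB */

		rc = afu->send_cmd(afu, cmd);		/* queue on the hardware queue */
		if (!rc)
			rc = wait_resp(afu, cmd);	/* -ETIMEDOUT/-EAGAIN handled by caller */
		return rc;
	}
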
In order to support new internal AFU commands, separate code that is common for AFU internal commands into a generic transmission routine and support passing back command status through an IOASA structure. The first user of this new routine is the existing AFU sync command. As a cleanup, use a descriptive name for the AFU sync command instead of a magic number. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/main.c | 79 ++++++++++++++++++++++++++--------------- drivers/scsi/cxlflash/sislite.h | 2 ++ 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 0656dd2d3547..7732dfc099c4 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -2212,28 +2212,22 @@ static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) } /** - * cxlflash_afu_sync() - builds and sends an AFU sync command + * send_afu_cmd() - builds and sends an internal AFU command * @afu: AFU associated with the host. - * @ctx_hndl_u: Identifies context requesting sync. - * @res_hndl_u: Identifies resource requesting sync. - * @mode: Type of sync to issue (lightweight, heavyweight, global). + * @rcb: Pre-populated IOARCB describing command to send. * - * The AFU can only take 1 sync command at a time. This routine enforces this - * limitation by using a mutex to provide exclusive access to the AFU during - * the sync. This design point requires calling threads to not be on interrupt - * context due to the possibility of sleeping during concurrent sync operations. + * The AFU can only take one internal AFU command at a time. This limitation is + * enforced by using a mutex to provide exclusive access to the AFU during the + * operation. This design point requires calling threads to not be on interrupt + * context due to the possibility of sleeping during concurrent AFU operations. * - * AFU sync operations are only necessary and allowed when the device is - * operating normally. When not operating normally, sync requests can occur as - * part of cleaning up resources associated with an adapter prior to removal. - * In this scenario, these requests are simply ignored (safe due to the AFU - * going away). + * The command status is optionally passed back to the caller when the caller + * populates the IOASA field of the IOARCB with a pointer to an IOASA structure. 
* * Return: * 0 on success, -errno on failure */ -int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, - res_hndl_t res_hndl_u, u8 mode) +static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb) { struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; @@ -2263,25 +2257,15 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, retry: memset(cmd, 0, sizeof(*cmd)); + memcpy(&cmd->rcb, rcb, sizeof(*rcb)); INIT_LIST_HEAD(&cmd->queue); init_completion(&cmd->cevent); cmd->parent = afu; cmd->hwq_index = hwq->index; - - dev_dbg(dev, "%s: afu=%p cmd=%p ctx=%d nretry=%d\n", - __func__, afu, cmd, ctx_hndl_u, nretry); - - cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; cmd->rcb.ctx_id = hwq->ctx_hndl; - cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; - cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; - - cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ - cmd->rcb.cdb[1] = mode; - /* The cdb is aligned, no unaligned accessors required */ - *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); - *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); + dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n", + __func__, afu, cmd, cmd->rcb.cdb[0], nretry); rc = afu->send_cmd(afu, cmd); if (unlikely(rc)) { @@ -2306,6 +2290,8 @@ retry: break; } + if (rcb->ioasa) + *rcb->ioasa = cmd->sa; out: atomic_dec(&afu->cmds_active); mutex_unlock(&sync_active); @@ -2314,6 +2300,43 @@ out: return rc; } +/** + * cxlflash_afu_sync() - builds and sends an AFU sync command + * @afu: AFU associated with the host. + * @ctx: Identifies context requesting sync. + * @res: Identifies resource requesting sync. + * @mode: Type of sync to issue (lightweight, heavyweight, global). + * + * AFU sync operations are only necessary and allowed when the device is + * operating normally. When not operating normally, sync requests can occur as + * part of cleaning up resources associated with an adapter prior to removal. + * In this scenario, these requests are simply ignored (safe due to the AFU + * going away). + * + * Return: + * 0 on success, -errno on failure + */ +int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct sisl_ioarcb rcb = { 0 }; + + dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n", + __func__, afu, ctx, res, mode); + + rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; + rcb.msi = SISL_MSI_RRQ_UPDATED; + rcb.timeout = MC_AFU_SYNC_TIMEOUT; + + rcb.cdb[0] = SISL_AFU_CMD_SYNC; + rcb.cdb[1] = mode; + put_unaligned_be16(ctx, &rcb.cdb[2]); + put_unaligned_be32(res, &rcb.cdb[4]); + + return send_afu_cmd(afu, &rcb); +} + /** * cxlflash_eh_abort_handler() - abort a SCSI command * @scp: SCSI command to abort. diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index a768360d2fa6..483710a89781 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -72,6 +72,8 @@ struct sisl_ioarcb { u16 timeout; /* in units specified by req_flags */ u32 rsvd1; u8 cdb[16]; /* must be in big endian */ +#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */ + union { u64 reserved; /* Reserved for IOARRIN mode */ struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */ -- cgit From d6e32f530df9827070c45b55a6c67dfa8562184c Mon Sep 17 00:00:00 2001 From: "Matthew R. 
Ochs" Date: Wed, 21 Jun 2017 21:15:42 -0500 Subject: scsi: cxlflash: Introduce host ioctl support As staging for supporting various host management functions, add a host ioctl infrastructure to filter ioctl commands and perform operations that are common for all host ioctls. Also update the cxlflash documentation to create a new section for documenting host ioctls. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. Petersen --- Documentation/ioctl/ioctl-number.txt | 2 +- Documentation/powerpc/cxlflash.txt | 19 +++++- drivers/scsi/cxlflash/main.c | 121 ++++++++++++++++++++++++++++++++++- include/uapi/scsi/cxlflash_ioctl.h | 31 ++++++++- 4 files changed, 168 insertions(+), 5 deletions(-) diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 1e9fcb4d0ec8..3e3fdae5f3ed 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -326,7 +326,7 @@ Code Seq#(hex) Include File Comments 0xB5 00-0F uapi/linux/rpmsg.h 0xC0 00-0F linux/usb/iowarrior.h 0xCA 00-0F uapi/misc/cxl.h -0xCA 80-8F uapi/scsi/cxlflash_ioctl.h +0xCA 80-BF uapi/scsi/cxlflash_ioctl.h 0xCB 00-1F CBM serial IEC bus in development: 0xCD 01 linux/reiserfs_fs.h diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt index f9036cb0170d..ee67021bbfe4 100644 --- a/Documentation/powerpc/cxlflash.txt +++ b/Documentation/powerpc/cxlflash.txt @@ -124,8 +124,8 @@ Block library API http://github.com/open-power/capiflash -CXL Flash Driver IOCTLs -======================= +CXL Flash Driver LUN IOCTLs +=========================== Users, such as the block library, that wish to interface with a flash device (LUN) via user space access need to use the services provided @@ -367,3 +367,18 @@ DK_CXLFLASH_MANAGE_LUN exclusive user space access (superpipe). In case a LUN is visible across multiple ports and adapters, this ioctl is used to uniquely identify each LUN by its World Wide Node Name (WWNN). + + +CXL Flash Driver Host IOCTLs +============================ + + Each host adapter instance that is supported by the cxlflash driver + has a special character device associated with it to enable a set of + host management function. These character devices are hosted in a + class dedicated for cxlflash and can be accessed via /dev/cxlflash/*. + + Applications can be written to perform various functions using the + host ioctl APIs below. + + The structure definitions for these IOCTLs are available in: + uapi/scsi/cxlflash_ioctl.h diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 7732dfc099c4..be468ed1d884 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -2693,7 +2693,14 @@ static ssize_t lun_mode_store(struct device *dev, static ssize_t ioctl_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0); + ssize_t bytes = 0; + + bytes = scnprintf(buf, PAGE_SIZE, + "disk: %u\n", DK_CXLFLASH_VERSION_0); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + "host: %u\n", HT_CXLFLASH_VERSION_0); + + return bytes; } /** @@ -3211,12 +3218,124 @@ static int cxlflash_chr_open(struct inode *inode, struct file *file) return 0; } +/** + * decode_hioctl() - translates encoded host ioctl to easily identifiable string + * @cmd: The host ioctl command to decode. + * + * Return: A string identifying the decoded host ioctl. 
+ */ +static char *decode_hioctl(int cmd) +{ + switch (cmd) { + default: + return "UNKNOWN"; + } + + return "UNKNOWN"; +} + +/** + * cxlflash_chr_ioctl() - character device IOCTL handler + * @file: File pointer for this device. + * @cmd: IOCTL command. + * @arg: Userspace ioctl data structure. + * + * A read/write semaphore is used to implement a 'drain' of currently + * running ioctls. The read semaphore is taken at the beginning of each + * ioctl thread and released upon concluding execution. Additionally the + * semaphore should be released and then reacquired in any ioctl execution + * path which will wait for an event to occur that is outside the scope of + * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, + * a thread simply needs to acquire the write semaphore. + * + * Return: 0 on success, -errno on failure + */ +static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + typedef int (*hioctl) (struct cxlflash_cfg *, void *); + + struct cxlflash_cfg *cfg = file->private_data; + struct device *dev = &cfg->dev->dev; + char buf[sizeof(union cxlflash_ht_ioctls)]; + void __user *uarg = (void __user *)arg; + struct ht_cxlflash_hdr *hdr; + size_t size = 0; + bool known_ioctl = false; + int idx = 0; + int rc = 0; + hioctl do_ioctl = NULL; + + static const struct { + size_t size; + hioctl ioctl; + } ioctl_tbl[] = { /* NOTE: order matters here */ + }; + + /* Hold read semaphore so we can drain if needed */ + down_read(&cfg->ioctl_rwsem); + + dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n", + __func__, cmd, idx, sizeof(ioctl_tbl)); + + switch (cmd) { + default: + rc = -EINVAL; + goto out; + } + + if (unlikely(copy_from_user(&buf, uarg, size))) { + dev_err(dev, "%s: copy_from_user() fail " + "size=%lu cmd=%d (%s) uarg=%p\n", + __func__, size, cmd, decode_hioctl(cmd), uarg); + rc = -EFAULT; + goto out; + } + + hdr = (struct ht_cxlflash_hdr *)&buf; + if (hdr->version != HT_CXLFLASH_VERSION_0) { + dev_dbg(dev, "%s: Version %u not supported for %s\n", + __func__, hdr->version, decode_hioctl(cmd)); + rc = -EINVAL; + goto out; + } + + if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) { + dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__); + rc = -EINVAL; + goto out; + } + + rc = do_ioctl(cfg, (void *)&buf); + if (likely(!rc)) + if (unlikely(copy_to_user(uarg, &buf, size))) { + dev_err(dev, "%s: copy_to_user() fail " + "size=%lu cmd=%d (%s) uarg=%p\n", + __func__, size, cmd, decode_hioctl(cmd), uarg); + rc = -EFAULT; + } + + /* fall through to exit */ + +out: + up_read(&cfg->ioctl_rwsem); + if (unlikely(rc && known_ioctl)) + dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n", + __func__, decode_hioctl(cmd), cmd, rc); + else + dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n", + __func__, decode_hioctl(cmd), cmd, rc); + return rc; +} + /* * Character device file operations */ static const struct file_operations cxlflash_chr_fops = { .owner = THIS_MODULE, .open = cxlflash_chr_open, + .unlocked_ioctl = cxlflash_chr_ioctl, + .compat_ioctl = cxlflash_chr_ioctl, }; /** diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h index e9fdc12ad984..87e1f63029bf 100644 --- a/include/uapi/scsi/cxlflash_ioctl.h +++ b/include/uapi/scsi/cxlflash_ioctl.h @@ -31,7 +31,7 @@ struct dk_cxlflash_hdr { }; /* - * Return flag definitions available to all ioctls + * Return flag definitions available to all superpipe ioctls * * Similar to the input flags, these are grown from the bottom-up with the * intention that 
ioctl-specific return flag definitions would grow from the @@ -180,6 +180,10 @@ union cxlflash_ioctls { #define CXL_MAGIC 0xCA #define CXL_IOWR(_n, _s) _IOWR(CXL_MAGIC, _n, struct _s) +/* + * CXL Flash superpipe ioctls start at base of the reserved CXL_MAGIC + * region (0x80) and grow upwards. + */ #define DK_CXLFLASH_ATTACH CXL_IOWR(0x80, dk_cxlflash_attach) #define DK_CXLFLASH_USER_DIRECT CXL_IOWR(0x81, dk_cxlflash_udirect) #define DK_CXLFLASH_RELEASE CXL_IOWR(0x82, dk_cxlflash_release) @@ -191,4 +195,29 @@ union cxlflash_ioctls { #define DK_CXLFLASH_VLUN_RESIZE CXL_IOWR(0x88, dk_cxlflash_resize) #define DK_CXLFLASH_VLUN_CLONE CXL_IOWR(0x89, dk_cxlflash_clone) +/* + * Structure and flag definitions CXL Flash host ioctls + */ + +#define HT_CXLFLASH_VERSION_0 0 + +struct ht_cxlflash_hdr { + __u16 version; /* Version data */ + __u16 subcmd; /* Sub-command */ + __u16 rsvd[2]; /* Reserved for future use */ + __u64 flags; /* Input flags */ + __u64 return_flags; /* Returned flags */ +}; + +union cxlflash_ht_ioctls { +}; + +#define MAX_HT_CXLFLASH_IOCTL_SZ (sizeof(union cxlflash_ht_ioctls)) + +/* + * CXL Flash host ioctls start at the top of the reserved CXL_MAGIC + * region (0xBF) and grow downwards. + */ + + #endif /* ifndef _CXLFLASH_IOCTL_H */ -- cgit From efa1c818d3458fe97d8f83f40051518b44183234 Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 21 Jun 2017 21:16:02 -0500 Subject: scsi: cxlflash: Refactor AFU capability checking The existing AFU capability checking infrastructure is closely tied to the command mode capability bits. In order to support new capabilities, refactor the existing infrastructure to be more generic. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index cbc0eb70ab47..c96526e1fb11 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -255,21 +255,21 @@ static inline bool afu_is_irqpoll_enabled(struct afu *afu) return !!afu->irqpoll_weight; } -static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode) +static inline bool afu_has_cap(struct afu *afu, u64 cap) { u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT; - return afu_cap & cmd_mode; + return afu_cap & cap; } static inline bool afu_is_sq_cmd_mode(struct afu *afu) { - return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE); + return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE); } static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu) { - return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE); + return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE); } static inline u64 lun_to_lunid(u64 lun) -- cgit From 9cf43a360450ddd758b0021d1b55f1cc5643b9ed Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 21 Jun 2017 21:16:13 -0500 Subject: scsi: cxlflash: Support LUN provisioning Adopt the SISLite AFU LUN provisioning capability to allow future CXL Flash adapters the ability to better manage storage. Update the SISLite header with the changes necessary to support LUN provision operations and create a host ioctl interface for user LUN management software. Also update the cxlflash documentation to describe this new host ioctl. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. 
Petersen --- Documentation/powerpc/cxlflash.txt | 31 +++++++++++ drivers/scsi/cxlflash/common.h | 5 ++ drivers/scsi/cxlflash/main.c | 107 ++++++++++++++++++++++++++++++++++++- drivers/scsi/cxlflash/main.h | 5 ++ drivers/scsi/cxlflash/sislite.h | 22 +++++++- include/uapi/scsi/cxlflash_ioctl.h | 31 +++++++++-- 6 files changed, 192 insertions(+), 9 deletions(-) diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt index ee67021bbfe4..2d6297b4ad80 100644 --- a/Documentation/powerpc/cxlflash.txt +++ b/Documentation/powerpc/cxlflash.txt @@ -382,3 +382,34 @@ CXL Flash Driver Host IOCTLs The structure definitions for these IOCTLs are available in: uapi/scsi/cxlflash_ioctl.h + +HT_CXLFLASH_LUN_PROVISION +------------------------- + This ioctl is used to create and delete persistent LUNs on cxlflash + devices that lack an external LUN management interface. It is only + valid when used with AFUs that support the LUN provision capability. + + When sufficient space is available, LUNs can be created by specifying + the target port to host the LUN and a desired size in 4K blocks. Upon + success, the LUN ID and WWID of the created LUN will be returned and + the SCSI bus can be scanned to detect the change in LUN topology. Note + that partial allocations are not supported. Should a creation fail due + to a space issue, the target port can be queried for its current LUN + geometry. + + To remove a LUN, the device must first be disassociated from the Linux + SCSI subsystem. The LUN deletion can then be initiated by specifying a + target port and LUN ID. Upon success, the LUN geometry associated with + the port will be updated to reflect new number of provisioned LUNs and + available capacity. + + To query the LUN geometry of a port, the target port is specified and + upon success, the following information is presented: + + - Maximum number of provisioned LUNs allowed for the port + - Current number of provisioned LUNs for the port + - Maximum total capacity of provisioned LUNs for the port (4K blocks) + - Current total capacity of provisioned LUNs for the port (4K blocks) + + With this information, the number of available LUNs and capacity can be + can be calculated. diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index c96526e1fb11..58107246c32f 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -262,6 +262,11 @@ static inline bool afu_has_cap(struct afu *afu, u64 cap) return afu_cap & cap; } +static inline bool afu_is_lun_provision(struct afu *afu) +{ + return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION); +} + static inline bool afu_is_sq_cmd_mode(struct afu *afu) { return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index be468ed1d884..1279293ff3b3 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3227,13 +3227,104 @@ static int cxlflash_chr_open(struct inode *inode, struct file *file) static char *decode_hioctl(int cmd) { switch (cmd) { - default: - return "UNKNOWN"; + case HT_CXLFLASH_LUN_PROVISION: + return __stringify_1(HT_CXLFLASH_LUN_PROVISION); } return "UNKNOWN"; } +/** + * cxlflash_lun_provision() - host LUN provisioning handler + * @cfg: Internal structure associated with the host. + * @arg: Kernel copy of userspace ioctl data structure. 
+ * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, + struct ht_cxlflash_lun_provision *lunprov) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct sisl_ioarcb rcb; + struct sisl_ioasa asa; + __be64 __iomem *fc_port_regs; + u16 port = lunprov->port; + u16 scmd = lunprov->hdr.subcmd; + u16 type; + u64 reg; + u64 size; + u64 lun_id; + int rc = 0; + + if (!afu_is_lun_provision(afu)) { + rc = -ENOTSUPP; + goto out; + } + + if (port >= cfg->num_fc_ports) { + rc = -EINVAL; + goto out; + } + + switch (scmd) { + case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN: + type = SISL_AFU_LUN_PROVISION_CREATE; + size = lunprov->size; + lun_id = 0; + break; + case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN: + type = SISL_AFU_LUN_PROVISION_DELETE; + size = 0; + lun_id = lunprov->lun_id; + break; + case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT: + fc_port_regs = get_fc_port_regs(cfg, port); + + reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]); + lunprov->max_num_luns = reg; + reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]); + lunprov->cur_num_luns = reg; + reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]); + lunprov->max_cap_port = reg; + reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]); + lunprov->cur_cap_port = reg; + + goto out; + default: + rc = -EINVAL; + goto out; + } + + memset(&rcb, 0, sizeof(rcb)); + memset(&asa, 0, sizeof(asa)); + rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; + rcb.lun_id = lun_id; + rcb.msi = SISL_MSI_RRQ_UPDATED; + rcb.timeout = MC_LUN_PROV_TIMEOUT; + rcb.ioasa = &asa; + + rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION; + rcb.cdb[1] = type; + rcb.cdb[2] = port; + put_unaligned_be64(size, &rcb.cdb[8]); + + rc = send_afu_cmd(afu, &rcb); + if (rc) { + dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", + __func__, rc, asa.ioasc, asa.afu_extra); + goto out; + } + + if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) { + lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo; + memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid)); + } +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + /** * cxlflash_chr_ioctl() - character device IOCTL handler * @file: File pointer for this device. 
@@ -3270,6 +3361,8 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, size_t size; hioctl ioctl; } ioctl_tbl[] = { /* NOTE: order matters here */ + { sizeof(struct ht_cxlflash_lun_provision), + (hioctl)cxlflash_lun_provision }, }; /* Hold read semaphore so we can drain if needed */ @@ -3279,6 +3372,16 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, __func__, cmd, idx, sizeof(ioctl_tbl)); switch (cmd) { + case HT_CXLFLASH_LUN_PROVISION: + known_ioctl = true; + idx = _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd); + size = ioctl_tbl[idx].size; + do_ioctl = ioctl_tbl[idx].ioctl; + + if (likely(do_ioctl)) + break; + + /* fall through */ default: rc = -EINVAL; goto out; diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 44c86c2d84f5..6ae8a118358a 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -41,6 +41,10 @@ /* FC defines */ #define FC_MTIP_CMDCONFIG 0x010 #define FC_MTIP_STATUS 0x018 +#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */ +#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */ +#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */ +#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */ #define FC_PNAME 0x300 #define FC_CONFIG 0x320 @@ -63,6 +67,7 @@ /* AFU command timeout values */ #define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */ +#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */ /* AFU command room retry limit */ #define MC_ROOM_RETRY_CNT 10 diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index 483710a89781..c216feeb8906 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -73,6 +73,10 @@ struct sisl_ioarcb { u32 rsvd1; u8 cdb[16]; /* must be in big endian */ #define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */ +#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */ + +#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */ +#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */ union { u64 reserved; /* Reserved for IOARRIN mode */ @@ -158,6 +162,7 @@ struct sisl_rc { }; #define SISL_SENSE_DATA_LEN 20 /* Sense data length */ +#define SISL_WWID_DATA_LEN 16 /* WWID data length */ /* * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required, @@ -169,7 +174,12 @@ struct sisl_ioasa { u32 ioasc; #define SISL_IOASC_GOOD_COMPLETION 0x00000000U }; - u32 resid; + + union { + u32 resid; + u32 lunid_hi; + }; + u8 port; u8 afu_extra; /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR): @@ -192,7 +202,14 @@ struct sisl_ioasa { u8 scsi_extra; u8 fc_extra; - u8 sense_data[SISL_SENSE_DATA_LEN]; + + union { + u8 sense_data[SISL_SENSE_DATA_LEN]; + struct { + u32 lunid_lo; + u8 wwid[SISL_WWID_DATA_LEN]; + }; + }; /* These fields are defined by the SISlite architecture for the * host to use as they see fit for their implementation. 
@@ -394,6 +411,7 @@ struct sisl_global_regs { #define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL #define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL #define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL +#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL }; #define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */ diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h index 87e1f63029bf..ad79c34f4d11 100644 --- a/include/uapi/scsi/cxlflash_ioctl.h +++ b/include/uapi/scsi/cxlflash_ioctl.h @@ -202,14 +202,34 @@ union cxlflash_ioctls { #define HT_CXLFLASH_VERSION_0 0 struct ht_cxlflash_hdr { - __u16 version; /* Version data */ - __u16 subcmd; /* Sub-command */ - __u16 rsvd[2]; /* Reserved for future use */ - __u64 flags; /* Input flags */ - __u64 return_flags; /* Returned flags */ + __u16 version; /* Version data */ + __u16 subcmd; /* Sub-command */ + __u16 rsvd[2]; /* Reserved for future use */ + __u64 flags; /* Input flags */ + __u64 return_flags; /* Returned flags */ +}; + +#define HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN 0x0001 +#define HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN 0x0002 +#define HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT 0x0003 +#define HT_CXLFLASH_LUN_PROVISION_WWID_LEN 16 + +struct ht_cxlflash_lun_provision { + struct ht_cxlflash_hdr hdr; /* Common fields */ + __u16 port; /* Target port for provision request */ + __u16 reserved16[3]; /* Reserved for future use */ + __u64 size; /* Size of LUN (4K blocks) */ + __u64 lun_id; /* SCSI LUN ID */ + __u8 wwid[HT_CXLFLASH_LUN_PROVISION_WWID_LEN]; /* Page83 WWID, NAA-6 */ + __u64 max_num_luns; /* Maximum number of LUNs provisioned */ + __u64 cur_num_luns; /* Current number of LUNs provisioned */ + __u64 max_cap_port; /* Total capacity for port (4K blocks) */ + __u64 cur_cap_port; /* Current capacity for port (4K blocks) */ + __u64 reserved[8]; /* Reserved for future use */ }; union cxlflash_ht_ioctls { + struct ht_cxlflash_lun_provision lun_provision; }; #define MAX_HT_CXLFLASH_IOCTL_SZ (sizeof(union cxlflash_ht_ioctls)) @@ -218,6 +238,7 @@ union cxlflash_ht_ioctls { * CXL Flash host ioctls start at the top of the reserved CXL_MAGIC * region (0xBF) and grow downwards. */ +#define HT_CXLFLASH_LUN_PROVISION CXL_IOWR(0xBF, ht_cxlflash_lun_provision) #endif /* ifndef _CXLFLASH_IOCTL_H */ -- cgit From bc88ac47d5cb11c7dd9896781f793fae519d53fa Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 21 Jun 2017 21:16:22 -0500 Subject: scsi: cxlflash: Support AFU debug Adopt the SISLite AFU debug capability to allow future CXL Flash adapters the ability to better debug AFU issues. Update the SISLite header with the changes necessary to support AFU debug operations and create a host ioctl interface for user debug software. Also update the cxlflash documentation to describe this new host ioctl. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. 
Petersen --- Documentation/powerpc/cxlflash.txt | 14 ++++++ drivers/scsi/cxlflash/common.h | 5 ++ drivers/scsi/cxlflash/main.c | 96 ++++++++++++++++++++++++++++++++++++++ drivers/scsi/cxlflash/main.h | 1 + drivers/scsi/cxlflash/sislite.h | 2 + include/uapi/scsi/cxlflash_ioctl.h | 37 +++++++++++++-- 6 files changed, 152 insertions(+), 3 deletions(-) diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt index 2d6297b4ad80..a64bdaa0a1cf 100644 --- a/Documentation/powerpc/cxlflash.txt +++ b/Documentation/powerpc/cxlflash.txt @@ -413,3 +413,17 @@ HT_CXLFLASH_LUN_PROVISION With this information, the number of available LUNs and capacity can be can be calculated. + +HT_CXLFLASH_AFU_DEBUG +--------------------- + This ioctl is used to debug AFUs by supporting a command pass-through + interface. It is only valid when used with AFUs that support the AFU + debug capability. + + With exception of buffer management, AFU debug commands are opaque to + cxlflash and treated as pass-through. For debug commands that do require + data transfer, the user supplies an adequately sized data buffer and must + specify the data transfer direction with respect to the host. There is a + maximum transfer size of 256K imposed. Note that partial read completions + are not supported - when errors are experienced with a host read data + transfer, the data buffer is not copied back to the user. diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 58107246c32f..a91151c4a4fc 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -262,6 +262,11 @@ static inline bool afu_has_cap(struct afu *afu, u64 cap) return afu_cap & cap; } +static inline bool afu_is_afu_debug(struct afu *afu) +{ + return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG); +} + static inline bool afu_is_lun_provision(struct afu *afu) { return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 1279293ff3b3..d3ad52ec5314 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3325,6 +3325,99 @@ out: return rc; } +/** + * cxlflash_afu_debug() - host AFU debug handler + * @cfg: Internal structure associated with the host. + * @arg: Kernel copy of userspace ioctl data structure. + * + * For debug requests requiring a data buffer, always provide an aligned + * (cache line) buffer to the AFU to appease any alignment requirements. + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, + struct ht_cxlflash_afu_debug *afu_dbg) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct sisl_ioarcb rcb; + struct sisl_ioasa asa; + char *buf = NULL; + char *kbuf = NULL; + void __user *ubuf = (__force void __user *)afu_dbg->data_ea; + u16 req_flags = SISL_REQ_FLAGS_AFU_CMD; + u32 ulen = afu_dbg->data_len; + bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE; + int rc = 0; + + if (!afu_is_afu_debug(afu)) { + rc = -ENOTSUPP; + goto out; + } + + if (ulen) { + req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN; + + if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) { + rc = -EINVAL; + goto out; + } + + if (unlikely(!access_ok(is_write ? 
VERIFY_READ : VERIFY_WRITE, + ubuf, ulen))) { + rc = -EFAULT; + goto out; + } + + buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL); + if (unlikely(!buf)) { + rc = -ENOMEM; + goto out; + } + + kbuf = PTR_ALIGN(buf, cache_line_size()); + + if (is_write) { + req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + + rc = copy_from_user(kbuf, ubuf, ulen); + if (unlikely(rc)) + goto out; + } + } + + memset(&rcb, 0, sizeof(rcb)); + memset(&asa, 0, sizeof(asa)); + + rcb.req_flags = req_flags; + rcb.msi = SISL_MSI_RRQ_UPDATED; + rcb.timeout = MC_AFU_DEBUG_TIMEOUT; + rcb.ioasa = &asa; + + if (ulen) { + rcb.data_len = ulen; + rcb.data_ea = (uintptr_t)kbuf; + } + + rcb.cdb[0] = SISL_AFU_CMD_DEBUG; + memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd, + HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN); + + rc = send_afu_cmd(afu, &rcb); + if (rc) { + dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", + __func__, rc, asa.ioasc, asa.afu_extra); + goto out; + } + + if (ulen && !is_write) + rc = copy_to_user(ubuf, kbuf, ulen); +out: + kfree(buf); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + /** * cxlflash_chr_ioctl() - character device IOCTL handler * @file: File pointer for this device. @@ -3363,6 +3456,8 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, } ioctl_tbl[] = { /* NOTE: order matters here */ { sizeof(struct ht_cxlflash_lun_provision), (hioctl)cxlflash_lun_provision }, + { sizeof(struct ht_cxlflash_afu_debug), + (hioctl)cxlflash_afu_debug }, }; /* Hold read semaphore so we can drain if needed */ @@ -3373,6 +3468,7 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case HT_CXLFLASH_LUN_PROVISION: + case HT_CXLFLASH_AFU_DEBUG: known_ioctl = true; idx = _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd); size = ioctl_tbl[idx].size; diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 6ae8a118358a..880e348ed5c9 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -68,6 +68,7 @@ /* AFU command timeout values */ #define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */ #define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */ +#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */ /* AFU command room retry limit */ #define MC_ROOM_RETRY_CNT 10 diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index c216feeb8906..d671fae19697 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -74,6 +74,7 @@ struct sisl_ioarcb { u8 cdb[16]; /* must be in big endian */ #define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */ #define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */ +#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */ #define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */ #define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */ @@ -412,6 +413,7 @@ struct sisl_global_regs { #define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL #define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL #define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL +#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL }; #define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */ diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h index ad79c34f4d11..48d107e75cf2 100644 --- a/include/uapi/scsi/cxlflash_ioctl.h +++ b/include/uapi/scsi/cxlflash_ioctl.h @@ -17,6 +17,11 @@ #include +/* + * Structure and definitions for all CXL Flash ioctls + */ +#define CXLFLASH_WWID_LEN 16 + /* * Structure 
and flag definitions CXL Flash superpipe ioctls */ @@ -151,7 +156,7 @@ struct dk_cxlflash_recover_afu { __u64 reserved[8]; /* Reserved for future use */ }; -#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN 16 +#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN CXLFLASH_WWID_LEN #define DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE 0x8000000000000000ULL #define DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE 0x4000000000000000ULL #define DK_CXLFLASH_MANAGE_LUN_ALL_PORTS_ACCESSIBLE 0x2000000000000000ULL @@ -209,10 +214,20 @@ struct ht_cxlflash_hdr { __u64 return_flags; /* Returned flags */ }; +/* + * Input flag definitions available to all host ioctls + * + * These are grown from the bottom-up with the intention that ioctl-specific + * input flag definitions would grow from the top-down, allowing the two sets + * to co-exist. While not required/enforced at this time, this provides future + * flexibility. + */ +#define HT_CXLFLASH_HOST_READ 0x0000000000000000ULL +#define HT_CXLFLASH_HOST_WRITE 0x0000000000000001ULL + #define HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN 0x0001 #define HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN 0x0002 #define HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT 0x0003 -#define HT_CXLFLASH_LUN_PROVISION_WWID_LEN 16 struct ht_cxlflash_lun_provision { struct ht_cxlflash_hdr hdr; /* Common fields */ @@ -220,7 +235,7 @@ struct ht_cxlflash_lun_provision { __u16 reserved16[3]; /* Reserved for future use */ __u64 size; /* Size of LUN (4K blocks) */ __u64 lun_id; /* SCSI LUN ID */ - __u8 wwid[HT_CXLFLASH_LUN_PROVISION_WWID_LEN]; /* Page83 WWID, NAA-6 */ + __u8 wwid[CXLFLASH_WWID_LEN];/* Page83 WWID, NAA-6 */ __u64 max_num_luns; /* Maximum number of LUNs provisioned */ __u64 cur_num_luns; /* Current number of LUNs provisioned */ __u64 max_cap_port; /* Total capacity for port (4K blocks) */ @@ -228,8 +243,23 @@ struct ht_cxlflash_lun_provision { __u64 reserved[8]; /* Reserved for future use */ }; +#define HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN 262144 /* 256K */ +#define HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN 12 +struct ht_cxlflash_afu_debug { + struct ht_cxlflash_hdr hdr; /* Common fields */ + __u8 reserved8[4]; /* Reserved for future use */ + __u8 afu_subcmd[HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN]; /* AFU subcommand, + * (pass through) + */ + __u64 data_ea; /* Data buffer effective address */ + __u32 data_len; /* Data buffer length */ + __u32 reserved32; /* Reserved for future use */ + __u64 reserved[8]; /* Reserved for future use */ +}; + union cxlflash_ht_ioctls { struct ht_cxlflash_lun_provision lun_provision; + struct ht_cxlflash_afu_debug afu_debug; }; #define MAX_HT_CXLFLASH_IOCTL_SZ (sizeof(union cxlflash_ht_ioctls)) @@ -239,6 +269,7 @@ union cxlflash_ht_ioctls { * region (0xBF) and grow downwards. */ #define HT_CXLFLASH_LUN_PROVISION CXL_IOWR(0xBF, ht_cxlflash_lun_provision) +#define HT_CXLFLASH_AFU_DEBUG CXL_IOWR(0xBE, ht_cxlflash_afu_debug) #endif /* ifndef _CXLFLASH_IOCTL_H */ -- cgit From 3223c01aa1cec60d59bd218aca5e202b558d225a Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 21 Jun 2017 21:16:33 -0500 Subject: scsi: cxlflash: Support WS16 unmap The cxlflash driver supports performing a write-same16 to scrub virtual luns when they are released by a user. To date, AFUs for adapters that are supported by cxlflash do not have the capability to unmap as part of the WS operation. This can lead to fragmented flash devices which results in performance degradation. Future AFUs can optionally support unmap write-same commands and reflects this support via the context control register. 
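Illustrative note on the HT_CXLFLASH_AFU_DEBUG interface documented and defined above: a minimal userspace sketch of how the pass-through might be exercised. The device node path, the subcommand byte, and the 4 KB buffer size are assumptions for illustration only; the real header also carries fields (such as a version) that are not set here and that the interface may validate.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <scsi/cxlflash_ioctl.h>   /* uapi header added above */

    int main(void)
    {
        struct ht_cxlflash_afu_debug dbg;
        uint8_t buf[4096];             /* well under the 256K transfer limit */
        int fd = open("/dev/cxlflash/host0", O_RDWR);  /* path is an assumption */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&dbg, 0, sizeof(dbg));
        memset(buf, 0, sizeof(buf));
        dbg.afu_subcmd[0] = 0x01;                 /* opaque to cxlflash, AFU-defined */
        dbg.data_ea  = (uint64_t)(uintptr_t)buf;  /* effective address of user buffer */
        dbg.data_len = sizeof(buf);
        dbg.hdr.flags = HT_CXLFLASH_HOST_READ;    /* AFU -> host transfer */

        if (ioctl(fd, HT_CXLFLASH_AFU_DEBUG, &dbg) < 0)
            perror("HT_CXLFLASH_AFU_DEBUG");

        close(fd);
        return 0;
    }

Because partial read completions are not supported, a failed host-read transfer leaves buf untouched, matching the behaviour described in the documentation hunk.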
This provides userspace applications with direct visibility such that they need not depend on a host API. Detect unmap support during cxlflash initialization by reading the context control register associated with the primary hardware queue. Update the existing write_same16() routine to set the unmap bit in the CDB when unmap is supported by the host. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 1 + drivers/scsi/cxlflash/main.c | 12 ++++++++++++ drivers/scsi/cxlflash/sislite.h | 1 + drivers/scsi/cxlflash/vlun.c | 1 + 4 files changed, 15 insertions(+) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index a91151c4a4fc..e95e5a516b5e 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -147,6 +147,7 @@ struct cxlflash_cfg { wait_queue_head_t tmf_waitq; spinlock_t tmf_slock; bool tmf_active; + bool ws_unmap; /* Write-same unmap supported */ wait_queue_head_t reset_waitq; enum cxlflash_state state; async_cookie_t async_reset_cookie; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index d3ad52ec5314..0cce442c260a 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -1812,6 +1812,18 @@ static int init_global(struct cxlflash_cfg *cfg) SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), &hwq->ctrl_map->ctx_cap); } + + /* + * Determine write-same unmap support for host by evaluating the unmap + * sector support bit of the context control register associated with + * the primary hardware queue. Note that while this status is reflected + * in a context register, the outcome can be assumed to be host-wide. + */ + hwq = get_hwq(afu, PRIMARY_HWQ); + reg = readq_be(&hwq->host_map->ctx_ctrl); + if (reg & SISL_CTX_CTRL_UNMAP_SECTOR) + cfg->ws_unmap = true; + /* Initialize heartbeat */ afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); out: diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index d671fae19697..09daa86670fc 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -283,6 +283,7 @@ struct sisl_host_map { __be64 rrq_end; /* write sequence: start followed by end */ __be64 cmd_room; __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ +#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */ __be64 mbox_w; /* restricted use */ __be64 sq_start; /* Submission Queue (R/W): write sequence and */ __be64 sq_end; /* inclusion semantics are the same as RRQ */ diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index 0800bcba5a00..bdfb93061460 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -446,6 +446,7 @@ static int write_same16(struct scsi_device *sdev, while (left > 0) { scsi_cmd[0] = WRITE_SAME_16; + scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0; put_unaligned_be64(offset, &scsi_cmd[2]); put_unaligned_be32(ws_limit < left ? ws_limit : left, &scsi_cmd[10]); -- cgit From 479ad8e9d48c4d82c92417b012193e967fc33b8a Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 21 Jun 2017 21:16:44 -0500 Subject: scsi: cxlflash: Remove zeroing of private command data The SCSI core now zeroes the per-command private data area prior to calling into the LLD. Replace the clearing operation that takes place when the private command data reference is obtained with a routine that performs common initializations. 
The zeroing that takes place in the device reset path remains intact as the private command data associated with the specified SCSI command is not guaranteed to be cleared. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 11 +++++++++-- drivers/scsi/cxlflash/main.c | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index e95e5a516b5e..6d95e8e147e0 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -178,15 +178,22 @@ static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc) return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd)); } -static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) +static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc) { struct afu_cmd *afuc = sc_to_afuc(sc); - memset(afuc, 0, sizeof(*afuc)); INIT_LIST_HEAD(&afuc->queue); return afuc; } +static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) +{ + struct afu_cmd *afuc = sc_to_afuc(sc); + + memset(afuc, 0, sizeof(*afuc)); + return sc_to_afuci(sc); +} + struct hwq { /* Stuff requiring alignment go first. */ struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */ diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 0cce442c260a..43389825299a 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -543,7 +543,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) struct cxlflash_cfg *cfg = shost_priv(host); struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; - struct afu_cmd *cmd = sc_to_afucz(scp); + struct afu_cmd *cmd = sc_to_afuci(scp); struct scatterlist *sg = scsi_sglist(scp); int hwq_index = cmd_to_target_hwq(host, scp, afu); struct hwq *hwq = get_hwq(afu, hwq_index); -- cgit From 8ba1ddb31f528cb45be39b7f3b600261afaa7920 Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 21 Jun 2017 21:16:54 -0500 Subject: scsi: cxlflash: Update TMF command processing Currently, the SCSI command presented to the device reset handler is used to send TMFs to the AFU for a device reset. This behavior is incorrect as the command presented is an actual command and not a special notification. As such, it should only be used for reference and not be acted upon. Additionally, the existing TMF transmission routine does not account for actual errors from the hardware, only reflecting failure when a timeout occurs. This can lead to a condition where the device reset handler is presented with a false 'success'. Update send_tmf() to dynamically allocate a private command for sending the TMF command and properly reflect failure when the completed command indicates an error or was aborted. Detect TMF commands during response processing and avoid scsi_done() for these types of commands. Lastly, update comments in the TMF processing paths to describe the new behavior. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. 
Petersen --- drivers/scsi/cxlflash/main.c | 65 ++++++++++++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 43389825299a..7a787b6e21c4 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -155,9 +155,10 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) * cmd_complete() - command completion handler * @cmd: AFU command that has completed. * - * Prepares and submits command that has either completed or timed out to - * the SCSI stack. Checks AFU command back into command pool for non-internal - * (cmd->scp populated) commands. + * For SCSI commands this routine prepares and submits commands that have + * either completed or timed out to the SCSI stack. For internal commands + * (TMF or AFU), this routine simply notifies the originator that the + * command has completed. */ static void cmd_complete(struct afu_cmd *cmd) { @@ -167,7 +168,6 @@ static void cmd_complete(struct afu_cmd *cmd) struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - bool cmd_is_tmf; spin_lock_irqsave(&hwq->hsq_slock, lock_flags); list_del(&cmd->list); @@ -180,19 +180,14 @@ static void cmd_complete(struct afu_cmd *cmd) else scp->result = (DID_OK << 16); - cmd_is_tmf = cmd->cmd_tmf; - dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n", __func__, scp, scp->result, cmd->sa.ioasc); - scp->scsi_done(scp); - - if (cmd_is_tmf) { - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - cfg->tmf_active = false; - wake_up_all_locked(&cfg->tmf_waitq); - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - } + } else if (cmd->cmd_tmf) { + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + cfg->tmf_active = false; + wake_up_all_locked(&cfg->tmf_waitq); + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); } else complete(&cmd->cevent); } @@ -206,8 +201,10 @@ static void cmd_complete(struct afu_cmd *cmd) */ static void flush_pending_cmds(struct hwq *hwq) { + struct cxlflash_cfg *cfg = hwq->afu->parent; struct afu_cmd *cmd, *tmp; struct scsi_cmnd *scp; + ulong lock_flags; list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) { /* Bypass command when on a doneq, cmd_complete() will handle */ @@ -222,7 +219,15 @@ static void flush_pending_cmds(struct hwq *hwq) scp->scsi_done(scp); } else { cmd->cmd_aborted = true; - complete(&cmd->cevent); + + if (cmd->cmd_tmf) { + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + cfg->tmf_active = false; + wake_up_all_locked(&cfg->tmf_waitq); + spin_unlock_irqrestore(&cfg->tmf_slock, + lock_flags); + } else + complete(&cmd->cevent); } } } @@ -455,24 +460,35 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp, /** * send_tmf() - sends a Task Management Function (TMF) * @afu: AFU to checkout from. - * @scp: SCSI command from stack. + * @scp: SCSI command from stack describing target. * @tmfcmd: TMF command to send. 
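The commit message above describes send_tmf() building its own private command instead of borrowing the presented SCSI command; the diff below over-allocates a buffer and aligns the command within it. A minimal userspace sketch of that over-allocate-and-align pattern follows (this is plain C, not driver code; the kernel's PTR_ALIGN performs the same rounding shown by ptr_align() here).

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct afu_cmd_like {              /* stand-in for the driver's command */
        uint8_t cmd_type;
        uint64_t payload[8];
    };

    static void *ptr_align(void *p, size_t a)
    {
        return (void *)(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1));
    }

    int main(void)
    {
        size_t align = _Alignof(struct afu_cmd_like);
        /* allocate enough slack so the aligned object still fits */
        void *buf = calloc(1, sizeof(struct afu_cmd_like) + align - 1);
        struct afu_cmd_like *cmd;

        if (!buf)
            return 1;

        cmd = ptr_align(buf, align);
        printf("raw=%p aligned=%p (alignment %zu)\n", buf, (void *)cmd, align);

        free(buf);                     /* free the original pointer, not cmd */
        return 0;
    }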
* * Return: - * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure + * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure */ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) { struct Scsi_Host *host = scp->device->host; struct cxlflash_cfg *cfg = shost_priv(host); - struct afu_cmd *cmd = sc_to_afucz(scp); + struct afu_cmd *cmd = NULL; struct device *dev = &cfg->dev->dev; int hwq_index = cmd_to_target_hwq(host, scp, afu); struct hwq *hwq = get_hwq(afu, hwq_index); + char *buf = NULL; ulong lock_flags; int rc = 0; ulong to; + buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); + if (unlikely(!buf)) { + dev_err(dev, "%s: no memory for command\n", __func__); + rc = -ENOMEM; + goto out; + } + + cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); + INIT_LIST_HEAD(&cmd->queue); + /* When Task Management Function is active do not send another */ spin_lock_irqsave(&cfg->tmf_slock, lock_flags); if (cfg->tmf_active) @@ -482,7 +498,6 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) cfg->tmf_active = true; spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - cmd->scp = scp; cmd->parent = afu; cmd->cmd_tmf = true; cmd->hwq_index = hwq_index; @@ -511,12 +526,20 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) cfg->tmf_slock, to); if (!to) { - cfg->tmf_active = false; dev_err(dev, "%s: TMF timed out\n", __func__); - rc = -1; + rc = -ETIMEDOUT; + } else if (cmd->cmd_aborted) { + dev_err(dev, "%s: TMF aborted\n", __func__); + rc = -EAGAIN; + } else if (cmd->sa.ioasc) { + dev_err(dev, "%s: TMF failed ioasc=%08x\n", + __func__, cmd->sa.ioasc); + rc = -EIO; } + cfg->tmf_active = false; spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); out: + kfree(buf); return rc; } -- cgit From c5419e2618b951a73d590e0752810686d52c45cf Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:16 -0700 Subject: scsi: qla2xxx: Combine Active command arrays. Merge active/outstanding cmd arrays from target side and initiator side together in prepration for Target Multi Queue support. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 15 +++- drivers/scsi/qla2xxx/qla_gbl.h | 2 - drivers/scsi/qla2xxx/qla_inline.h | 1 + drivers/scsi/qla2xxx/qla_isr.c | 48 +++++++++---- drivers/scsi/qla2xxx/qla_os.c | 75 ++++++++++++++------ drivers/scsi/qla2xxx/qla_target.c | 144 ++++++++++++-------------------------- drivers/scsi/qla2xxx/qla_target.h | 23 ++++-- 7 files changed, 164 insertions(+), 144 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index ddf93efe3986..1b5049b1ef4a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -437,7 +437,18 @@ struct srb_iocb { #define SRB_NACK_PRLI 17 #define SRB_NACK_LOGO 18 +enum { + TYPE_SRB, + TYPE_TGT_CMD, +}; + typedef struct srb { + /* + * Do not move cmd_type field, it needs to + * line up with qla_tgt_cmd->cmd_type + */ + uint8_t cmd_type; + uint8_t pad[3]; atomic_t ref_count; struct fc_port *fcport; struct scsi_qla_host *vha; @@ -3287,9 +3298,6 @@ struct qlt_hw_data { uint32_t __iomem *atio_q_out; struct qla_tgt_func_tmpl *tgt_ops; - struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS]; - uint16_t current_handle; - struct qla_tgt_vp_map *tgt_vp_map; int saved_set; @@ -4258,6 +4266,7 @@ enum nexus_wait_type { WAIT_LUN, }; +#include "qla_target.h" #include "qla_gbl.h" #include "qla_dbg.h" #include "qla_inline.h" diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f8540f5c9e5d..63355f40ff2f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -834,8 +834,6 @@ extern irqreturn_t qla8044_intr_handler(int, void *); extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t); extern int qla8044_abort_isp(scsi_qla_host_t *); extern int qla8044_check_fw_alive(struct scsi_qla_host *); - -extern void qlt_host_reset_handler(struct qla_hw_data *ha); extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *, uint16_t *); extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 9996ec0daab1..99028d48c664 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -250,6 +250,7 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) memset(sp, 0, sizeof(*sp)); sp->fcport = fcport; + sp->cmd_type = TYPE_SRB; sp->iocbs = 1; sp->vha = vha; done: diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 2984abcc29e7..8aaddb75f964 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -17,7 +17,7 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); -static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, +static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); /** @@ -2280,6 +2280,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } + if (sp->cmd_type != TYPE_SRB) { + req->outstanding_cmds[handle] = NULL; + ql_dbg(ql_dbg_io, vha, 0x3015, + "Unknown sp->cmd_type %x %p).\n", + sp->cmd_type, sp); + return; + } + if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); return; @@ -2632,8 +2640,9 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) * qla2x00_error_entry() - Process 
an error entry. * @ha: SCSI driver HA context * @pkt: Entry pointer + * return : 1=allow further error analysis. 0=no additional error analysis. */ -static void +static int qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) { srb_t *sp; @@ -2654,18 +2663,35 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; - if (pkt->entry_type == NOTIFY_ACK_TYPE && - pkt->handle == QLA_TGT_SKIP_HANDLE) - return; + if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) + return 0; - sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); - if (sp) { - sp->done(sp, res); - return; + switch (pkt->entry_type) { + case NOTIFY_ACK_TYPE: + case STATUS_TYPE: + case STATUS_CONT_TYPE: + case LOGINOUT_PORT_IOCB_TYPE: + case CT_IOCB_TYPE: + case ELS_IOCB_TYPE: + case ABORT_IOCB_TYPE: + case MBX_IOCB_TYPE: + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (sp) { + sp->done(sp, res); + return 0; + } + break; + + case ABTS_RESP_24XX: + case CTIO_TYPE7: + case CTIO_CRC2: + default: + return 1; } fatal: ql_log(ql_log_warn, vha, 0x5030, "Error entry - invalid handle/queue (%04x).\n", que); + return 0; } /** @@ -2746,9 +2772,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, } if (pkt->entry_status != 0) { - qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); - - if (qlt_24xx_process_response_error(vha, pkt)) + if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) goto process_err; ((response_t *)pkt)->signature = RESPONSE_PROCESSED; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dcf50aa61e9d..d92e65b40c44 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1649,8 +1649,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) srb_t *sp; struct qla_hw_data *ha = vha->hw; struct req_que *req; - - qlt_host_reset_handler(ha); + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_tgt_cmd *cmd; + uint8_t trace = 0; spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { @@ -1662,27 +1663,57 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - /* Don't abort commands in adapter during EEH - * recovery as it's not accessible/responding. - */ - if (GET_CMD_SP(sp) && !ha->flags.eeh_busy && - (sp->type == SRB_SCSI_CMD)) { - /* Get a reference to the sp and drop the lock. - * The reference ensures this sp->done() call - * - and not the call in qla2xxx_eh_abort() - - * ends the SCSI command (with result 'res'). + req->outstanding_cmds[cnt] = NULL; + if (sp->cmd_type == TYPE_SRB) { + /* + * Don't abort commands in adapter + * during EEH recovery as it's not + * accessible/responding. */ - sp_get(sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - status = qla2xxx_eh_abort(GET_CMD_SP(sp)); - spin_lock_irqsave(&ha->hardware_lock, flags); - /* Get rid of extra reference if immediate exit - * from ql2xxx_eh_abort */ - if (status == FAILED && (qla2x00_isp_reg_stat(ha))) - atomic_dec(&sp->ref_count); + if (GET_CMD_SP(sp) && + !ha->flags.eeh_busy && + (sp->type == SRB_SCSI_CMD)) { + /* + * Get a reference to the sp + * and drop the lock. The + * reference ensures this + * sp->done() call and not the + * call in qla2xxx_eh_abort() + * ends the SCSI command (with + * result 'res'). 
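The cmd_type checks above work because both command flavours start with the same leading tag byte, so one outstanding_cmds[] array can hold either and the completion path dispatches on the first byte. A hedged, simplified sketch of that shared-leading-tag arrangement (types and values are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    enum { TYPE_A = 0, TYPE_B = 1 };

    struct cmd_a { uint8_t cmd_type; int a_only; };
    struct cmd_b { uint8_t cmd_type; long b_only; };

    static void complete_cmd(void *slot)
    {
        /* reading the first byte through an unsigned char pointer is always valid */
        uint8_t type = *(uint8_t *)slot;

        if (type == TYPE_A)
            printf("TYPE_A: %d\n", ((struct cmd_a *)slot)->a_only);
        else
            printf("TYPE_B: %ld\n", ((struct cmd_b *)slot)->b_only);
    }

    int main(void)
    {
        struct cmd_a a = { .cmd_type = TYPE_A, .a_only = 7 };
        struct cmd_b b = { .cmd_type = TYPE_B, .b_only = 42 };
        void *outstanding[2] = { &a, &b };   /* one array, two command flavours */

        for (int i = 0; i < 2; i++)
            complete_cmd(outstanding[i]);
        return 0;
    }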
+ */ + sp_get(sp); + spin_unlock_irqrestore( + &ha->hardware_lock, flags); + status = qla2xxx_eh_abort( + GET_CMD_SP(sp)); + spin_lock_irqsave( + &ha->hardware_lock, flags); + /* + * Get rid of extra reference + * if immediate exit from + * ql2xxx_eh_abort + */ + if (status == FAILED && + (qla2x00_isp_reg_stat(ha))) + atomic_dec( + &sp->ref_count); + } + sp->done(sp, res); + } else { + if (!vha->hw->tgt.tgt_ops || !tgt || + qla_ini_mode_enabled(vha)) { + if (!trace) + ql_dbg(ql_dbg_tgt_mgt, + vha, 0xf003, + "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n", + vha->dpc_flags); + continue; + } + cmd = (struct qla_tgt_cmd *)sp; + qlt_abort_cmd_on_host_reset(cmd->vha, + cmd); } - req->outstanding_cmds[cnt] = NULL; - sp->done(sp, res); } } } @@ -5862,6 +5893,8 @@ qla2x00_timer(scsi_qla_host_t *vha) sp = req->outstanding_cmds[index]; if (!sp) continue; + if (sp->cmd_type != TYPE_SRB) + continue; if (sp->type != SRB_SCSI_CMD) continue; sfcp = sp->fcport; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 5aae7aa03c38..a18f81046959 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -115,8 +115,6 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); -static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, - struct qla_tgt_cmd *cmd); static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull); static void qlt_disable_vha(struct scsi_qla_host *vha); @@ -2291,28 +2289,35 @@ static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha) /* ha->hardware_lock supposed to be held on entry */ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; uint32_t h; + int index; + uint8_t found = 0; + struct req_que *req = vha->req; + + h = req->current_outstanding_cmd; - h = ha->tgt.current_handle; - /* always increment cmd handle */ - do { - ++h; - if (h > DEFAULT_OUTSTANDING_COMMANDS) - h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ - if (h == ha->tgt.current_handle) { - ql_dbg(ql_dbg_io, vha, 0x305b, - "qla_target(%d): Ran out of " - "empty cmd slots in ha %p\n", vha->vp_idx, ha); - h = QLA_TGT_NULL_HANDLE; + for (index = 1; index < req->num_outstanding_cmds; index++) { + h++; + if (h == req->num_outstanding_cmds) + h = 1; + + if (h == QLA_TGT_SKIP_HANDLE) + continue; + + if (!req->outstanding_cmds[h]) { + found = 1; break; } - } while ((h == QLA_TGT_NULL_HANDLE) || - (h == QLA_TGT_SKIP_HANDLE) || - (ha->tgt.cmds[h-1] != NULL)); + } - if (h != QLA_TGT_NULL_HANDLE) - ha->tgt.current_handle = h; + if (found) { + req->current_outstanding_cmd = h; + } else { + ql_dbg(ql_dbg_io, vha, 0x305b, + "qla_target(%d): Ran out of empty cmd slots\n", + vha->vp_idx); + h = QLA_TGT_NULL_HANDLE; + } return h; } @@ -2323,7 +2328,6 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, { uint32_t h; struct ctio7_to_24xx *pkt; - struct qla_hw_data *ha = vha->hw; struct atio_from_isp *atio = &prm->cmd->atio; uint16_t temp; @@ -2343,8 +2347,9 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, * the session and, so, the command. 
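A hedged sketch of the handle-allocation scan performed by the reworked qlt_make_handle() above: start after the last handle handed out, wrap around, skip reserved slots, and report failure when every slot is busy. The constants and array size here are illustrative, not the driver's values.

    #include <stdio.h>

    #define NULL_HANDLE 0
    #define SKIP_HANDLE 1          /* illustrative reserved slot */
    #define NUM_SLOTS   8

    static void *slots[NUM_SLOTS];  /* NULL means free */
    static unsigned int current_handle;

    static unsigned int alloc_handle(void)
    {
        unsigned int h = current_handle;

        for (unsigned int i = 1; i < NUM_SLOTS; i++) {
            h++;
            if (h == NUM_SLOTS)
                h = 1;             /* slot 0 is the null handle */
            if (h == SKIP_HANDLE)
                continue;
            if (!slots[h]) {
                current_handle = h;
                return h;
            }
        }
        return NULL_HANDLE;        /* ran out of empty command slots */
    }

    int main(void)
    {
        int marker = 1;
        unsigned int h = alloc_handle();

        if (h != NULL_HANDLE)
            slots[h] = &marker;    /* owner stores its command in the slot */
        printf("allocated handle %u\n", h);
        return 0;
    }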
*/ return -EAGAIN; - } else - ha->tgt.cmds[h - 1] = prm->cmd; + } else { + vha->req->outstanding_cmds[h] = (srb_t *)prm->cmd; + } pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = prm->cmd->loop_id; @@ -2888,7 +2893,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) */ return -EAGAIN; } else - ha->tgt.cmds[h-1] = prm->cmd; + vha->req->outstanding_cmds[h] = (srb_t *)prm->cmd; pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); @@ -2994,7 +2999,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) crc_queuing_error: /* Cleanup will be performed by the caller */ - vha->hw->tgt.cmds[h - 1] = NULL; + vha->req->outstanding_cmds[h] = NULL; return QLA_FUNCTION_FAILED; } @@ -3676,50 +3681,38 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, return term; } -/* ha->hardware_lock supposed to be held on entry */ -static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, - uint32_t handle) -{ - struct qla_hw_data *ha = vha->hw; - - handle--; - if (ha->tgt.cmds[handle] != NULL) { - struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; - ha->tgt.cmds[handle] = NULL; - return cmd; - } else - return NULL; -} /* ha->hardware_lock supposed to be held on entry */ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, uint32_t handle, void *ctio) { struct qla_tgt_cmd *cmd = NULL; + struct req_que *req = vha->req; /* Clear out internal marks */ - handle &= ~(CTIO_COMPLETION_HANDLE_MARK | - CTIO_INTERMEDIATE_HANDLE_MARK); + handle &= ~QLA_TGT_HANDLE_MASK; if (handle != QLA_TGT_NULL_HANDLE) { if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) return NULL; - /* handle-1 is actually used */ - if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { + handle &= QLA_CMD_HANDLE_MASK; + + if (unlikely(handle > req->num_outstanding_cmds)) { ql_dbg(ql_dbg_tgt, vha, 0xe052, "qla_target(%d): Wrong handle %x received\n", vha->vp_idx, handle); return NULL; } - cmd = qlt_get_cmd(vha, handle); - if (unlikely(cmd == NULL)) { - ql_dbg(ql_dbg_tgt, vha, 0xe053, - "qla_target(%d): Suspicious: unable to " - "find the command with handle %x\n", vha->vp_idx, - handle); + cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[handle]; + if (unlikely((cmd == NULL) || + (cmd->cmd_type != TYPE_TGT_CMD))) { + ql_dbg(ql_dbg_async, vha, 0xe053, + "qla_target(%d): Suspicious: unable to find the command with handle %x cmd %p\n", + vha->vp_idx, handle, cmd); return NULL; } + req->outstanding_cmds[handle] = NULL; } else if (ctio != NULL) { /* We can't get loop ID from CTIO7 */ ql_dbg(ql_dbg_tgt, vha, 0xe054, @@ -3732,7 +3725,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, } /* hardware_lock should be held by caller. 
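For the completion side shown in qlt_ctio_to_cmd() above, a hedged sketch of the lookup flow: strip the firmware marker bits from the returned handle, bound-check the index, verify the type tag of the stored command, and clear the slot so the handle can be reused. Array size and tag values are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define HANDLE_MARK_MASK 0xF0000000u   /* completion/intermediate marks */
    #define CMD_INDEX_MASK   0x0000FFFFu
    #define NUM_SLOTS        8
    #define TYPE_TGT_CMD     1

    struct tgt_cmd_like { uint8_t cmd_type; int payload; };

    static struct tgt_cmd_like *slots[NUM_SLOTS];

    static struct tgt_cmd_like *lookup_and_clear(uint32_t handle)
    {
        struct tgt_cmd_like *cmd;
        uint32_t idx;

        handle &= ~HANDLE_MARK_MASK;       /* drop marker bits */
        idx = handle & CMD_INDEX_MASK;
        if (idx == 0 || idx >= NUM_SLOTS)
            return NULL;                   /* null handle or out of range */

        cmd = slots[idx];
        if (!cmd || cmd->cmd_type != TYPE_TGT_CMD)
            return NULL;                   /* suspicious handle */

        slots[idx] = NULL;                 /* slot is free for reuse */
        return cmd;
    }

    int main(void)
    {
        struct tgt_cmd_like c = { .cmd_type = TYPE_TGT_CMD, .payload = 99 };

        slots[5] = &c;
        printf("found=%d\n", lookup_and_clear(0x80000005u) ? 1 : 0);
        return 0;
    }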
*/ -static void +void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) { struct qla_hw_data *ha = vha->hw; @@ -3767,42 +3760,6 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) ha->tgt.tgt_ops->free_cmd(cmd); } -void -qlt_host_reset_handler(struct qla_hw_data *ha) -{ - struct qla_tgt_cmd *cmd; - unsigned long flags; - scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - scsi_qla_host_t *vha = NULL; - struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt; - uint32_t i; - - if (!base_vha->hw->tgt.tgt_ops) - return; - - if (!tgt || qla_ini_mode_enabled(base_vha)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, - "Target mode disabled\n"); - return; - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10, - "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n", - base_vha->dpc_flags); - - spin_lock_irqsave(&ha->hardware_lock, flags); - for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) { - cmd = qlt_get_cmd(base_vha, i); - if (!cmd) - continue; - /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */ - vha = cmd->vha; - qlt_abort_cmd_on_host_reset(vha, cmd); - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); -} - - /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ @@ -4084,7 +4041,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; memset(cmd, 0, sizeof(struct qla_tgt_cmd)); - + cmd->cmd_type = TYPE_TGT_CMD; memcpy(&cmd->atio, atio, sizeof(*atio)); cmd->state = QLA_TGT_STATE_NEW; cmd->tgt = vha->vha_tgt.qla_tgt; @@ -6632,21 +6589,6 @@ qlt_83xx_iospace_config(struct qla_hw_data *ha) ha->msix_count += 1; /* For ATIO Q */ } -int -qlt_24xx_process_response_error(struct scsi_qla_host *vha, - struct sts_entry_24xx *pkt) -{ - switch (pkt->entry_type) { - case ABTS_RECV_24XX: - case ABTS_RESP_24XX: - case CTIO_TYPE7: - case NOTIFY_ACK_TYPE: - case CTIO_CRC2: - return 1; - default: - return 0; - } -} void qlt_modify_vp_config(struct scsi_qla_host *vha, diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index c328a267c4c3..2120456c4709 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -70,6 +70,16 @@ /* Used to mark CTIO as intermediate */ #define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30 +#define QLA_TGT_NULL_HANDLE 0 + +#define QLA_TGT_HANDLE_MASK 0xF0000000 +#define QLA_QPID_HANDLE_MASK 0x000F0000 /* qpair id mask */ +#define QLA_CMD_HANDLE_MASK 0x0000FFFF +#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~QLA_TGT_HANDLE_MASK) + +#define QLA_QPID_HANDLE_SHIFT 16 +#define GET_QID(_h) ((_h & QLA_QPID_HANDLE_MASK) >> QLA_QPID_HANDLE_SHIFT) + #ifndef OF_SS_MODE_0 /* @@ -664,6 +674,7 @@ struct abts_resp_from_24xx_fw { struct qla_tgt_mgmt_cmd; struct fc_port; +struct qla_tgt_cmd; /* * This structure provides a template of function calls that the @@ -744,11 +755,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */ #define QLA_TGT_STATE_PROCESSED 3 /* target done processing */ - -/* Special handles */ -#define QLA_TGT_NULL_HANDLE 0 -#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK) - /* ATIO task_codes field */ #define ATIO_SIMPLE_QUEUE 0 #define ATIO_HEAD_OF_QUEUE 1 @@ -858,6 +864,11 @@ enum trace_flags { }; struct qla_tgt_cmd { + /* + * Do not move cmd_type field. 
it needs to line up with srb->cmd_type + */ + uint8_t cmd_type; + uint8_t pad[7]; struct se_cmd se_cmd; struct fc_port *sess; int state; @@ -1077,5 +1088,7 @@ extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t, uint8_t, uint8_t, uint8_t); +extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *, + struct qla_tgt_cmd *); #endif /* __QLA_TARGET_H */ -- cgit From 82de802ad46e23820f7fcaddc45adde181d95562 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:17 -0700 Subject: scsi: qla2xxx: Preparation for Target MQ. In Current code, Req Q 0, RespQ 0 & hardware_lock are the main resources for sending and process completion of Target IO. These resources are now referenced behind a new qpair/"struct qla_qpair base_qpair". Main path IO handle will access those resources via the qpair pointer in preparation for Target MQ. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_attr.c | 2 +- drivers/scsi/qla2xxx/qla_def.h | 17 +- drivers/scsi/qla2xxx/qla_gbl.h | 15 +- drivers/scsi/qla2xxx/qla_init.c | 11 +- drivers/scsi/qla2xxx/qla_iocb.c | 30 ++- drivers/scsi/qla2xxx/qla_isr.c | 11 +- drivers/scsi/qla2xxx/qla_mid.c | 40 +-- drivers/scsi/qla2xxx/qla_os.c | 23 +- drivers/scsi/qla2xxx/qla_target.c | 518 ++++++++++++++++++++------------------ drivers/scsi/qla2xxx/qla_target.h | 10 +- 10 files changed, 380 insertions(+), 297 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index a93eb42718e5..f0f16d313faf 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2096,7 +2096,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) } if (qos) { - qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx); + qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true); if (!qpair) ql_log(ql_log_warn, vha, 0x7084, "Can't create qpair for VP[%d]\n", diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 1b5049b1ef4a..64109134e276 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3182,6 +3182,9 @@ struct qla_tc_param { #define QLA_PRECONFIG_VPORTS 32 #define QLA_MAX_VPORTS_QLA24XX 128 #define QLA_MAX_VPORTS_QLA25XX 256 + +struct qla_qpair; + /* Response queue data structure */ struct rsp_que { dma_addr_t dma; @@ -3201,6 +3204,7 @@ struct rsp_que { struct qla_msix_entry *msix; struct req_que *req; srb_t *status_srb; /* status continuation entry */ + struct qla_qpair *qpair; dma_addr_t dma_fx00; response_t *ring_fx00; @@ -3241,6 +3245,14 @@ struct req_que { struct qla_qpair { spinlock_t qp_lock; atomic_t ref_count; + + /* + * For qpair 0, qp_lock_ptr will point at hardware_lock due to + * legacy code. For other Qpair(s), it will point at qp_lock. 
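The QLA_*_HANDLE_MASK definitions above suggest the handle layout: the low 16 bits carry the outstanding-command index and bits 16-19 carry the queue-pair id, which is how a completion can be routed back to the right queue. A hedged sketch of packing and unpacking with those masks (the driver's own MAKE_HANDLE macro is not reproduced here; pack_handle() is a local stand-in):

    #include <stdint.h>
    #include <stdio.h>

    #define QLA_QPID_HANDLE_MASK  0x000F0000u
    #define QLA_CMD_HANDLE_MASK   0x0000FFFFu
    #define QLA_QPID_HANDLE_SHIFT 16
    #define GET_QID(h) (((h) & QLA_QPID_HANDLE_MASK) >> QLA_QPID_HANDLE_SHIFT)

    static uint32_t pack_handle(uint32_t qid, uint32_t cmd_index)
    {
        return ((qid << QLA_QPID_HANDLE_SHIFT) & QLA_QPID_HANDLE_MASK) |
               (cmd_index & QLA_CMD_HANDLE_MASK);
    }

    int main(void)
    {
        uint32_t h = pack_handle(3, 0x002a);

        printf("handle=0x%08x qid=%u cmd=0x%04x\n",
               h, GET_QID(h), h & QLA_CMD_HANDLE_MASK);
        return 0;
    }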
+ */ + spinlock_t *qp_lock_ptr; + struct scsi_qla_host *vha; + /* distill these fields down to 'online=0/1' * ha->flags.eeh_busy * ha->flags.pci_channel_io_perm_failure @@ -3252,10 +3264,7 @@ struct qla_qpair { uint32_t delete_in_progress:1; uint16_t id; /* qp number used with FW */ - uint16_t num_active_cmd; /* cmds down at firmware */ - cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */ uint16_t vp_idx; /* vport ID */ - mempool_t *srb_mempool; /* to do: New driver: move queues to here instead of pointers */ @@ -3266,7 +3275,7 @@ struct qla_qpair { struct qla_hw_data *hw; struct work_struct q_work; struct list_head qp_list_elem; /* vha->qp_list */ - struct scsi_qla_host *vha; + uint16_t cpuid; }; /* Place holder for FW buffer parameters */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 63355f40ff2f..f5493eda0110 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -77,8 +77,7 @@ struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, enum qla_work_type); extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); -extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); -extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *); +extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); extern fc_port_t * @@ -96,7 +95,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); extern int qla2x00_init_rings(scsi_qla_host_t *); extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *); extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, - int, int); + int, int, bool); extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *); int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8); @@ -255,7 +254,8 @@ extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); extern int qla2xxx_dif_start_scsi_mq(srb_t *); extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); -extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); +extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); +extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *); extern int qla2x00_issue_marker(scsi_qla_host_t *, int); extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, uint32_t *, uint16_t, struct qla_tc_param *); @@ -663,9 +663,9 @@ extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *, extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t, int, uint8_t); + uint16_t, int, uint8_t, bool); extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t, struct qla_qpair *); + uint16_t, struct qla_qpair *, bool); extern void qla2x00_init_response_q_entries(struct rsp_que *); extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); @@ -839,7 +839,8 @@ extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *, extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr); extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *); extern int 
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *); -extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *); +extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *, + response_t *); int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, struct imm_ntfy_from_isp *, int); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 730e7fe4344a..878b552be263 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -7578,7 +7578,8 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) return ret; } -struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx) +struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, + int vp_idx, bool startqp) { int rsp_id = 0; int req_id = 0; @@ -7605,6 +7606,8 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v qpair->hw = vha->hw; qpair->vha = vha; + qpair->qp_lock_ptr = &qpair->qp_lock; + spin_lock_init(&qpair->qp_lock); /* Assign available que pair id */ mutex_lock(&ha->mq_lock); @@ -7642,7 +7645,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v mutex_unlock(&ha->mq_lock); /* Create response queue first */ - rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair); + rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); if (!rsp_id) { ql_log(ql_log_warn, vha, 0x0185, "Failed to create response queue.\n"); @@ -7652,7 +7655,8 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v qpair->rsp = ha->rsp_q_map[rsp_id]; /* Create request queue */ - req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos); + req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, + startqp); if (!req_id) { ql_log(ql_log_warn, vha, 0x0186, "Failed to create request queue.\n"); @@ -7661,6 +7665,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v qpair->req = ha->req_q_map[req_id]; qpair->rsp->req = qpair->req; + qpair->rsp->qpair = qpair; if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 8404f17f3c6c..6c710313adce 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2109,20 +2109,13 @@ queuing_error: /* Generic Control-SRB manipulation functions. */ /* hardware_lock assumed to be held. 
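The qp_lock_ptr assignment in qla2xxx_create_qpair() above, together with the earlier comment that qpair 0 aliases hardware_lock for legacy reasons, means shared code can always lock through qpair->qp_lock_ptr without knowing which lock it gets. A hedged userspace sketch of that lock-pointer indirection (pthreads, not the driver; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct qpair_like {
        pthread_mutex_t qp_lock;
        pthread_mutex_t *qp_lock_ptr;   /* what common code actually locks */
        int id;
    };

    static pthread_mutex_t hardware_lock = PTHREAD_MUTEX_INITIALIZER;

    static void qpair_init(struct qpair_like *qp, int id, int is_base)
    {
        pthread_mutex_init(&qp->qp_lock, NULL);
        qp->id = id;
        /* base qpair aliases the legacy lock; others use their own */
        qp->qp_lock_ptr = is_base ? &hardware_lock : &qp->qp_lock;
    }

    static void do_work(struct qpair_like *qp)
    {
        pthread_mutex_lock(qp->qp_lock_ptr);
        printf("qpair %d working under %s\n", qp->id,
               qp->qp_lock_ptr == &hardware_lock ? "hardware_lock" : "qp_lock");
        pthread_mutex_unlock(qp->qp_lock_ptr);
    }

    int main(void)
    {
        struct qpair_like base, extra;

        qpair_init(&base, 0, 1);
        qpair_init(&extra, 1, 0);
        do_work(&base);
        do_work(&extra);
        return 0;
    }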
*/ -void * -qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp) -{ - if (qla2x00_reset_active(vha)) - return NULL; - - return qla2x00_alloc_iocbs(vha, sp); -} void * -qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) +__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) { + scsi_qla_host_t *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; - struct req_que *req = ha->req_q_map[0]; + struct req_que *req = qpair->req; device_reg_t *reg = ISP_QUE_REG(ha, req->id); uint32_t index, handle; request_t *pkt; @@ -2200,6 +2193,23 @@ queuing_error: return pkt; } +void * +qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp) +{ + scsi_qla_host_t *vha = qpair->vha; + + if (qla2x00_reset_active(vha)) + return NULL; + + return __qla2x00_alloc_iocbs(qpair, sp); +} + +void * +qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) +{ + return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp); +} + static void qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 8aaddb75f964..1535a29a9d9f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2653,7 +2653,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) int res = DID_ERROR << 16; ql_dbg(ql_dbg_async, vha, 0x502a, - "type of error status in response: 0x%x\n", pkt->entry_status); + "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", + pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); if (que >= ha->max_req_queues || !ha->req_q_map[que]) goto fatal; @@ -2805,7 +2806,8 @@ process_err: case ABTS_RECV_24XX: if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { /* ensure that the ATIO queue is empty */ - qlt_handle_abts_recv(vha, (response_t *)pkt); + qlt_handle_abts_recv(vha, rsp, + (response_t *)pkt); break; } else { /* drop through */ @@ -2814,11 +2816,12 @@ process_err: case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: - qlt_response_pkt_all_vps(vha, (response_t *)pkt); + qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); break; case NOTIFY_ACK_TYPE: if (pkt->handle == QLA_TGT_SKIP_HANDLE) - qlt_response_pkt_all_vps(vha, (response_t *)pkt); + qlt_response_pkt_all_vps(vha, rsp, + (response_t *)pkt); else qla24xxx_nack_iocb_entry(vha, rsp->req, (struct nack_to_isp *)pkt); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 09a490c98763..4ad452a42dbe 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -640,7 +640,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) int qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos) + uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp) { int ret = 0; struct req_que *req = NULL; @@ -731,14 +731,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); - ret = qla25xx_init_req_que(base_vha, req); - if (ret != QLA_SUCCESS) { - ql_log(ql_log_fatal, base_vha, 0x00df, - "%s failed.\n", __func__); - mutex_lock(&ha->mq_lock); - clear_bit(que_id, ha->req_qid_map); - mutex_unlock(&ha->mq_lock); - goto que_failed; + if (startqp) { + ret = qla25xx_init_req_que(base_vha, req); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_fatal, base_vha, 0x00df, + "%s failed.\n", __func__); + mutex_lock(&ha->mq_lock); + clear_bit(que_id, ha->req_qid_map); + mutex_unlock(&ha->mq_lock); + goto que_failed; + } } return req->id; @@ 
-765,7 +767,7 @@ static void qla_do_work(struct work_struct *work) /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair) + uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp) { int ret = 0; struct rsp_que *rsp = NULL; @@ -843,14 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, if (ret) goto que_failed; - ret = qla25xx_init_rsp_que(base_vha, rsp); - if (ret != QLA_SUCCESS) { - ql_log(ql_log_fatal, base_vha, 0x00e7, - "%s failed.\n", __func__); - mutex_lock(&ha->mq_lock); - clear_bit(que_id, ha->rsp_qid_map); - mutex_unlock(&ha->mq_lock); - goto que_failed; + if (startqp) { + ret = qla25xx_init_rsp_que(base_vha, rsp); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_fatal, base_vha, 0x00e7, + "%s failed.\n", __func__); + mutex_lock(&ha->mq_lock); + clear_bit(que_id, ha->rsp_qid_map); + mutex_unlock(&ha->mq_lock); + goto que_failed; + } } rsp->req = NULL; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d92e65b40c44..82bbb6432f77 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -389,6 +389,13 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, ha->base_qpair->rsp = rsp; } + rsp->qpair = ha->base_qpair; + rsp->req = req; + ha->base_qpair->vha = vha; + ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; + ha->queue_pair_map[0] = ha->base_qpair; + set_bit(0, ha->qpair_qid_map); + /* * Make sure we record at least the request and response queue zero in * case we need to free them if part of the probe fails. @@ -399,9 +406,10 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, set_bit(0, ha->req_qid_map); return 1; -fail_base_qpair: - kfree(ha->queue_pair_map); fail_qpair_map: + kfree(ha->base_qpair); + ha->base_qpair = NULL; +fail_base_qpair: kfree(ha->rsp_q_map); ha->rsp_q_map = NULL; fail_rsp_map: @@ -451,6 +459,15 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) int cnt; unsigned long flags; + if (ha->queue_pair_map) { + kfree(ha->queue_pair_map); + ha->queue_pair_map = NULL; + } + if (ha->base_qpair) { + kfree(ha->base_qpair); + ha->base_qpair = NULL; + } + spin_lock_irqsave(&ha->hardware_lock, flags); for (cnt = 0; cnt < ha->max_req_queues; cnt++) { if (!test_bit(cnt, ha->req_qid_map)) @@ -3113,7 +3130,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Create start of day qpairs for Block MQ */ if (shost_use_blk_mq(host)) { for (i = 0; i < ha->max_qpairs; i++) - qla2xxx_create_qpair(base_vha, 5, 0); + qla2xxx_create_qpair(base_vha, 5, 0, true); } } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a18f81046959..22f9bb59a98d 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -110,16 +110,17 @@ enum fcp_resp_rsp_codes { /* Predefs for callbacks handed to qla2xxx LLD */ static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, struct atio_from_isp *pkt, uint8_t); -static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); +static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp, + response_t *pkt); static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags); -static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd +static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int 
ul_abort); static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull); static void qlt_disable_vha(struct scsi_qla_host *vha); static void qlt_clear_tgt_db(struct qla_tgt *tgt); -static void qlt_send_notify_ack(struct scsi_qla_host *vha, +static void qlt_send_notify_ack(struct qla_qpair *qpair, struct imm_ntfy_from_isp *ntfy, uint32_t add_flags, uint16_t resp_code, int resp_code_valid, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); @@ -130,6 +131,8 @@ static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, void qlt_unreg_sess(struct fc_port *sess); static void qlt_24xx_handle_abts(struct scsi_qla_host *, struct abts_recv_from_24xx *); +static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *, + uint16_t); /* * Global Variables @@ -243,7 +246,7 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, - struct atio_from_isp *atio, uint8_t ha_locked) + struct atio_from_isp *atio, uint8_t ha_locked) { struct qla_tgt_sess_op *u; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; @@ -274,7 +277,7 @@ out: return; out_term: - qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0); + qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0); goto out; } @@ -292,8 +295,8 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, ql_dbg(ql_dbg_async, vha, 0x502e, "Freeing unknown %s %p, because of Abort\n", "ATIO_TYPE7", u); - qlt_send_term_exchange(vha, NULL, &u->atio, - ha_locked, 0); + qlt_send_term_exchange(vha->hw->base_qpair, NULL, + &u->atio, ha_locked, 0); goto abort; } @@ -306,8 +309,8 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, ql_dbg(ql_dbg_async, vha, 0x503a, "Freeing unknown %s %p, because tgt is being stopped\n", "ATIO_TYPE7", u); - qlt_send_term_exchange(vha, NULL, &u->atio, - ha_locked, 0); + qlt_send_term_exchange(vha->hw->base_qpair, NULL, + &u->atio, ha_locked, 0); } else { ql_dbg(ql_dbg_async, vha, 0x503d, "Reschedule u %p, vha %p, host %p\n", u, vha, host); @@ -373,6 +376,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *entry = (struct imm_ntfy_from_isp *)atio; + qlt_issue_marker(vha, ha_locked); + if ((entry->u.isp24.vp_index != 0xFF) && (entry->u.isp24.nport_handle != 0xFFFF)) { host = qlt_find_host_by_vp_idx(vha, @@ -430,7 +435,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, return false; } -void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) +void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, + struct rsp_que *rsp, response_t *pkt) { switch (pkt->entry_type) { case CTIO_CRC2: @@ -449,7 +455,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) vha->vp_idx, entry->vp_index); break; } - qlt_response_pkt(host, pkt); + qlt_response_pkt(host, rsp, pkt); break; } @@ -467,7 +473,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) vha->vp_idx, entry->u.isp24.vp_index); break; } - qlt_response_pkt(host, pkt); + qlt_response_pkt(host, rsp, pkt); break; } @@ -489,7 +495,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) break; } } - qlt_response_pkt(host, pkt); + qlt_response_pkt(host, rsp, pkt); break; } @@ -506,7 +512,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) "vp_index %d\n", vha->vp_idx, entry->vp_index); break; } - qlt_response_pkt(host, pkt); + 
qlt_response_pkt(host, rsp, pkt); break; } @@ -523,12 +529,12 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) "vp_index %d\n", vha->vp_idx, entry->vp_index); break; } - qlt_response_pkt(host, pkt); + qlt_response_pkt(host, rsp, pkt); break; } default: - qlt_response_pkt(vha, pkt); + qlt_response_pkt(vha, rsp, pkt); break; } @@ -1560,11 +1566,12 @@ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ -static void qlt_send_notify_ack(struct scsi_qla_host *vha, +static void qlt_send_notify_ack(struct qla_qpair *qpair, struct imm_ntfy_from_isp *ntfy, uint32_t add_flags, uint16_t resp_code, int resp_code_valid, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) { + struct scsi_qla_host *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; request_t *pkt; struct nack_to_isp *nack; @@ -1574,11 +1581,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); - /* Send marker if required */ - if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) - return; - - pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); + pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); if (!pkt) { ql_dbg(ql_dbg_tgt, vha, 0xe049, "qla_target(%d): %s failed: unable to allocate " @@ -1619,16 +1622,17 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, vha->req); + qla2x00_start_iocbs(vha, qpair->req); } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ -static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, +static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, struct abts_recv_from_24xx *abts, uint32_t status, bool ids_reversed) { + struct scsi_qla_host *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; struct abts_resp_to_24xx *resp; uint32_t f_ctl; @@ -1638,11 +1642,8 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", ha, abts, status); - /* Send marker if required */ - if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) - return; - - resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL); + resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, + NULL); if (!resp) { ql_dbg(ql_dbg_tgt, vha, 0xe04a, "qla_target(%d): %s failed: unable to allocate " @@ -1698,7 +1699,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, vha->req); + qla2x00_start_iocbs(vha, qpair->req); } /* @@ -1711,11 +1712,9 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt, vha, 0xe007, "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw); - /* Send marker if required */ - if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) - return; - ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL); + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready( + vha->hw->base_qpair, NULL); if (ctio == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe04b, "qla_target(%d): %s failed: unable to allocate " @@ -1746,7 +1745,8 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, wmb(); qla2x00_start_iocbs(vha, vha->req); - qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry, + qlt_24xx_send_abts_resp(vha->hw->base_qpair, + (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true); } @@ -1861,7 +1861,8 @@ static int 
__qlt_24xx_handle_abts(struct scsi_qla_host *vha, if (!found_lun) { if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { /* send TASK_ABORT response immediately */ - qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, + FCP_TMF_CMPL, false); return 0; } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081, @@ -1889,6 +1890,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); mcmd->reset_count = vha->hw->chip_reset; mcmd->tmr_func = QLA_TGT_ABTS; + mcmd->qpair = ha->base_qpair; rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func, abts->exchange_addr_to_abort); @@ -1920,7 +1922,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, "qla_target(%d): ABTS: Abort Sequence not " "supported\n", vha->vp_idx); - qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); return; } @@ -1928,7 +1931,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, "qla_target(%d): ABTS: Unknown Exchange " "Address received\n", vha->vp_idx); - qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); return; } @@ -1954,8 +1958,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (rc != 0) { - qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, - false); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, + FCP_TMF_REJECTED, false); } return; } @@ -1963,7 +1967,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, if (sess->deleted) { - qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); return; } @@ -1972,7 +1977,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", vha->vp_idx, rc); - qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); return; } } @@ -1980,9 +1986,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ -static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, +static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) { + struct scsi_qla_host *ha = qpair->vha; struct atio_from_isp *atio = &mcmd->orig_iocb.atio; struct ctio7_to_24xx *ctio; uint16_t temp; @@ -1991,11 +1998,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", ha, atio, resp_code); - /* Send marker if required */ - if (qlt_issue_marker(ha, 1) != QLA_SUCCESS) - return; - ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL); + ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); if (ctio == NULL) { ql_dbg(ql_dbg_tgt, ha, 0xe04c, "qla_target(%d): %s failed: unable to allocate " @@ -2025,7 +2029,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(ha, ha->req); + qla2x00_start_iocbs(ha, qpair->req); } void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) @@ -2105,12 +2109,13 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) struct scsi_qla_host *vha = mcmd->sess->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; + struct qla_qpair *qpair = mcmd->qpair; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, "TM response mcmd (%p) status %#x state %#x", mcmd, mcmd->fc_tm_rsp, mcmd->flags); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) { /* @@ -2122,7 +2127,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) vha->flags.online, qla2x00_reset_active(vha), mcmd->reset_count, ha->chip_reset); ha->tgt.tgt_ops->free_mcmd(mcmd); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return; } @@ -2139,15 +2144,15 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) mcmd->flags); qlt_schedule_sess_for_deletion_lock(mcmd->sess); } else { - qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, - 0, 0, 0, 0, 0, 0); + qlt_send_notify_ack(vha->hw->base_qpair, + &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); } } else { if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) - qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, + qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts, mcmd->fc_tm_rsp, false); else - qlt_24xx_send_task_mgmt_ctio(vha, mcmd, + qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, mcmd->fc_tm_rsp); } /* @@ -2159,7 +2164,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) * qlt_xmit_tm_rsp() returns here.. 
*/ ha->tgt.tgt_ops->free_mcmd(mcmd); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); } EXPORT_SYMBOL(qlt_xmit_tm_rsp); @@ -2247,25 +2252,25 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); } -static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, +static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t req_cnt) { uint32_t cnt; + struct req_que *req = qpair->req; - if (vha->req->cnt < (req_cnt + 2)) { - cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out); + if (req->cnt < (req_cnt + 2)) { + cnt = (uint16_t)RD_REG_DWORD(req->req_q_out); - if (vha->req->ring_index < cnt) - vha->req->cnt = cnt - vha->req->ring_index; + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; else - vha->req->cnt = vha->req->length - - (vha->req->ring_index - cnt); + req->cnt = req->length - (req->ring_index - cnt); - if (unlikely(vha->req->cnt < (req_cnt + 2))) + if (unlikely(req->cnt < (req_cnt + 2))) return -EAGAIN; } - vha->req->cnt -= req_cnt; + req->cnt -= req_cnt; return 0; } @@ -2273,26 +2278,27 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ -static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha) +static inline void *qlt_get_req_pkt(struct req_que *req) { /* Adjust ring index. */ - vha->req->ring_index++; - if (vha->req->ring_index == vha->req->length) { - vha->req->ring_index = 0; - vha->req->ring_ptr = vha->req->ring; + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; } else { - vha->req->ring_ptr++; + req->ring_ptr++; } - return (cont_entry_t *)vha->req->ring_ptr; + return (cont_entry_t *)req->ring_ptr; } /* ha->hardware_lock supposed to be held on entry */ -static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) +static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) { + struct scsi_qla_host *vha = qpair->vha; uint32_t h; int index; uint8_t found = 0; - struct req_que *req = vha->req; + struct req_que *req = qpair->req; h = req->current_outstanding_cmd; @@ -2323,15 +2329,16 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) } /* ha->hardware_lock supposed to be held on entry */ -static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, - struct scsi_qla_host *vha) +static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, + struct qla_tgt_prm *prm) { uint32_t h; struct ctio7_to_24xx *pkt; struct atio_from_isp *atio = &prm->cmd->atio; uint16_t temp; + struct scsi_qla_host *vha = prm->cmd->vha; - pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; + pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; prm->pkt = pkt; memset(pkt, 0, sizeof(*pkt)); @@ -2339,7 +2346,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, pkt->entry_count = (uint8_t)prm->req_cnt; pkt->vp_index = vha->vp_idx; - h = qlt_make_handle(vha); + h = qlt_make_handle(qpair); if (unlikely(h == QLA_TGT_NULL_HANDLE)) { /* * CTIO type 7 from the firmware doesn't provide a way to @@ -2351,8 +2358,9 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, vha->req->outstanding_cmds[h] = (srb_t *)prm->cmd; } - pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; - pkt->nport_handle = prm->cmd->loop_id; + pkt->handle = MAKE_HANDLE(qpair->req->id, h); + pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; + pkt->nport_handle = 
cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; @@ -2381,7 +2389,8 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, /* Build continuation packets */ while (prm->seg_cnt > 0) { cont_a64_entry_t *cont_pkt64 = - (cont_a64_entry_t *)qlt_get_req_pkt(vha); + (cont_a64_entry_t *)qlt_get_req_pkt( + prm->cmd->qpair->req); /* * Make sure that from cont_pkt64 none of @@ -2546,10 +2555,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, prm->req_cnt = 1; prm->add_status_pkt = 0; - /* Send marker if required */ - if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) - return -EFAULT; - if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { if (qlt_pci_map_calc_cnt(prm) != 0) return -EAGAIN; @@ -2791,7 +2796,7 @@ qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, } static inline int -qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) +qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) { uint32_t *cur_dsd; uint32_t transfer_length = 0; @@ -2810,10 +2815,11 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) struct atio_from_isp *atio = &prm->cmd->atio; struct qla_tc_param tc; uint16_t t16; + scsi_qla_host_t *vha = cmd->vha; ha = vha->hw; - pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; + pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; prm->pkt = pkt; memset(pkt, 0, sizeof(*pkt)); @@ -2884,7 +2890,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) pkt->entry_count = 1; pkt->vp_index = vha->vp_idx; - h = qlt_make_handle(vha); + h = qlt_make_handle(qpair); if (unlikely(h == QLA_TGT_NULL_HANDLE)) { /* * CTIO type 7 from the firmware doesn't provide a way to @@ -2893,9 +2899,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) */ return -EAGAIN; } else - vha->req->outstanding_cmds[h] = (srb_t *)prm->cmd; + qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; - pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; + pkt->handle = MAKE_HANDLE(qpair->req->id, h); + pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; @@ -2999,7 +3006,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) crc_queuing_error: /* Cleanup will be performed by the caller */ - vha->req->outstanding_cmds[h] = NULL; + qpair->req->outstanding_cmds[h] = NULL; return QLA_FUNCTION_FAILED; } @@ -3013,32 +3020,30 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, { struct scsi_qla_host *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair = cmd->qpair; struct ctio7_to_24xx *pkt; struct qla_tgt_prm prm; uint32_t full_req_cnt = 0; unsigned long flags = 0; int res; - spin_lock_irqsave(&ha->hardware_lock, flags); if (cmd->sess && cmd->sess->deleted) { cmd->state = QLA_TGT_STATE_PROCESSED; if (cmd->sess->logout_completed) /* no need to terminate. FW already freed exchange. 
*/ qlt_abort_cmd_on_host_reset(cmd->vha, cmd); else - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0); return 0; } - spin_unlock_irqrestore(&ha->hardware_lock, flags); memset(&prm, 0, sizeof(prm)); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, - "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", + "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, - &cmd->se_cmd); + &cmd->se_cmd, qpair->id); res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); @@ -3046,7 +3051,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, return res; } - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); if (xmit_type == QLA_TGT_XMIT_STATUS) vha->tgt_counters.core_qla_snd_status++; @@ -3064,21 +3069,21 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, ha->chip_reset); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return 0; } /* Does F/W have an IOCBs for this request */ - res = qlt_check_reserve_free_req(vha, full_req_cnt); + res = qlt_check_reserve_free_req(qpair, full_req_cnt); if (unlikely(res)) goto out_unmap_unlock; if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) - res = qlt_build_ctio_crc2_pkt(&prm, vha); + res = qlt_build_ctio_crc2_pkt(qpair, &prm); else - res = qlt_24xx_build_ctio_pkt(&prm, vha); + res = qlt_24xx_build_ctio_pkt(qpair, &prm); if (unlikely(res != 0)) { - vha->req->cnt += full_req_cnt; + qpair->req->cnt += full_req_cnt; goto out_unmap_unlock; } @@ -3115,9 +3120,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, * req_pkt(). 
*/ struct ctio7_to_24xx *ctio = - (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); + (struct ctio7_to_24xx *)qlt_get_req_pkt( + qpair->req); - ql_dbg(ql_dbg_io, vha, 0x305e, + ql_dbg(ql_dbg_tgt, vha, 0x305e, "Building additional status packet 0x%p.\n", ctio); @@ -3155,14 +3161,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, vha->req); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + qla2x00_start_iocbs(vha, qpair->req); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return 0; out_unmap_unlock: qlt_unmap_sg(vha, cmd); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return res; } @@ -3175,8 +3181,9 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = cmd->tgt; struct qla_tgt_prm prm; - unsigned long flags; + unsigned long flags = 0; int res = 0; + struct qla_qpair *qpair = cmd->qpair; memset(&prm, 0, sizeof(prm)); prm.cmd = cmd; @@ -3184,16 +3191,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) prm.sg = NULL; prm.req_cnt = 1; - /* Send marker if required */ - if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) - return -EIO; - /* Calculate number of entries and segments required */ if (qlt_pci_map_calc_cnt(&prm) != 0) return -EAGAIN; - spin_lock_irqsave(&ha->hardware_lock, flags); - if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) || (cmd->sess && cmd->sess->deleted)) { /* @@ -3206,21 +3207,21 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, ha->chip_reset); - spin_unlock_irqrestore(&ha->hardware_lock, flags); return 0; } + spin_lock_irqsave(qpair->qp_lock_ptr, flags); /* Does F/W have an IOCBs for this request */ - res = qlt_check_reserve_free_req(vha, prm.req_cnt); + res = qlt_check_reserve_free_req(qpair, prm.req_cnt); if (res != 0) goto out_unlock_free_unmap; if (cmd->se_cmd.prot_op) - res = qlt_build_ctio_crc2_pkt(&prm, vha); + res = qlt_build_ctio_crc2_pkt(qpair, &prm); else - res = qlt_24xx_build_ctio_pkt(&prm, vha); + res = qlt_24xx_build_ctio_pkt(qpair, &prm); if (unlikely(res != 0)) { - vha->req->cnt += prm.req_cnt; + qpair->req->cnt += prm.req_cnt; goto out_unlock_free_unmap; } @@ -3236,14 +3237,14 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, vha->req); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + qla2x00_start_iocbs(vha, qpair->req); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return res; out_unlock_free_unmap: qlt_unmap_sg(vha, cmd); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return res; } @@ -3408,9 +3409,6 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, unsigned long flags = 0; int rc; - if (qlt_issue_marker(vha, ha_locked) < 0) - return; - if (ha_locked) { rc = __qlt_send_term_imm_notif(vha, imm); @@ -3441,10 +3439,11 @@ done: * If hardware_lock held on entry, might drop it, then reaquire * This function sends the appropriate CTIO to ISP 2xxx or 24xx */ -static int __qlt_send_term_exchange(struct scsi_qla_host *vha, +static int __qlt_send_term_exchange(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio) { + struct scsi_qla_host *vha = qpair->vha; struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; @@ -3453,7 +3452,7 @@ static int 
__qlt_send_term_exchange(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); - pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); + pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe050, "qla_target(%d): %s failed: unable to allocate " @@ -3499,28 +3498,32 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, vha->req); + qla2x00_start_iocbs(vha, qpair->req); return ret; } -static void qlt_send_term_exchange(struct scsi_qla_host *vha, +static void qlt_send_term_exchange(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort) { + struct scsi_qla_host *vha; unsigned long flags = 0; int rc; - if (qlt_issue_marker(vha, ha_locked) < 0) - return; + /* why use different vha? NPIV */ + if (cmd) + vha = cmd->vha; + else + vha = qpair->vha; if (ha_locked) { - rc = __qlt_send_term_exchange(vha, cmd, atio); + rc = __qlt_send_term_exchange(qpair, cmd, atio); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, 0, 0); goto done; } - spin_lock_irqsave(&vha->hw->hardware_lock, flags); - rc = __qlt_send_term_exchange(vha, cmd, atio); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + rc = __qlt_send_term_exchange(qpair, cmd, atio); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, 0, 0); @@ -3532,7 +3535,7 @@ done: } if (!ha_locked) - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return; } @@ -3614,7 +3617,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) cmd->trc_flags |= TRC_ABORT; spin_unlock_irqrestore(&cmd->cmd_lock, flags); - qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); + qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); return 0; } EXPORT_SYMBOL(qlt_abort_cmd); @@ -3653,10 +3656,11 @@ EXPORT_SYMBOL(qlt_free_cmd); /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ -static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, +static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, struct qla_tgt_cmd *cmd, uint32_t status) { int term = 0; + struct scsi_qla_host *vha = qpair->vha; if (cmd->se_cmd.prot_op) ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, @@ -3676,7 +3680,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, term = 1; if (term) - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); + qlt_term_ctio_exchange(qpair, ctio, cmd, status); return term; } @@ -3684,35 +3688,45 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, /* ha->hardware_lock supposed to be held on entry */ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, - uint32_t handle, void *ctio) + struct rsp_que *rsp, uint32_t handle, void *ctio) { struct qla_tgt_cmd *cmd = NULL; - struct req_que *req = vha->req; + struct req_que *req; + int qid = GET_QID(handle); + uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; - /* Clear out internal marks */ - handle &= ~QLA_TGT_HANDLE_MASK; + if (unlikely(h == QLA_TGT_SKIP_HANDLE)) + return NULL; - if (handle != QLA_TGT_NULL_HANDLE) { - if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) - return NULL; + if (qid == rsp->req->id) { + req = rsp->req; + } else if (vha->hw->req_q_map[qid]) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a, + "qla_target(%d): CTIO completion with different QID %d handle %x\n", + vha->vp_idx, rsp->id, handle); + req = vha->hw->req_q_map[qid]; + } else { + return NULL; + } - handle &= QLA_CMD_HANDLE_MASK; + h &= QLA_CMD_HANDLE_MASK; - if (unlikely(handle > req->num_outstanding_cmds)) { + if (h != QLA_TGT_NULL_HANDLE) { + if (unlikely(h > req->num_outstanding_cmds)) { ql_dbg(ql_dbg_tgt, vha, 0xe052, "qla_target(%d): Wrong handle %x received\n", vha->vp_idx, handle); return NULL; } - cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[handle]; - if (unlikely((cmd == NULL) || - (cmd->cmd_type != TYPE_TGT_CMD))) { + + cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h]; + if (unlikely(cmd == NULL)) { ql_dbg(ql_dbg_async, vha, 0xe053, - "qla_target(%d): Suspicious: unable to find the command with handle %x cmd %p\n", - vha->vp_idx, handle, cmd); + "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", + vha->vp_idx, handle, req->id, rsp->id); return NULL; } - req->outstanding_cmds[handle] = NULL; + req->outstanding_cmds[h] = NULL; } else if (ctio != NULL) { /* We can't get loop ID from CTIO7 */ ql_dbg(ql_dbg_tgt, vha, 0xe054, @@ -3729,29 +3743,26 @@ void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) { struct qla_hw_data *ha = vha->hw; - uint32_t handle; if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); - handle = qlt_make_handle(vha); - /* TODO: fix debug message type and ids. 
*/ if (cmd->state == QLA_TGT_STATE_PROCESSED) { ql_dbg(ql_dbg_io, vha, 0xff00, - "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle); + "HOST-ABORT: state=PROCESSED.\n"); } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { cmd->write_data_transferred = 0; cmd->state = QLA_TGT_STATE_DATA_IN; ql_dbg(ql_dbg_io, vha, 0xff01, - "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle); + "HOST-ABORT: state=DATA_IN.\n"); ha->tgt.tgt_ops->handle_data(cmd); return; } else { ql_dbg(ql_dbg_io, vha, 0xff03, - "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle, + "HOST-ABORT: state=BAD(%d).\n", cmd->state); dump_stack(); } @@ -3763,12 +3774,13 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ -static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, - uint32_t status, void *ctio) +static void qlt_do_ctio_completion(struct scsi_qla_host *vha, + struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio) { struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd; struct qla_tgt_cmd *cmd; + struct qla_qpair *qpair = rsp->qpair; if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { /* That could happen only in case of an error/reset/abort */ @@ -3780,7 +3792,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, return; } - cmd = qlt_ctio_to_cmd(vha, handle, ctio); + cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); if (cmd == NULL) return; @@ -3864,7 +3876,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && (!cmd->aborted)) { cmd->trc_flags |= TRC_CTIO_ERR; - if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) + if (qlt_term_ctio_exchange(qpair, ctio, cmd, status)) return; } } @@ -3947,6 +3959,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) unsigned long flags; uint32_t data_length; int ret, fcp_task_attr, data_dir, bidi = 0; + struct qla_qpair *qpair = cmd->qpair; cmd->cmd_in_wq = 0; cmd->trc_flags |= TRC_DO_WORK; @@ -4002,12 +4015,12 @@ out_term: * argument to qlt_send_term_exchange() and free the memory here. 
*/ cmd->trc_flags |= TRC_DO_WORK_ERR; - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); qlt_decr_num_pend_cmds(vha); percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); @@ -4056,13 +4069,12 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->jiffies_at_alloc = get_jiffies_64(); cmd->reset_count = vha->hw->chip_reset; + cmd->qpair = vha->hw->base_qpair; + cmd->se_cmd.cpuid = cmd->qpair->cpuid; return cmd; } -static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *, - uint16_t); - static void qlt_create_sess_from_atio(struct work_struct *work) { struct qla_tgt_sess_op *op = container_of(work, @@ -4108,10 +4120,15 @@ static void qlt_create_sess_from_atio(struct work_struct *work) */ cmd = qlt_get_tag(vha, sess, &op->atio); if (!cmd) { - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); + struct qla_qpair *qpair = ha->base_qpair; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); kfree(op); return; } @@ -4124,9 +4141,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work) kfree(op); return; out_term: - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0); kfree(op); } @@ -4197,8 +4212,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, cmd->cmd_in_wq = 1; cmd->trc_flags |= TRC_NEW_CMD; - cmd->se_cmd.cpuid = ha->msix_count ? 
- ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; spin_lock_irqsave(&vha->cmd_list_lock, flags); list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); @@ -4215,8 +4228,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, } else { queue_work(qla_tgt_wq, &cmd->work); } - return 0; + return 0; } /* ha->hardware_lock supposed to be held on entry */ @@ -4247,6 +4260,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, mcmd->tmr_func = fn; mcmd->flags = flags; mcmd->reset_count = vha->hw->chip_reset; + mcmd->qpair = ha->base_qpair; switch (fn) { case QLA_TGT_LUN_RESET: @@ -4330,6 +4344,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); mcmd->reset_count = vha->hw->chip_reset; mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; + mcmd->qpair = ha->base_qpair; rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, le16_to_cpu(iocb->u.isp2x.seq_id)); @@ -4756,8 +4771,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if (tgt->link_reinit_iocb_pending) { - qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, - 0, 0, 0, 0, 0, 0); + qlt_send_notify_ack(ha->base_qpair, + &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } @@ -4820,8 +4835,8 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, le16_to_cpu(iocb->u.isp24.nport_handle), iocb->u.isp24.status_subcode); if (tgt->link_reinit_iocb_pending) { - qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, - 0, 0, 0, 0, 0, 0); + qlt_send_notify_ack(ha->base_qpair, + &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); } memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); tgt->link_reinit_iocb_pending = 1; @@ -4915,16 +4930,18 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, } if (send_notify_ack) - qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); + qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0, + 0, 0); } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire * This function sends busy to ISP 2xxx or 24xx. 
*/ -static int __qlt_send_busy(struct scsi_qla_host *vha, +static int __qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, uint16_t status) { + struct scsi_qla_host *vha = qpair->vha; struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; @@ -4937,12 +4954,12 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, atio->u.isp24.fcp_hdr.s_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (!sess) { - qlt_send_term_exchange(vha, NULL, atio, 1, 0); + qlt_send_term_exchange(qpair, NULL, atio, 1, 0); return 0; } /* Sending marker isn't necessary, since we called from ISR */ - pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); + pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); if (!pkt) { ql_dbg(ql_dbg_io, vha, 0x3063, "qla_target(%d): %s failed: unable to allocate " @@ -4975,7 +4992,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, ctio24->u.status1.scsi_status = cpu_to_le16(status); /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, vha->req); + qla2x00_start_iocbs(vha, qpair->req); return 0; } @@ -4994,6 +5011,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct se_session *se_sess; struct qla_tgt_cmd *cmd; int tag; + unsigned long flags; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x300a, @@ -5054,6 +5072,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, cmd->vha = vha; cmd->reset_count = vha->hw->chip_reset; cmd->q_full = 1; + cmd->qpair = ha->base_qpair; if (qfull) { cmd->q_full = 1; @@ -5062,6 +5081,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, } else cmd->term_exchg = 1; + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); vha->hw->tgt.num_qfull_cmds_alloc++; @@ -5069,35 +5089,41 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, vha->qla_stats.stat_max_qfull_cmds_alloc) vha->qla_stats.stat_max_qfull_cmds_alloc = vha->hw->tgt.num_qfull_cmds_alloc; + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } int -qlt_free_qfull_cmds(struct scsi_qla_host *vha) +qlt_free_qfull_cmds(struct qla_qpair *qpair) { + struct scsi_qla_host *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; struct qla_tgt_cmd *cmd, *tcmd; - struct list_head free_list; + struct list_head free_list, q_full_list; int rc = 0; if (list_empty(&ha->tgt.q_full_list)) return 0; INIT_LIST_HEAD(&free_list); + INIT_LIST_HEAD(&q_full_list); - spin_lock_irqsave(&vha->hw->hardware_lock, flags); - + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); if (list_empty(&ha->tgt.q_full_list)) { - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); return 0; } - list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) { + list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { if (cmd->q_full) /* cmd->state is a borrowed field to hold status */ - rc = __qlt_send_busy(vha, &cmd->atio, cmd->state); + rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); else if (cmd->term_exchg) - rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio); + rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); if (rc == -ENOMEM) break; @@ -5121,7 +5147,7 @@ qlt_free_qfull_cmds(struct scsi_qla_host *vha) /* piggy back on hardware_lock for protection */ vha->hw->tgt.num_qfull_cmds_alloc--; } - spin_unlock_irqrestore(&vha->hw->hardware_lock, 
flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); cmd = NULL; @@ -5132,23 +5158,31 @@ qlt_free_qfull_cmds(struct scsi_qla_host *vha) */ qlt_free_cmd(cmd); } + + if (!list_empty(&q_full_list)) { + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + list_splice(&q_full_list, &vha->hw->tgt.q_full_list); + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); + } + return rc; } static void -qlt_send_busy(struct scsi_qla_host *vha, - struct atio_from_isp *atio, uint16_t status) +qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, + uint16_t status) { int rc = 0; + struct scsi_qla_host *vha = qpair->vha; - rc = __qlt_send_busy(vha, atio, status); + rc = __qlt_send_busy(qpair, atio, status); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, status, 1); } static int -qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, - struct atio_from_isp *atio, bool ha_locked) +qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, + struct atio_from_isp *atio, uint8_t ha_locked) { struct qla_hw_data *ha = vha->hw; uint16_t status; @@ -5160,7 +5194,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); status = temp_sam_status; - qlt_send_busy(vha, atio, status); + qlt_send_busy(qpair, atio, status); if (!ha_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -5199,16 +5233,17 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, "sending QUEUE_FULL\n", vha->vp_idx); if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); + qlt_send_busy(ha->base_qpair, atio, + SAM_STAT_TASK_SET_FULL); if (!ha_locked) - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->hardware_lock, + flags); break; } - - if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { - rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); + rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, + atio, ha_locked); if (rc != 0) { tgt->atio_irq_cmd_count--; return; @@ -5220,19 +5255,19 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, if (unlikely(rc != 0)) { if (rc == -ESRCH) { if (!ha_locked) - spin_lock_irqsave - (&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->hardware_lock, + flags); #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ - qlt_send_busy(vha, atio, SAM_STAT_BUSY); + qlt_send_busy(ha->base_qpair, atio, + SAM_STAT_BUSY); #else - qlt_send_term_exchange(vha, NULL, atio, 1, 0); + qlt_send_term_exchange(ha->base_qpair, NULL, + atio, 1, 0); #endif - if (!ha_locked) - spin_unlock_irqrestore - (&ha->hardware_lock, flags); - + spin_unlock_irqrestore( + &ha->hardware_lock, flags); } else { if (tgt->tgt_stop) { ql_dbg(ql_dbg_tgt, vha, 0xe059, @@ -5247,7 +5282,8 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, if (!ha_locked) spin_lock_irqsave( &ha->hardware_lock, flags); - qlt_send_busy(vha, atio, SAM_STAT_BUSY); + qlt_send_busy(ha->base_qpair, + atio, SAM_STAT_BUSY); if (!ha_locked) spin_unlock_irqrestore( &ha->hardware_lock, flags); @@ -5288,7 +5324,8 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, /* ha->hardware_lock supposed to be held on entry */ /* called via callback from qla2xxx */ -static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) +static void qlt_response_pkt(struct scsi_qla_host *vha, + struct rsp_que *rsp, response_t *pkt) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; @@ -5310,7 +5347,7 @@ static void 
qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; - qlt_do_ctio_completion(vha, entry->handle, + qlt_do_ctio_completion(vha, rsp, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; @@ -5329,7 +5366,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) break; } - rc = qlt_chk_qfull_thresh_hold(vha, atio, true); + rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); if (rc != 0) return; @@ -5337,9 +5374,9 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) if (unlikely(rc != 0)) { if (rc == -ESRCH) { #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ - qlt_send_busy(vha, atio, 0); + qlt_send_busy(rsp->qpair, atio, 0); #else - qlt_send_term_exchange(vha, NULL, atio, 1, 0); + qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0); #endif } else { if (tgt->tgt_stop) { @@ -5347,14 +5384,14 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) "qla_target: Unable to send " "command to target, sending TERM " "EXCHANGE for rsp\n"); - qlt_send_term_exchange(vha, NULL, + qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0); } else { ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send " "command to target, sending BUSY " "status\n", vha->vp_idx); - qlt_send_busy(vha, atio, 0); + qlt_send_busy(rsp->qpair, atio, 0); } } } @@ -5364,7 +5401,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) case CONTINUE_TGT_IO_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; - qlt_do_ctio_completion(vha, entry->handle, + qlt_do_ctio_completion(vha, rsp, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; @@ -5373,7 +5410,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) case CTIO_A64_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; - qlt_do_ctio_completion(vha, entry->handle, + qlt_do_ctio_completion(vha, rsp, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; @@ -5508,7 +5545,8 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); if (tgt->link_reinit_iocb_pending) { - qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, + qlt_send_notify_ack(ha->base_qpair, + (void *)&tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } @@ -5790,7 +5828,8 @@ out_term2: out_term: spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); + qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, + FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -5857,7 +5896,7 @@ out_term2: ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); out_term: - qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); + qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0); } static void qlt_sess_work_fn(struct work_struct *work) @@ -6160,7 +6199,6 @@ qlt_enable_vha(struct scsi_qla_host *vha) struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER; if (!tgt) { ql_dbg(ql_dbg_tgt, vha, 0xe069, @@ -6179,17 +6217,6 @@ qlt_enable_vha(struct scsi_qla_host *vha) qla24xx_disable_vp(vha); qla24xx_enable_vp(vha); } else { - if 
(ha->msix_entries) { - ql_dbg(ql_dbg_tgt, vha, 0xe081, - "%s: host%ld : vector %d cpu %d\n", - __func__, vha->host_no, - ha->msix_entries[rspq_ent].vector, - ha->msix_entries[rspq_ent].cpuid); - - ha->tgt.rspq_vector_cpuid = - ha->msix_entries[rspq_ent].cpuid; - } - set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); qla2x00_wait_for_hba_online(base_vha); @@ -6323,7 +6350,8 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); adjust_corrupted_atio(pkt); - qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0); + qlt_send_term_exchange(ha->base_qpair, NULL, pkt, + ha_locked, 0); } else { qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, ha_locked); @@ -6674,14 +6702,15 @@ qlt_handle_abts_recv_work(struct work_struct *work) spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); + qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); } void -qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt) +qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, + response_t *pkt) { struct qla_tgt_sess_op *op; @@ -6691,13 +6720,14 @@ qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt) /* do not reach for ATIO queue here. This is best effort err * recovery at this point. */ - qlt_response_pkt_all_vps(vha, pkt); + qlt_response_pkt_all_vps(vha, rsp, pkt); return; } memcpy(&op->atio, pkt, sizeof(*pkt)); op->vha = vha; op->chip_reset = vha->hw->chip_reset; + op->rsp = rsp; INIT_WORK(&op->work, qlt_handle_abts_recv_work); queue_work(qla_tgt_wq, &op->work); return; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 2120456c4709..9519eeca1997 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -73,7 +73,7 @@ #define QLA_TGT_NULL_HANDLE 0 #define QLA_TGT_HANDLE_MASK 0xF0000000 -#define QLA_QPID_HANDLE_MASK 0x000F0000 /* qpair id mask */ +#define QLA_QPID_HANDLE_MASK 0x00FF0000 /* qpair id mask */ #define QLA_CMD_HANDLE_MASK 0x0000FFFF #define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~QLA_TGT_HANDLE_MASK) @@ -837,6 +837,7 @@ struct qla_tgt_sess_op { struct work_struct work; struct list_head cmd_list; bool aborted; + struct rsp_que *rsp; }; enum trace_flags { @@ -871,6 +872,7 @@ struct qla_tgt_cmd { uint8_t pad[7]; struct se_cmd se_cmd; struct fc_port *sess; + struct qla_qpair *qpair; int state; struct work_struct work; /* Sense buffer that will be mapped into outgoing status */ @@ -948,6 +950,7 @@ struct qla_tgt_mgmt_cmd { uint16_t tmr_func; uint8_t fc_tm_rsp; struct fc_port *sess; + struct qla_qpair *qpair; struct se_cmd se_cmd; struct work_struct free_work; unsigned int flags; @@ -1049,7 +1052,8 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p) /* * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 
*/ -extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); +extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *, + response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); extern int qlt_abort_cmd(struct qla_tgt_cmd *); @@ -1082,7 +1086,7 @@ extern int qlt_stop_phase1(struct qla_tgt *); extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); extern void qlt_83xx_iospace_config(struct qla_hw_data *); -extern int qlt_free_qfull_cmds(struct scsi_qla_host *); +extern int qlt_free_qfull_cmds(struct qla_qpair *); extern void qlt_logo_completion_handler(fc_port_t *, int); extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); -- cgit From e326d22af9653dd8eff05d71f0d1bad9174578a3 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:18 -0700 Subject: scsi: qla2xxx: Enable Target Multi Queue Enable Multi Queue for Target mode. At Initiator LUN scan time, each LUN is assign to a QPair. Each QPair is affinitize to certain CPU. When new cmd arrives from the wire, the lunid is used to search for qpair. The qpair's affinitized cpuid will be used to queue up the work element. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 3 +- drivers/scsi/qla2xxx/qla_init.c | 3 + drivers/scsi/qla2xxx/qla_inline.h | 28 +++++++ drivers/scsi/qla2xxx/qla_isr.c | 21 +++-- drivers/scsi/qla2xxx/qla_os.c | 56 +++++++++----- drivers/scsi/qla2xxx/qla_target.c | 157 ++++++++++++++++++++++++++++++++++++-- drivers/scsi/qla2xxx/qla_target.h | 10 ++- 7 files changed, 240 insertions(+), 38 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 64109134e276..005ca2de3795 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3245,7 +3245,7 @@ struct req_que { struct qla_qpair { spinlock_t qp_lock; atomic_t ref_count; - + uint32_t lun_cnt; /* * For qpair 0, qp_lock_ptr will point at hardware_lock due to * legacy code. For other Qpair(s), it will point at qp_lock. @@ -3275,6 +3275,7 @@ struct qla_qpair { struct qla_hw_data *hw; struct work_struct q_work; struct list_head qp_list_elem; /* vha->qp_list */ + struct list_head hints_list; uint16_t cpuid; }; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 878b552be263..4366b12b0e6d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -7623,6 +7623,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, ha->queue_pair_map[qpair_id] = qpair; qpair->id = qpair_id; qpair->vp_idx = vp_idx; + INIT_LIST_HEAD(&qpair->hints_list); for (i = 0; i < ha->msix_count; i++) { msix = &ha->msix_entries[i]; @@ -7666,6 +7667,8 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->req = ha->req_q_map[req_id]; qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; + /* init qpair to this cpu. Will adjust at run time. 
*/ + qla_cpu_update(qpair, smp_processor_id()); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 99028d48c664..bd8cb796f64e 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -324,3 +324,31 @@ qla_is_exch_offld_enabled(struct scsi_qla_host *vha) else return false; } + +static inline void +qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid) +{ + qpair->cpuid = cpuid; + + if (!list_empty(&qpair->hints_list)) { + struct qla_qpair_hint *h; + + list_for_each_entry(h, &qpair->hints_list, hint_elem) + h->cpuid = qpair->cpuid; + } +} + +static inline struct qla_qpair_hint * +qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair) +{ + struct qla_qpair_hint *h; + u16 i; + + for (i = 0; i < tgt->ha->max_qpairs + 1; i++) { + h = &tgt->qphints[i]; + if (h->qpair == qpair) + return h; + } + + return NULL; +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 1535a29a9d9f..9eb946cc8297 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -2761,6 +2762,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (!ha->flags.fw_started) return; + if (rsp->qpair->cpuid != smp_processor_id()) + qla_cpu_update(rsp->qpair, smp_processor_id()); + while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (struct sts_entry_24xx *)rsp->ring_ptr; @@ -3196,10 +3200,10 @@ struct qla_init_msix_entry { }; static const struct qla_init_msix_entry msix_entries[] = { - { "qla2xxx (default)", qla24xx_msix_default }, - { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, - { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, - { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q }, + { "default", qla24xx_msix_default }, + { "rsp_q", qla24xx_msix_rsp_q }, + { "atio_q", qla83xx_msix_atio_q }, + { "qpair_multiq", qla2xxx_msix_rsp_q }, }; static const struct qla_init_msix_entry qla82xx_msix_entries[] = { @@ -3279,7 +3283,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->handle = rsp; rsp->msix = qentry; scnprintf(qentry->name, sizeof(qentry->name), - "%s", msix_entries[i].name); + "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, @@ -3287,7 +3291,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) else ret = request_irq(qentry->vector, msix_entries[i].handler, - 0, msix_entries[i].name, rsp); + 0, qentry->name, rsp); if (ret) goto msix_register_fail; qentry->have_irq = 1; @@ -3303,11 +3307,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) rsp->msix = qentry; qentry->handle = rsp; scnprintf(qentry->name, sizeof(qentry->name), - "%s", msix_entries[QLA_ATIO_VECTOR].name); + "qla2xxx%lu_%s", vha->host_no, + msix_entries[QLA_ATIO_VECTOR].name); qentry->in_use = 1; ret = request_irq(qentry->vector, msix_entries[QLA_ATIO_VECTOR].handler, - 0, msix_entries[QLA_ATIO_VECTOR].name, rsp); + 0, qentry->name, rsp); qentry->have_irq = 1; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 82bbb6432f77..3963602aef35 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -371,6 +371,23 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, goto fail_rsp_map; } + ha->base_qpair = kzalloc(sizeof(struct qla_qpair), 
GFP_KERNEL); + if (ha->base_qpair == NULL) { + ql_log(ql_log_warn, vha, 0x00e0, + "Failed to allocate base queue pair memory.\n"); + goto fail_base_qpair; + } + + rsp->qpair = ha->base_qpair; + rsp->req = req; + ha->base_qpair->req = req; + ha->base_qpair->rsp = rsp; + ha->base_qpair->vha = vha; + ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; + ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; + INIT_LIST_HEAD(&ha->base_qpair->hints_list); + qla_cpu_update(rsp->qpair, smp_processor_id()); + if (ql2xmqsupport && ha->max_qpairs) { ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), GFP_KERNEL); @@ -379,23 +396,8 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, "Unable to allocate memory for queue pair ptrs.\n"); goto fail_qpair_map; } - ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); - if (ha->base_qpair == NULL) { - ql_log(ql_log_warn, vha, 0x00e0, - "Failed to allocate base queue pair memory.\n"); - goto fail_base_qpair; - } - ha->base_qpair->req = req; - ha->base_qpair->rsp = rsp; } - rsp->qpair = ha->base_qpair; - rsp->req = req; - ha->base_qpair->vha = vha; - ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; - ha->queue_pair_map[0] = ha->base_qpair; - set_bit(0, ha->qpair_qid_map); - /* * Make sure we record at least the request and response queue zero in * case we need to free them if part of the probe fails. @@ -2009,7 +2011,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha) /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_83XX_PCI_MSIX_CONTROL, &msix); - ha->msix_count = msix + 1; + ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1; /* * By default, driver uses at least two msix vectors * (default & rspq) @@ -3125,12 +3127,26 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); - if (ha->mqenable && qla_ini_mode_enabled(base_vha)) { + if (ha->mqenable) { + bool mq = false; + bool startit = false; ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); - /* Create start of day qpairs for Block MQ */ - if (shost_use_blk_mq(host)) { + + if (QLA_TGT_MODE_ENABLED()) { + mq = true; + startit = false; + } + + if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) && + shost_use_blk_mq(host)) { + mq = true; + startit = true; + } + + if (mq) { + /* Create start of day qpairs for Block MQ */ for (i = 0; i < ha->max_qpairs; i++) - qla2xxx_create_qpair(base_vha, 5, 0, true); + qla2xxx_create_qpair(base_vha, 5, 0, startit); } } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 22f9bb59a98d..92e41055e6f8 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1515,6 +1515,10 @@ EXPORT_SYMBOL(qlt_stop_phase2); static void qlt_release(struct qla_tgt *tgt) { scsi_qla_host_t *vha = tgt->vha; + void *node; + u64 key = 0; + u16 i; + struct qla_qpair_hint *h; if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop && !tgt->tgt_stopped) @@ -1523,6 +1527,24 @@ static void qlt_release(struct qla_tgt *tgt) if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) qlt_stop_phase2(tgt); + for (i = 0; i < vha->hw->max_qpairs + 1; i++) { + unsigned long flags; + + h = &tgt->qphints[i]; + if (h->qpair) { + spin_lock_irqsave(h->qpair->qp_lock_ptr, flags); + list_del(&h->hint_elem); + spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags); + h->qpair = NULL; + } + } + kfree(tgt->qphints); + + btree_for_each_safe64(&tgt->lun_qpair_map, key, 
node) + btree_remove64(&tgt->lun_qpair_map, key); + + btree_destroy64(&tgt->lun_qpair_map); + vha->vha_tgt.qla_tgt = NULL; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, @@ -2354,9 +2376,8 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, * the session and, so, the command. */ return -EAGAIN; - } else { - vha->req->outstanding_cmds[h] = (srb_t *)prm->cmd; - } + } else + qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; pkt->handle = MAKE_HANDLE(qpair->req->id, h); pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; @@ -3976,8 +3997,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) spin_lock_init(&cmd->cmd_lock); cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->se_cmd.tag = atio->u.isp24.exchange_addr; - cmd->unpacked_lun = scsilun_to_int( - (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); if (atio->u.isp24.fcp_cmnd.rddata && atio->u.isp24.fcp_cmnd.wrdata) { @@ -4040,6 +4059,85 @@ static void qlt_do_work(struct work_struct *work) __qlt_do_work(cmd); } +static void qlt_assign_qpair(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd) +{ + struct qla_qpair *qpair, *qp; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_qpair_hint *h; + + if (vha->flags.qpairs_available) { + h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); + if (unlikely(!h)) { + /* spread lun to qpair ratio evently */ + int lcnt = 0, rc; + struct scsi_qla_host *base_vha = + pci_get_drvdata(vha->hw->pdev); + + qpair = vha->hw->base_qpair; + if (qpair->lun_cnt == 0) { + qpair->lun_cnt++; + h = qla_qpair_to_hint(tgt, qpair); + BUG_ON(!h); + rc = btree_insert64(&tgt->lun_qpair_map, + cmd->unpacked_lun, h, GFP_ATOMIC); + if (rc) { + qpair->lun_cnt--; + ql_log(ql_log_info, vha, 0xd037, + "Unable to insert lun %llx into lun_qpair_map\n", + cmd->unpacked_lun); + } + goto out; + } else { + lcnt = qpair->lun_cnt; + } + + h = NULL; + list_for_each_entry(qp, &base_vha->qp_list, + qp_list_elem) { + if (qp->lun_cnt == 0) { + qp->lun_cnt++; + h = qla_qpair_to_hint(tgt, qp); + BUG_ON(!h); + rc = btree_insert64(&tgt->lun_qpair_map, + cmd->unpacked_lun, h, GFP_ATOMIC); + if (rc) { + qp->lun_cnt--; + ql_log(ql_log_info, vha, 0xd038, + "Unable to insert lun %llx into lun_qpair_map\n", + cmd->unpacked_lun); + } + qpair = qp; + goto out; + } else { + if (qp->lun_cnt < lcnt) { + lcnt = qp->lun_cnt; + qpair = qp; + continue; + } + } + } + BUG_ON(!qpair); + qpair->lun_cnt++; + h = qla_qpair_to_hint(tgt, qpair); + BUG_ON(!h); + rc = btree_insert64(&tgt->lun_qpair_map, + cmd->unpacked_lun, h, GFP_ATOMIC); + if (rc) { + qpair->lun_cnt--; + ql_log(ql_log_info, vha, 0xd039, + "Unable to insert lun %llx into lun_qpair_map\n", + cmd->unpacked_lun); + } + } + } else { + h = &tgt->qphints[0]; + } +out: + cmd->qpair = h->qpair; + cmd->se_cmd.cpuid = h->cpuid; +} + static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, struct fc_port *sess, struct atio_from_isp *atio) @@ -4069,8 +4167,9 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->jiffies_at_alloc = get_jiffies_64(); cmd->reset_count = vha->hw->chip_reset; - cmd->qpair = vha->hw->base_qpair; - cmd->se_cmd.cpuid = cmd->qpair->cpuid; + cmd->unpacked_lun = scsilun_to_int( + (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); + qlt_assign_qpair(vha, cmd); return cmd; } @@ -4218,7 +4317,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, spin_unlock_irqrestore(&vha->cmd_list_lock, flags); INIT_WORK(&cmd->work, qlt_do_work); - if (ha->msix_count) { + if (vha->flags.qpairs_available) { + queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); + } else if 
(ha->msix_count) { if (cmd->atio.u.isp24.fcp_cmnd.rddata) queue_work_on(smp_processor_id(), qla_tgt_wq, &cmd->work); @@ -5944,6 +6045,8 @@ static void qlt_sess_work_fn(struct work_struct *work) int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) { struct qla_tgt *tgt; + int rc, i; + struct qla_qpair_hint *h; if (!QLA_TGT_MODE_ENABLED()) return 0; @@ -5966,9 +6069,47 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) return -ENOMEM; } + tgt->qphints = kzalloc((ha->max_qpairs + 1) * + sizeof(struct qla_qpair_hint), GFP_KERNEL); + if (!tgt->qphints) { + kfree(tgt); + ql_log(ql_log_warn, base_vha, 0x0197, + "Unable to allocate qpair hints.\n"); + return -ENOMEM; + } + if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) base_vha->host->hostt->supported_mode |= MODE_TARGET; + rc = btree_init64(&tgt->lun_qpair_map); + if (rc) { + kfree(tgt->qphints); + kfree(tgt); + ql_log(ql_log_info, base_vha, 0x0198, + "Unable to initialize lun_qpair_map btree\n"); + return -EIO; + } + h = &tgt->qphints[0]; + h->qpair = ha->base_qpair; + INIT_LIST_HEAD(&h->hint_elem); + h->cpuid = ha->base_qpair->cpuid; + list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); + + for (i = 0; i < ha->max_qpairs; i++) { + unsigned long flags; + + struct qla_qpair *qpair = ha->queue_pair_map[i]; + h = &tgt->qphints[i + 1]; + INIT_LIST_HEAD(&h->hint_elem); + if (qpair) { + h->qpair = qpair; + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + list_add_tail(&h->hint_elem, &qpair->hints_list); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + h->cpuid = qpair->cpuid; + } + } + tgt->ha = ha; tgt->vha = base_vha; init_waitqueue_head(&tgt->waitQ); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 9519eeca1997..22c783e3e38f 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -787,10 +787,18 @@ struct qla_port_24xx_data { uint16_t reserved; }; +struct qla_qpair_hint { + struct list_head hint_elem; + struct qla_qpair *qpair; + u16 cpuid; + uint8_t cmd_cnt; +}; + struct qla_tgt { struct scsi_qla_host *vha; struct qla_hw_data *ha; - + struct btree_head64 lun_qpair_map; + struct qla_qpair_hint *qphints; /* * To sync between IRQ handlers and qlt_target_release(). Needed, * because req_pkt() can drop/reaquire HW lock inside. Protected by -- cgit From d65237c7f086042b5630d94fe2d151f62c09f723 Mon Sep 17 00:00:00 2001 From: Sawan Chandak Date: Tue, 13 Jun 2017 20:47:19 -0700 Subject: scsi: qla2xxx: Fix mailbox failure while deleting Queue pairs In target mode driver, queue pairs are not created during driver load time, instead they are created at the configuration time after chip reset. If a user tries to load/unload driver after queue pairs are created, then there would be mailbox failure, while deleting queue pairs. Flag is added to check if queue pairs are created or not. Queue pairs will be deleted only If they were created during target configuration. Signed-off-by: Sawan Chandak Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_init.c | 10 ++++++++-- drivers/scsi/qla2xxx/qla_mid.c | 4 ++++ drivers/scsi/qla2xxx/qla_os.c | 1 + 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 005ca2de3795..8b52f431a812 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3997,6 +3997,8 @@ typedef struct scsi_qla_host { uint32_t fw_tgt_reported:1; uint32_t bbcr_enable:1; uint32_t qpairs_available:1; + uint32_t qpairs_req_created:1; + uint32_t qpairs_rsp_created:1; } flags; atomic_t loop_state; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4366b12b0e6d..6230f33f2b85 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -7719,9 +7719,12 @@ fail_qid_map: int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) { - int ret; + int ret = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = qpair->hw; + if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created) + goto fail; + qpair->delete_in_progress = 1; while (atomic_read(&qpair->ref_count)) msleep(500); @@ -7738,8 +7741,11 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) clear_bit(qpair->id, ha->qpair_qid_map); ha->num_qpairs--; list_del(&qpair->qp_list_elem); - if (list_empty(&vha->qp_list)) + if (list_empty(&vha->qp_list)) { vha->flags.qpairs_available = 0; + vha->flags.qpairs_req_created = 0; + vha->flags.qpairs_rsp_created = 0; + } mempool_destroy(qpair->srb_mempool); kfree(qpair); mutex_unlock(&ha->mq_lock); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 4ad452a42dbe..f0605cd196fb 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -645,6 +645,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, int ret = 0; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t *reg; uint32_t cnt; @@ -741,6 +742,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, mutex_unlock(&ha->mq_lock); goto que_failed; } + vha->flags.qpairs_req_created = 1; } return req->id; @@ -772,6 +774,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, int ret = 0; struct rsp_que *rsp = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t *reg; @@ -855,6 +858,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, mutex_unlock(&ha->mq_lock); goto que_failed; } + vha->flags.qpairs_rsp_created = 1; } rsp->req = NULL; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3963602aef35..13e4d2428a9a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -384,6 +384,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, ha->base_qpair->rsp = rsp; ha->base_qpair->vha = vha; ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; + /* init qpair to this cpu. Will adjust at run time. 
*/ ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; INIT_LIST_HEAD(&ha->base_qpair->hints_list); qla_cpu_update(rsp->qpair, smp_processor_id()); -- cgit From 09620eeb62c4167d0c206f9a69730fa9e9251aae Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:20 -0700 Subject: scsi: qla2xxx: Add debug knob for user control workload For Target mode, user can control the work load by placing qla2xxx's irq vector on certain CPU via the smp_affinity knob. This patch allows user to control the number of QPair's irq to be active. The irqs are allocated at driver load time until unload. The work itself is placed on the QPair based on user setting. Usage: modprobe qla2xxx qlini_mode=disabled ql2xuctrlirq=1 mount -t debugfs none /sys/kernel/debug echo 2 > /sys/kernel/debug/qla2xxx/qla2xxx_[host num]/naqp echo [cpu id] > /proc/irq/[irq id]/smp_affinity_list Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 7 ++- drivers/scsi/qla2xxx/qla_dfs.c | 89 +++++++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_gbl.h | 2 + drivers/scsi/qla2xxx/qla_isr.c | 11 +++-- drivers/scsi/qla2xxx/qla_target.c | 32 ++++++++++++++ 5 files changed, 137 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8b52f431a812..18b37c864250 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3322,6 +3322,7 @@ struct qlt_hw_data { struct dentry *dfs_tgt_sess; struct dentry *dfs_tgt_port_database; + struct dentry *dfs_naqp; struct list_head q_full_list; uint32_t num_pend_cmds; @@ -3330,7 +3331,8 @@ struct qlt_hw_data { spinlock_t q_full_lock; uint32_t leak_exchg_thresh_hold; spinlock_t sess_lock; - int rspq_vector_cpuid; + int num_act_qpairs; +#define DEFAULT_NAQP 2 spinlock_t atio_lock ____cacheline_aligned; struct btree_head32 host_map; }; @@ -4278,6 +4280,9 @@ enum nexus_wait_type { WAIT_LUN, }; +#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \ + (IS_QLA27XX(_ha) || IS_QLA83XX(_ha))) + #include "qla_target.h" #include "qla_gbl.h" #include "qla_dbg.h" diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 391c50be2297..63d7374dce77 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -314,6 +314,81 @@ static const struct file_operations dfs_fce_ops = { .release = qla2x00_dfs_fce_release, }; +static int +qla_dfs_naqp_show(struct seq_file *s, void *unused) +{ + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + + seq_printf(s, "%d\n", ha->tgt.num_act_qpairs); + return 0; +} + +static int +qla_dfs_naqp_open(struct inode *inode, struct file *file) +{ + struct scsi_qla_host *vha = inode->i_private; + + return single_open(file, qla_dfs_naqp_show, vha); +} + +static ssize_t +qla_dfs_naqp_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + char *buf; + int rc = 0; + unsigned long num_act_qp; + + if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) { + pr_err("host%ld: this adapter does not support Multi Q.", + vha->host_no); + return -EINVAL; + } + + if (!vha->flags.qpairs_available) { + pr_err("host%ld: Driver is not setup with Multi Q.", + vha->host_no); + return -EINVAL; + } + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) { + pr_err("host%ld: fail to copy user buffer.", + 
vha->host_no); + return PTR_ERR(buf); + } + + num_act_qp = simple_strtoul(buf, NULL, 0); + + if (num_act_qp >= vha->hw->max_qpairs) { + pr_err("User set invalid number of qpairs %lu. Max = %d", + num_act_qp, vha->hw->max_qpairs); + rc = -EINVAL; + goto out_free; + } + + if (num_act_qp != ha->tgt.num_act_qpairs) { + ha->tgt.num_act_qpairs = num_act_qp; + qlt_clr_qp_table(vha); + } + rc = count; +out_free: + kfree(buf); + return rc; +} + +static const struct file_operations dfs_naqp_ops = { + .open = qla_dfs_naqp_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = qla_dfs_naqp_write, +}; + + int qla2x00_dfs_setup(scsi_qla_host_t *vha) { @@ -391,6 +466,15 @@ create_nodes: goto out; } + if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { + ha->tgt.dfs_naqp = debugfs_create_file("naqp", + 0400, ha->dfs_dir, vha, &dfs_naqp_ops); + if (!ha->tgt.dfs_naqp) { + ql_log(ql_log_warn, vha, 0xd011, + "Unable to create debugFS naqp node.\n"); + goto out; + } + } out: return 0; } @@ -400,6 +484,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + if (ha->tgt.dfs_naqp) { + debugfs_remove(ha->tgt.dfs_naqp); + ha->tgt.dfs_naqp = NULL; + } + if (ha->tgt.dfs_tgt_sess) { debugfs_remove(ha->tgt.dfs_tgt_sess); ha->tgt.dfs_tgt_sess = NULL; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f5493eda0110..beebf96c23d4 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -139,6 +139,7 @@ extern int ql2xexchoffld; extern int ql2xiniexchg; extern int ql2xfwholdabts; extern int ql2xmvasynctoatio; +extern int ql2xuctrlirq; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -856,5 +857,6 @@ void qla24xx_delete_sess_fn(struct work_struct *); void qlt_unknown_atio_work_fn(struct work_struct *); void qlt_update_host_map(struct scsi_qla_host *, port_id_t); void qlt_remove_target_resources(struct qla_hw_data *); +void qlt_clr_qp_table(struct scsi_qla_host *vha); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9eb946cc8297..3c9f9aa7f2c2 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3227,9 +3227,14 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) min_vecs++; } - ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, - ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, - &desc); + if (USER_CTRL_IRQ(ha)) { + /* user wants to control IRQ setting for target mode */ + ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, + ha->msix_count, PCI_IRQ_MSIX); + } else + ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, + ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &desc); if (ret < 0) { ql_log(ql_log_fatal, vha, 0x00c7, diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 92e41055e6f8..87f80dccaf8c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -66,6 +66,13 @@ MODULE_PARM_DESC(ql_dm_tgt_ex_pct, "the percentage of exchanges/cmds FW will allocate resources " "for Target mode."); +int ql2xuctrlirq = 1; +module_param(ql2xuctrlirq, int, 0644); +MODULE_PARM_DESC(ql2xuctrlirq, + "User to control IRQ placement via smp_affinity." + "Valid with qlini_mode=disabled." 
+ "1(default): enable"); + int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; static int temp_sam_status = SAM_STAT_BUSY; @@ -4059,6 +4066,31 @@ static void qlt_do_work(struct work_struct *work) __qlt_do_work(cmd); } +void qlt_clr_qp_table(struct scsi_qla_host *vha) +{ + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + void *node; + u64 key = 0; + + ql_log(ql_log_info, vha, 0x706c, + "User update Number of Active Qpairs %d\n", + ha->tgt.num_act_qpairs); + + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + + btree_for_each_safe64(&tgt->lun_qpair_map, key, node) + btree_remove64(&tgt->lun_qpair_map, key); + + ha->base_qpair->lun_cnt = 0; + for (key = 0; key < ha->max_qpairs; key++) + if (ha->queue_pair_map[key]) + ha->queue_pair_map[key]->lun_cnt = 0; + + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); +} + static void qlt_assign_qpair(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) { -- cgit From 4b60c82736d0e2d7f863cba1a320e89bf74a884e Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:21 -0700 Subject: scsi: qla2xxx: Add fw_started flags to qpair Add an fw_started flag to the qpair to reduce cache thrash. This reduces access to the qla_hw_data structure by each qpair. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 21 +++++++++++++++++++++ drivers/scsi/qla2xxx/qla_init.c | 4 ++-- drivers/scsi/qla2xxx/qla_isr.c | 2 +- drivers/scsi/qla2xxx/qla_target.c | 9 ++++----- 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 18b37c864250..0dec148a4580 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3262,6 +3262,7 @@ struct qla_qpair { /* move vha->flags.difdix_supported here */ uint32_t difdix_supported:1; uint32_t delete_in_progress:1; + uint32_t fw_started:1; uint16_t id; /* qp number used with FW */ uint16_t vp_idx; /* vport ID */ @@ -4183,6 +4184,26 @@ struct qla2_sgx { srb_t *sp; }; +#define QLA_FW_STARTED(_ha) { \ + int i; \ + _ha->flags.fw_started = 1; \ + _ha->base_qpair->fw_started = 1; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->fw_started = 1; \ + } \ +} + +#define QLA_FW_STOPPED(_ha) { \ + int i; \ + _ha->flags.fw_started = 0; \ + _ha->base_qpair->fw_started = 0; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->fw_started = 0; \ + } \ +} + /* * Macros to help code, maintain, etc.
*/ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 6230f33f2b85..e097c3331113 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3207,7 +3207,7 @@ next_check: } else { ql_dbg(ql_dbg_init, vha, 0x00d3, "Init Firmware -- success.\n"); - ha->flags.fw_started = 1; + QLA_FW_STARTED(ha); } return (rval); @@ -6841,7 +6841,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) ret = qla2x00_stop_firmware(vha); } - ha->flags.fw_started = 0; + QLA_FW_STOPPED(ha); ha->flags.fw_init_done = 0; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3c9f9aa7f2c2..40385bc1d1fa 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -710,7 +710,7 @@ skip_rio: ha->isp_ops->fw_dump(vha, 1); ha->flags.fw_init_done = 0; - ha->flags.fw_started = 0; + QLA_FW_STOPPED(ha); if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 87f80dccaf8c..0ce0d500982a 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3086,7 +3086,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, else vha->tgt_counters.core_qla_que_buf++; - if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) { + if (!qpair->fw_started || cmd->reset_count != vha->hw->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. @@ -3096,7 +3096,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, ql_dbg(ql_dbg_async, vha, 0xe101, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), - cmd->reset_count, ha->chip_reset); + cmd->reset_count, vha->hw->chip_reset); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return 0; } @@ -3206,7 +3206,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) { struct ctio7_to_24xx *pkt; struct scsi_qla_host *vha = cmd->vha; - struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = cmd->tgt; struct qla_tgt_prm prm; unsigned long flags = 0; @@ -3223,7 +3222,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) if (qlt_pci_map_calc_cnt(&prm) != 0) return -EAGAIN; - if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) || + if (!qpair->fw_started || (cmd->reset_count != vha->hw->chip_reset) || (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from @@ -3234,7 +3233,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) ql_dbg(ql_dbg_async, vha, 0xe102, "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), - cmd->reset_count, ha->chip_reset); + cmd->reset_count, vha->hw->chip_reset); return 0; } -- cgit From 7c3f8fd10bab0b4d9021a11f123fd67e81ef3b0e Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:22 -0700 Subject: scsi: qla2xxx: move fields from qla_hw_data to qla_qpair - Move chip_reset, enable_class_2 fields from qla_hw_data to qla_qpair to reduce cache thrash for target MQ. - Optimizations to reduce unnecessary memory load for good path io. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_attr.c | 2 +- drivers/scsi/qla2xxx/qla_def.h | 25 +++++++++++-- drivers/scsi/qla2xxx/qla_init.c | 14 ++++++-- drivers/scsi/qla2xxx/qla_os.c | 2 +- drivers/scsi/qla2xxx/qla_target.c | 75 +++++++++++++++++++-------------------- drivers/scsi/qla2xxx/qla_target.h | 3 +- 6 files changed, 73 insertions(+), 48 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f0f16d313faf..6dd984203666 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2289,7 +2289,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); - fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ? + fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ? (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3; fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 0dec148a4580..dfa001357110 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3252,6 +3252,7 @@ struct qla_qpair { */ spinlock_t *qp_lock_ptr; struct scsi_qla_host *vha; + u32 chip_reset; /* distill these fields down to 'online=0/1' * ha->flags.eeh_busy @@ -3263,6 +3264,8 @@ struct qla_qpair { uint32_t difdix_supported:1; uint32_t delete_in_progress:1; uint32_t fw_started:1; + uint32_t enable_class_2:1; + uint32_t enable_explicit_conf:1; uint16_t id; /* qp number used with FW */ uint16_t vp_idx; /* vport ID */ @@ -3296,8 +3299,6 @@ struct scsi_qlt_host { struct qlt_hw_data { /* Protected by hw lock */ - uint32_t enable_class_2:1; - uint32_t enable_explicit_conf:1; uint32_t node_name_set:1; dma_addr_t atio_dma; /* Physical address. 
*/ @@ -3954,7 +3955,6 @@ struct qla_hw_data { struct work_struct board_disable; struct mr_data_fx00 mr; - uint32_t chip_reset; struct qlt_hw_data tgt; int allow_cna_fw_dump; @@ -4247,6 +4247,25 @@ struct qla2_sgx { #define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \ atomic_dec(&__qpair->ref_count); \ + +#define QLA_ENA_CONF(_ha) {\ + int i;\ + _ha->base_qpair->enable_explicit_conf = 1; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->enable_explicit_conf = 1; \ + } \ +} + +#define QLA_DIS_CONF(_ha) {\ + int i;\ + _ha->base_qpair->enable_explicit_conf = 0; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->enable_explicit_conf = 0; \ + } \ +} + /* * qla2x00 local function return status codes */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e097c3331113..3e5f193480f0 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1322,7 +1322,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x20ea, "%s %d %8phC post gpdb\n", __func__, __LINE__, ea->fcport->port_name); - ea->fcport->chip_reset = vha->hw->chip_reset; + ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; @@ -5524,6 +5524,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) struct scsi_qla_host *vp; unsigned long flags; fc_port_t *fcport; + u16 i; /* For ISP82XX, driver waits for completion of the commands. * online flag should be set. @@ -5549,7 +5550,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) ha->current_topology = 0; ha->flags.fw_started = 0; ha->flags.fw_init_done = 0; - ha->chip_reset++; + ha->base_qpair->chip_reset++; + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) + ha->queue_pair_map[i]->chip_reset = + ha->base_qpair->chip_reset; + } atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { @@ -7624,6 +7630,10 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->id = qpair_id; qpair->vp_idx = vp_idx; INIT_LIST_HEAD(&qpair->hints_list); + qpair->chip_reset = ha->base_qpair->chip_reset; + qpair->enable_class_2 = ha->base_qpair->enable_class_2; + qpair->enable_explicit_conf = + ha->base_qpair->enable_explicit_conf; for (i = 0; i < ha->msix_count; i++) { msix = &ha->msix_entries[i]; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 13e4d2428a9a..44be2c8237fd 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -387,6 +387,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, /* init qpair to this cpu. Will adjust at run time. 
*/ ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; INIT_LIST_HEAD(&ha->base_qpair->hints_list); + ha->base_qpair->enable_class_2 = ql2xenableclass2; qla_cpu_update(rsp->qpair, smp_processor_id()); if (ql2xmqsupport && ha->max_qpairs) { @@ -2708,7 +2709,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ql_dbg_pci(ql_dbg_init, pdev, 0x000a, "Memory allocated for ha=%p.\n", ha); ha->pdev = pdev; - ha->tgt.enable_class_2 = ql2xenableclass2; INIT_LIST_HEAD(&ha->tgt.q_full_list); spin_lock_init(&ha->tgt.q_full_lock); spin_lock_init(&ha->tgt.sess_lock); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0ce0d500982a..7e6c575e3e29 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -577,7 +577,7 @@ void qla2x00_async_nack_sp_done(void *s, int res) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); sp->fcport->flags &= ~FCF_ASYNC_SENT; - sp->fcport->chip_reset = vha->hw->chip_reset; + sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset; switch (sp->type) { case SRB_NACK_PLOGI: @@ -1032,7 +1032,7 @@ static void qlt_free_session_done(struct work_struct *work) sess->login_succ = 0; } - if (sess->chip_reset != sess->vha->hw->chip_reset) + if (sess->chip_reset != ha->base_qpair->chip_reset) qla2x00_clear_loop_id(sess); if (sess->conflict) { @@ -1162,7 +1162,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) static void qla24xx_chk_fcp_state(struct fc_port *sess) { - if (sess->chip_reset != sess->vha->hw->chip_reset) { + if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) { sess->logout_on_delete = 0; sess->logo_ack_needed = 0; sess->fw_login_state = DSC_LS_PORT_UNAVAIL; @@ -1917,7 +1917,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); - mcmd->reset_count = vha->hw->chip_reset; + mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->tmr_func = QLA_TGT_ABTS; mcmd->qpair = ha->base_qpair; @@ -2146,7 +2146,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) spin_lock_irqsave(qpair->qp_lock_ptr, flags); - if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) { + if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. 
@@ -2154,7 +2154,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) ql_dbg(ql_dbg_async, vha, 0xe100, "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), - mcmd->reset_count, ha->chip_reset); + mcmd->reset_count, qpair->chip_reset); ha->tgt.tgt_ops->free_mcmd(mcmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return; @@ -2568,20 +2568,22 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, uint32_t *full_req_cnt) { - struct qla_tgt *tgt = cmd->tgt; - struct scsi_qla_host *vha = tgt->vha; - struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd = &cmd->se_cmd; prm->cmd = cmd; - prm->tgt = tgt; + prm->tgt = cmd->tgt; + prm->pkt = NULL; prm->rq_result = scsi_status; prm->sense_buffer = &cmd->sense_buffer[0]; prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; prm->sg = NULL; prm->seg_cnt = -1; prm->req_cnt = 1; + prm->residual = 0; prm->add_status_pkt = 0; + prm->prot_sg = NULL; + prm->prot_seg_cnt = 0; + prm->tot_dsds = 0; if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { if (qlt_pci_map_calc_cnt(prm) != 0) @@ -2592,7 +2594,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { prm->residual = se_cmd->residual_count; - ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c, + ql_dbg(ql_dbg_io + ql_dbg_verbose, cmd->vha, 0x305c, "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, @@ -2600,7 +2602,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, prm->rq_result |= SS_RESIDUAL_UNDER; } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { prm->residual = se_cmd->residual_count; - ql_dbg(ql_dbg_io, vha, 0x305d, + ql_dbg(ql_dbg_io, cmd->vha, 0x305d, "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 
se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); @@ -2614,7 +2616,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, */ if (qlt_has_data(cmd)) { if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || - (IS_FWI2_CAPABLE(ha) && + (IS_FWI2_CAPABLE(cmd->vha->hw) && (prm->rq_result != 0))) { prm->add_status_pkt = 1; (*full_req_cnt)++; @@ -2625,17 +2627,17 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, return 0; } -static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, - struct qla_tgt_cmd *cmd, int sending_sense) +static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, + int sending_sense) { - if (ha->tgt.enable_class_2) + if (cmd->qpair->enable_class_2) return 0; if (sending_sense) return cmd->conf_compl_supported; else - return ha->tgt.enable_explicit_conf && - cmd->conf_compl_supported; + return cmd->qpair->enable_explicit_conf && + cmd->conf_compl_supported; } static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, @@ -2644,7 +2646,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, (uint32_t)sizeof(ctio->u.status1.sense_data)); ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); - if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { + if (qlt_need_explicit_conf(prm->cmd, 0)) { ctio->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); @@ -2654,7 +2656,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { int i; - if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { + if (qlt_need_explicit_conf(prm->cmd, 1)) { if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, "Skipping EXPLICIT_CONFORM and " @@ -3047,7 +3049,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, uint8_t scsi_status) { struct scsi_qla_host *vha = cmd->vha; - struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = cmd->qpair; struct ctio7_to_24xx *pkt; struct qla_tgt_prm prm; @@ -3065,8 +3066,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, return 0; } - memset(&prm, 0, sizeof(prm)); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? @@ -3086,7 +3085,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, else vha->tgt_counters.core_qla_que_buf++; - if (!qpair->fw_started || cmd->reset_count != vha->hw->chip_reset) { + if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. 
@@ -3096,7 +3095,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, ql_dbg(ql_dbg_async, vha, 0xe101, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), - cmd->reset_count, vha->hw->chip_reset); + cmd->reset_count, qpair->chip_reset); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return 0; } @@ -3133,7 +3132,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, cpu_to_le32(prm.residual); pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_SEND_STATUS); - if (qlt_need_explicit_conf(ha, cmd, 0)) { + if (qlt_need_explicit_conf(cmd, 0)) { pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | @@ -3222,7 +3221,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) if (qlt_pci_map_calc_cnt(&prm) != 0) return -EAGAIN; - if (!qpair->fw_started || (cmd->reset_count != vha->hw->chip_reset) || + if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from @@ -3233,7 +3232,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) ql_dbg(ql_dbg_async, vha, 0xe102, "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), - cmd->reset_count, vha->hw->chip_reset); + cmd->reset_count, qpair->chip_reset); return 0; } @@ -3979,7 +3978,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) { scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct fc_port *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; @@ -3990,8 +3988,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) cmd->cmd_in_wq = 0; cmd->trc_flags |= TRC_DO_WORK; - if (tgt->tgt_stop) - goto out_term; if (cmd->aborted) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, @@ -4197,10 +4193,10 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->trc_flags = 0; cmd->jiffies_at_alloc = get_jiffies_64(); - cmd->reset_count = vha->hw->chip_reset; cmd->unpacked_lun = scsilun_to_int( (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); qlt_assign_qpair(vha, cmd); + cmd->reset_count = vha->hw->base_qpair->chip_reset; return cmd; } @@ -4391,7 +4387,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, } mcmd->tmr_func = fn; mcmd->flags = flags; - mcmd->reset_count = vha->hw->chip_reset; + mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->qpair = ha->base_qpair; switch (fn) { @@ -4474,7 +4470,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, unpacked_lun = scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); - mcmd->reset_count = vha->hw->chip_reset; + mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; mcmd->qpair = ha->base_qpair; @@ -5202,7 +5198,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, cmd->tgt = vha->vha_tgt.qla_tgt; cmd->vha = vha; - cmd->reset_count = vha->hw->chip_reset; + cmd->reset_count = ha->base_qpair->chip_reset; cmd->q_full = 1; cmd->qpair = ha->base_qpair; @@ -6638,7 +6634,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) return; } - if (ha->tgt.enable_class_2) { + if (ha->base_qpair->enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; @@ -6742,7 +6738,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) return; } - if (ha->tgt.enable_class_2) { + if (ha->base_qpair->enable_class_2) { if 
(vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; @@ -6866,7 +6862,8 @@ qlt_handle_abts_recv_work(struct work_struct *work) struct qla_hw_data *ha = vha->hw; unsigned long flags; - if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset)) + if (qla2x00_reset_active(vha) || + (op->chip_reset != ha->base_qpair->chip_reset)) return; spin_lock_irqsave(&ha->tgt.atio_lock, flags); @@ -6898,7 +6895,7 @@ qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, memcpy(&op->atio, pkt, sizeof(*pkt)); op->vha = vha; - op->chip_reset = vha->hw->chip_reset; + op->chip_reset = vha->hw->base_qpair->chip_reset; op->rsp = rsp; INIT_WORK(&op->work, qlt_handle_abts_recv_work); queue_work(qla_tgt_wq, &op->work); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 22c783e3e38f..5f497311d7b7 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -881,6 +881,7 @@ struct qla_tgt_cmd { struct se_cmd se_cmd; struct fc_port *sess; struct qla_qpair *qpair; + uint32_t reset_count; int state; struct work_struct work; /* Sense buffer that will be mapped into outgoing status */ @@ -906,7 +907,6 @@ struct qla_tgt_cmd { int offset; u64 unpacked_lun; enum dma_data_direction dma_data_direction; - uint32_t reset_count; uint16_t loop_id; /* to save extra sess dereferences */ struct qla_tgt *tgt; /* to save extra sess dereferences */ @@ -980,7 +980,6 @@ struct qla_tgt_prm { int seg_cnt; int req_cnt; uint16_t rq_result; - uint16_t scsi_status; int sense_buffer_len; int residual; int add_status_pkt; -- cgit From af7bb3826a49b774332a5e8ff3f68877bdfd21c9 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:23 -0700 Subject: scsi: qla2xxx: use shadow register for ISP27XX For ISP27XX, use shadow register to read FW provided REQQ's consumer index. The shadow register is dma'ed by firmware. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 1 + drivers/scsi/qla2xxx/qla_os.c | 1 + drivers/scsi/qla2xxx/qla_target.c | 3 ++- 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index dfa001357110..b3ba32773db4 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3266,6 +3266,7 @@ struct qla_qpair { uint32_t fw_started:1; uint32_t enable_class_2:1; uint32_t enable_explicit_conf:1; + uint32_t use_shadow_reg:1; uint16_t id; /* qp number used with FW */ uint16_t vp_idx; /* vport ID */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 3e5f193480f0..62c856cad7ec 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -7614,6 +7614,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->vha = vha; qpair->qp_lock_ptr = &qpair->qp_lock; spin_lock_init(&qpair->qp_lock); + qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 
1 : 0; /* Assign available que pair id */ mutex_lock(&ha->mq_lock); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 44be2c8237fd..1d66954b7e5a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -384,6 +384,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, ha->base_qpair->rsp = rsp; ha->base_qpair->vha = vha; ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; + ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; /* init qpair to this cpu. Will adjust at run time. */ ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; INIT_LIST_HEAD(&ha->base_qpair->hints_list); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 7e6c575e3e29..1d8b5f14a69c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2288,7 +2288,8 @@ static int qlt_check_reserve_free_req(struct qla_qpair *qpair, struct req_que *req = qpair->req; if (req->cnt < (req_cnt + 2)) { - cnt = (uint16_t)RD_REG_DWORD(req->req_q_out); + cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out)); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; -- cgit From 8abfa9e2268337cdcd458e24008345973328e2e2 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:24 -0700 Subject: scsi: qla2xxx: Add function call to qpair for door bell Add a doorbell callback to the qpair. This helps reduce access to the qla_hw_data structure in order to reduce cache thrash. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 3 ++ drivers/scsi/qla2xxx/qla_init.c | 3 ++ drivers/scsi/qla2xxx/qla_inline.h | 15 +++++++++ drivers/scsi/qla2xxx/qla_os.c | 35 ++++++++++++------- drivers/scsi/qla2xxx/qla_target.c | 71 +++++++++++++++++++++++++++------------ drivers/scsi/qla2xxx/qla_target.h | 2 +- 6 files changed, 94 insertions(+), 35 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index b3ba32773db4..17b13dd92511 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3272,6 +3272,9 @@ struct qla_qpair { uint16_t vp_idx; /* vport ID */ mempool_t *srb_mempool; + struct pci_dev *pdev; + void (*reqq_start_iocbs)(struct qla_qpair *); + /* to do: New driver: move queues to here instead of pointers */ struct req_que *req; struct rsp_que *rsp; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 62c856cad7ec..4a659460d6e6 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -7653,6 +7653,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->msix->in_use = 1; list_add_tail(&qpair->qp_list_elem, &vha->qp_list); + qpair->pdev = ha->pdev; + if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) + qpair->reqq_start_iocbs = qla_83xx_start_iocbs; mutex_unlock(&ha->mq_lock); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index bd8cb796f64e..9a2c86eacf44 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -352,3 +352,18 @@ qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair) return NULL; } + +static inline void +qla_83xx_start_iocbs(struct qla_qpair *qpair) +{ + struct req_que *req = qpair->req; + + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + 
WRT_REG_DWORD(req->req_q_in, req->ring_index); +} diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1d66954b7e5a..88e115fcea60 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -351,6 +351,28 @@ int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, struct qla_qpair *qpair); /* -------------------------------------------------------------------------- */ +static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, + struct rsp_que *rsp) +{ + struct qla_hw_data *ha = vha->hw; + rsp->qpair = ha->base_qpair; + rsp->req = req; + ha->base_qpair->req = req; + ha->base_qpair->rsp = rsp; + ha->base_qpair->vha = vha; + ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; + ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; + ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; + INIT_LIST_HEAD(&ha->base_qpair->hints_list); + ha->base_qpair->enable_class_2 = ql2xenableclass2; + /* init qpair to this cpu. Will adjust at run time. */ + qla_cpu_update(rsp->qpair, smp_processor_id()); + ha->base_qpair->pdev = ha->pdev; + + if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) + ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs; +} + static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, struct rsp_que *rsp) { @@ -378,18 +400,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, goto fail_base_qpair; } - rsp->qpair = ha->base_qpair; - rsp->req = req; - ha->base_qpair->req = req; - ha->base_qpair->rsp = rsp; - ha->base_qpair->vha = vha; - ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; - ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; - /* init qpair to this cpu. Will adjust at run time. */ - ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; - INIT_LIST_HEAD(&ha->base_qpair->hints_list); - ha->base_qpair->enable_class_2 = ql2xenableclass2; - qla_cpu_update(rsp->qpair, smp_processor_id()); + qla_init_base_qpair(vha, req, rsp); if (ql2xmqsupport && ha->max_qpairs) { ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 1d8b5f14a69c..db5a81265d1b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1728,7 +1728,10 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, qpair->req); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); } /* @@ -2058,7 +2061,10 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(ha, qpair->req); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(ha, qpair->req); } void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) @@ -2071,12 +2077,13 @@ EXPORT_SYMBOL(qlt_free_mcmd); * ha->hardware_lock supposed to be held on entry. 
Might drop it, then * reacquire */ -void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, +void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) { struct atio_from_isp *atio = &cmd->atio; struct ctio7_to_24xx *ctio; uint16_t temp; + struct scsi_qla_host *vha = cmd->vha; ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " @@ -2127,7 +2134,11 @@ void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, vha->req); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + out: return; } @@ -2205,7 +2216,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) BUG_ON(cmd->sg_cnt == 0); prm->sg = (struct scatterlist *)cmd->sg; - prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg, + prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); if (unlikely(prm->seg_cnt == 0)) goto out_err; @@ -2232,7 +2243,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) if (cmd->prot_sg_cnt) { prm->prot_sg = cmd->prot_sg; - prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev, + prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt, cmd->dma_data_direction); if (unlikely(prm->prot_seg_cnt == 0)) @@ -2260,21 +2271,24 @@ out_err: static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) { - struct qla_hw_data *ha = vha->hw; - + struct qla_hw_data *ha; + struct qla_qpair *qpair; if (!cmd->sg_mapped) return; - pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); + qpair = cmd->qpair; + + pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt, + cmd->dma_data_direction); cmd->sg_mapped = 0; if (cmd->prot_sg_cnt) - pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, + pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt, cmd->dma_data_direction); if (!cmd->ctx) return; - + ha = vha->hw; if (cmd->ctx_dsd_alloced) qla2x00_clean_dsd_pool(ha, cmd->ctx); @@ -2324,7 +2338,6 @@ static inline void *qlt_get_req_pkt(struct req_que *req) /* ha->hardware_lock supposed to be held on entry */ static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) { - struct scsi_qla_host *vha = qpair->vha; uint32_t h; int index; uint8_t found = 0; @@ -2349,9 +2362,9 @@ static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) if (found) { req->current_outstanding_cmd = h; } else { - ql_dbg(ql_dbg_io, vha, 0x305b, - "qla_target(%d): Ran out of empty cmd slots\n", - vha->vp_idx); + ql_dbg(ql_dbg_io, qpair->vha, 0x305b, + "qla_target(%d): Ran out of empty cmd slots\n", + qpair->vha->vp_idx); h = QLA_TGT_NULL_HANDLE; } @@ -3189,7 +3202,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, qpair->req); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return 0; @@ -3264,7 +3280,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, qpair->req); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return res; @@ -3282,7 +3301,7 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer); * it is assumed either hardware_lock or qpair lock is held. 
*/ static void -qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, +qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, struct ctio_crc_from_fw *sts) { uint8_t *ap = &sts->actual_dif[0]; @@ -3290,6 +3309,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, uint64_t lba = cmd->se_cmd.t_task_lba; uint8_t scsi_status, sense_key, asc, ascq; unsigned long flags; + struct scsi_qla_host *vha = cmd->vha; cmd->trc_flags |= TRC_DIF_ERR; @@ -3370,7 +3390,8 @@ out: } spin_unlock_irqrestore(&cmd->cmd_lock, flags); - qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq); + qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, + ascq); /* assume scsi status gets out on the wire. * Will not wait for completion. */ @@ -3525,7 +3546,10 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair, /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, qpair->req); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); return ret; } @@ -3883,7 +3907,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, *((u64 *)&crc->actual_dif[0]), *((u64 *)&crc->expected_dif[0])); - qlt_handle_dif_error(vha, cmd, ctio); + qlt_handle_dif_error(qpair, cmd, ctio); return; } default: @@ -5121,7 +5145,10 @@ static int __qlt_send_busy(struct qla_qpair *qpair, ctio24->u.status1.scsi_status = cpu_to_le16(status); /* Memory Barrier */ wmb(); - qla2x00_start_iocbs(vha, qpair->req); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 5f497311d7b7..30a389445de4 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -1097,7 +1097,7 @@ extern int qlt_free_qfull_cmds(struct qla_qpair *); extern void qlt_logo_completion_handler(fc_port_t *, int); extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); -void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t, +void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t, uint8_t, uint8_t, uint8_t); extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *, struct qla_tgt_cmd *); -- cgit From 22d84726e3b82458dee015c56c88dae8c861436e Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:25 -0700 Subject: scsi: qla2xxx: Add debug logging routine for qpair For target main path io routines that uses qpair, create new logging & debugging routines to use qpair instead of reaching for scsi_qla_host to reduce cache thrash. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_dbg.c | 101 ++++++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_dbg.h | 6 +++ drivers/scsi/qla2xxx/qla_target.c | 28 +++++------ drivers/scsi/qla2xxx/qla_target.h | 1 + 4 files changed, 122 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 11e097e123bd..c0c90dcc7c7b 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -2713,3 +2713,104 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, buf + cnt, min(16U, size - cnt), false); } } + +/* + * This function is for formatting and logging log messages. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file. 
All the messages will be logged + * irrespective of value of ql2xextended_error_logging. + * parameters: + * level: The level of the log messages to be printed in the + * messages file. + * vha: Pointer to the scsi_qla_host_t + * id: This is a unique id for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char pbuf[128]; + + if (level > ql_errlev) + return; + + if (qpair != NULL) { + const struct pci_dev *pdev = qpair->pdev; + /* : Message */ + snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ", + QL_MSGHDR, dev_name(&(pdev->dev)), id); + } else { + snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", + QL_MSGHDR, "0000:00:00.0", id); + } + pbuf[sizeof(pbuf) - 1] = 0; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + switch (level) { + case ql_log_fatal: /* FATAL LOG */ + pr_crit("%s%pV", pbuf, &vaf); + break; + case ql_log_warn: + pr_err("%s%pV", pbuf, &vaf); + break; + case ql_log_info: + pr_warn("%s%pV", pbuf, &vaf); + break; + default: + pr_info("%s%pV", pbuf, &vaf); + break; + } + + va_end(va); +} + +/* + * This function is for formatting and logging debug information. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file. + * parameters: + * level: The level of the debug messages to be printed. + * If ql2xextended_error_logging value is correctly set, + * this message will appear in the messages file. + * vha: Pointer to the scsi_qla_host_t. + * id: This is a unique identifier for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, + const char *fmt, ...) 
+{ + va_list va; + struct va_format vaf; + + if (!ql_mask_match(level)) + return; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (qpair != NULL) { + const struct pci_dev *pdev = qpair->pdev; + /* : Message */ + pr_warn("%s [%s]-%04x: %pV", + QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, + &vaf); + } else { + pr_warn("%s [%s]-%04x: : %pV", + QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf); + } + + va_end(va); + +} diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index c6bffe929fe7..f60138f66dce 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -313,12 +313,18 @@ void __attribute__((format (printf, 4, 5))) ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); +void __attribute__((format (printf, 4, 5))) +ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); + void __attribute__((format (printf, 4, 5))) ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); +void __attribute__((format (printf, 4, 5))) +ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); + /* Debug Levels */ /* The 0x40000000 is the max value any debug level can have * as ql2xextended_error_logging is of type signed int diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index db5a81265d1b..0e8cdaa03ee4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2263,7 +2263,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) return 0; out_err: - ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d, + ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, "qla_target(%d): PCI mapping failed: sg_cnt=%d", 0, prm->cmd->sg_cnt); return -1; @@ -2379,7 +2379,6 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, struct ctio7_to_24xx *pkt; struct atio_from_isp *atio = &prm->cmd->atio; uint16_t temp; - struct scsi_qla_host *vha = prm->cmd->vha; pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; prm->pkt = pkt; @@ -2387,7 +2386,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, pkt->entry_type = CTIO_TYPE7; pkt->entry_count = (uint8_t)prm->req_cnt; - pkt->vp_index = vha->vp_idx; + pkt->vp_index = prm->cmd->vp_idx; h = qlt_make_handle(qpair); if (unlikely(h == QLA_TGT_NULL_HANDLE)) { @@ -2583,6 +2582,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, uint32_t *full_req_cnt) { struct se_cmd *se_cmd = &cmd->se_cmd; + struct qla_qpair *qpair = cmd->qpair; prm->cmd = cmd; prm->tgt = cmd->tgt; @@ -2608,7 +2608,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { prm->residual = se_cmd->residual_count; - ql_dbg(ql_dbg_io + ql_dbg_verbose, cmd->vha, 0x305c, + ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 
se_cmd->t_task_cdb[0] : 0, @@ -2616,7 +2616,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, prm->rq_result |= SS_RESIDUAL_UNDER; } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { prm->residual = se_cmd->residual_count; - ql_dbg(ql_dbg_io, cmd->vha, 0x305d, + ql_dbg_qp(ql_dbg_io, qpair, 0x305d, "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); @@ -2672,7 +2672,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, if (qlt_need_explicit_conf(prm->cmd, 1)) { if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { - ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, + ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, "Skipping EXPLICIT_CONFORM and " "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " "non GOOD status\n"); @@ -2867,9 +2867,9 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) prm->pkt = pkt; memset(pkt, 0, sizeof(*pkt)); - ql_dbg(ql_dbg_tgt, vha, 0xe071, + ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", - vha->vp_idx, __func__, se_cmd, se_cmd->prot_op, + cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || @@ -2932,7 +2932,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) /* Update entry type to indicate Command Type CRC_2 IOCB */ pkt->entry_type = CTIO_CRC2; pkt->entry_count = 1; - pkt->vp_index = vha->vp_idx; + pkt->vp_index = cmd->vp_idx; h = qlt_make_handle(qpair); if (unlikely(h == QLA_TGT_NULL_HANDLE)) { @@ -3080,7 +3080,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, return 0; } - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, + ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? 
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, @@ -3106,7 +3106,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, */ cmd->state = QLA_TGT_STATE_PROCESSED; qlt_abort_cmd_on_host_reset(cmd->vha, cmd); - ql_dbg(ql_dbg_async, vha, 0xe101, + ql_dbg_qp(ql_dbg_async, qpair, 0xe101, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); @@ -3164,7 +3164,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, (struct ctio7_to_24xx *)qlt_get_req_pkt( qpair->req); - ql_dbg(ql_dbg_tgt, vha, 0x305e, + ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, "Building additional status packet 0x%p.\n", ctio); @@ -3191,7 +3191,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, */ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, &prm); - pr_debug("Status CTIO7: %p\n", ctio); } } else qlt_24xx_init_ctio_to_isp(pkt, &prm); @@ -3246,7 +3245,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) */ cmd->state = QLA_TGT_STATE_NEED_DATA; qlt_abort_cmd_on_host_reset(cmd->vha, cmd); - ql_dbg(ql_dbg_async, vha, 0xe102, + ql_dbg_qp(ql_dbg_async, qpair, 0xe102, "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); @@ -4222,6 +4221,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); qlt_assign_qpair(vha, cmd); cmd->reset_count = vha->hw->base_qpair->chip_reset; + cmd->vp_idx = vha->vp_idx; return cmd; } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 30a389445de4..bb8f73a86b2b 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -908,6 +908,7 @@ struct qla_tgt_cmd { u64 unpacked_lun; enum dma_data_direction dma_data_direction; + uint16_t vp_idx; uint16_t loop_id; /* to save extra sess dereferences */ struct qla_tgt *tgt; /* to save extra sess dereferences */ struct scsi_qla_host *vha; -- cgit From 32d29b4ce82785a78d059d89f40fc1cad163e0ad Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:26 -0700 Subject: scsi: qla2xxx: Remove unused tgt_enable_64bit_addr flag By default, this flag is forced to true. Remove this flag and the unnecessary checks for it. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_target.c | 44 ++++++++++++--------------------------- drivers/scsi/qla2xxx/qla_target.h | 1 - 2 files changed, 13 insertions(+), 32 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0e8cdaa03ee4..e8aa58dfa93c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2420,12 +2420,10 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, * ha->hardware_lock supposed to be held on entry. We have already made sure * that there is sufficient amount of request entries to not drop it.
*/ -static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, - struct scsi_qla_host *vha) +static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) { int cnt; uint32_t *dword_ptr; - int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; /* Build continuation packets */ while (prm->seg_cnt > 0) { @@ -2445,16 +2443,8 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, cont_pkt64->entry_count = 1; cont_pkt64->sys_define = 0; - if (enable_64bit_addressing) { - cont_pkt64->entry_type = CONTINUE_A64_TYPE; - dword_ptr = - (uint32_t *)&cont_pkt64->dseg_0_address; - } else { - cont_pkt64->entry_type = CONTINUE_TYPE; - dword_ptr = - (uint32_t *)&((cont_entry_t *) - cont_pkt64)->dseg_0_address; - } + cont_pkt64->entry_type = CONTINUE_A64_TYPE; + dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address; /* Load continuation entry data segments */ for (cnt = 0; @@ -2463,12 +2453,8 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, *dword_ptr++ = cpu_to_le32(pci_dma_lo32 (sg_dma_address(prm->sg))); - if (enable_64bit_addressing) { - *dword_ptr++ = - cpu_to_le32(pci_dma_hi32 - (sg_dma_address - (prm->sg))); - } + *dword_ptr++ = cpu_to_le32(pci_dma_hi32 + (sg_dma_address(prm->sg))); *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); prm->sg = sg_next(prm->sg); @@ -2480,12 +2466,10 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, * ha->hardware_lock supposed to be held on entry. We have already made sure * that there is sufficient amount of request entries to not drop it. */ -static void qlt_load_data_segments(struct qla_tgt_prm *prm, - struct scsi_qla_host *vha) +static void qlt_load_data_segments(struct qla_tgt_prm *prm) { int cnt; uint32_t *dword_ptr; - int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); @@ -2512,17 +2496,16 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm, cnt++, prm->seg_cnt--) { *dword_ptr++ = cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); - if (enable_64bit_addressing) { - *dword_ptr++ = - cpu_to_le32(pci_dma_hi32( - sg_dma_address(prm->sg))); - } + + *dword_ptr++ = cpu_to_le32(pci_dma_hi32( + sg_dma_address(prm->sg))); + *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); prm->sg = sg_next(prm->sg); } - qlt_load_cont_data_segments(prm, vha); + qlt_load_cont_data_segments(prm); } static inline int qlt_has_data(struct qla_tgt_cmd *cmd) @@ -3136,7 +3119,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, CTIO7_FLAGS_STATUS_MODE_0); if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) - qlt_load_data_segments(&prm, vha); + qlt_load_data_segments(&prm); if (prm.add_status_pkt == 0) { if (xmit_type & QLA_TGT_XMIT_STATUS) { @@ -3272,7 +3255,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) CTIO7_FLAGS_STATUS_MODE_0); if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) - qlt_load_data_segments(&prm, vha); + qlt_load_data_segments(&prm); cmd->state = QLA_TGT_STATE_NEED_DATA; cmd->cmd_sent_to_fw = 1; @@ -6179,7 +6162,6 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) ql_dbg(ql_dbg_tgt, base_vha, 0xe067, "qla_target(%d): using 64 Bit PCI addressing", base_vha->vp_idx); - tgt->tgt_enable_64bit_addr = 1; /* 3 is reserved */ tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 
bb8f73a86b2b..902685f85506 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -809,7 +809,6 @@ struct qla_tgt { int datasegs_per_cmd, datasegs_per_cont, sg_tablesize; /* Target's flags, serialized by pha->hardware_lock */ - unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */ unsigned int link_reinit_iocb_pending:1; /* -- cgit From b5399f7d80a249fafa870d8da9fcd0d077d943b9 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:27 -0700 Subject: scsi: qla2xxx: Remove datasegs_per_cmd and datasegs_per_cont field These fields only ever hold one set of values. Replace them with macros to reduce cache thrashing. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_target.c | 12 +++++------- drivers/scsi/qla2xxx/qla_target.h | 2 +- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e8aa58dfa93c..84e813002982 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2228,10 +2228,10 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) * If greater than four sg entries then we need to allocate * the continuation entries */ - if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) + if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - - prm->tgt->datasegs_per_cmd, - prm->tgt->datasegs_per_cont); + QLA_TGT_DATASEGS_PER_CMD_24XX, + QLA_TGT_DATASEGS_PER_CONT_24XX); } else { /* DIF */ if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || @@ -2448,7 +2448,7 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) /* Load continuation entry data segments */ for (cnt = 0; - cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt; + cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; cnt++, prm->seg_cnt--) { *dword_ptr++ = cpu_to_le32(pci_dma_lo32 @@ -2492,7 +2492,7 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm) /* Load command entry data segments */ for (cnt = 0; - (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt; + (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; cnt++, prm->seg_cnt--) { *dword_ptr++ = cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); @@ -6164,8 +6164,6 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) base_vha->vp_idx); /* 3 is reserved */ tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); - tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; - tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; mutex_lock(&qla_tgt_mutex); list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 902685f85506..7fe02d036bdf 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -806,7 +806,7 @@ struct qla_tgt { */ int atio_irq_cmd_count; - int datasegs_per_cmd, datasegs_per_cont, sg_tablesize; + int sg_tablesize; /* Target's flags, serialized by pha->hardware_lock */ unsigned int link_reinit_iocb_pending:1; -- cgit From 60a9eadb19f33a7b3e183207b5b5e1d01d8a6add Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:28 -0700 Subject: scsi: qla2xxx: Move target stat counters from vha to qpair. Move counters to qpair to reduce cache misses. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 27 +++++++++++---------- drivers/scsi/qla2xxx/qla_dfs.c | 48 +++++++++++++++++++++++++++++++------- drivers/scsi/qla2xxx/qla_iocb.c | 2 +- drivers/scsi/qla2xxx/qla_target.c | 13 +++++------ drivers/scsi/qla2xxx/tcm_qla2xxx.c | 8 +++---- 5 files changed, 63 insertions(+), 35 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 17b13dd92511..e1af9db3691d 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3183,6 +3183,18 @@ struct qla_tc_param { #define QLA_MAX_VPORTS_QLA24XX 128 #define QLA_MAX_VPORTS_QLA25XX 256 +struct qla_tgt_counters { + uint64_t qla_core_sbt_cmd; + uint64_t core_qla_que_buf; + uint64_t qla_core_ret_ctio; + uint64_t core_qla_snd_status; + uint64_t qla_core_ret_sta_ctio; + uint64_t core_qla_free_cmd; + uint64_t num_q_full_sent; + uint64_t num_alloc_iocb_failed; + uint64_t num_term_xchg_sent; +}; + struct qla_qpair; /* Response queue data structure */ @@ -3285,6 +3297,7 @@ struct qla_qpair { struct list_head qp_list_elem; /* vha->qp_list */ struct list_head hints_list; uint16_t cpuid; + struct qla_tgt_counters tgt_counters; }; /* Place holder for FW buffer parameters */ @@ -3964,18 +3977,6 @@ struct qla_hw_data { int allow_cna_fw_dump; }; -struct qla_tgt_counters { - uint64_t qla_core_sbt_cmd; - uint64_t core_qla_que_buf; - uint64_t qla_core_ret_ctio; - uint64_t core_qla_snd_status; - uint64_t qla_core_ret_sta_ctio; - uint64_t core_qla_free_cmd; - uint64_t num_q_full_sent; - uint64_t num_alloc_iocb_failed; - uint64_t num_term_xchg_sent; -}; - /* * Qlogic scsi host structure */ @@ -4140,10 +4141,8 @@ typedef struct scsi_qla_host { struct fc_host_statistics fc_host_stat; struct qla_statistics qla_stats; struct bidi_statistics bidi_stats; - atomic_t vref_count; struct qla8044_reset_template reset_tmplt; - struct qla_tgt_counters tgt_counters; uint16_t bbcr; struct name_list_extended gnl; /* Count of active session/fcport */ diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 63d7374dce77..d231e7156134 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -164,26 +164,56 @@ static int qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) { struct scsi_qla_host *vha = s->private; + struct qla_qpair *qpair = vha->hw->base_qpair; + uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio, + core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd, + num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent; + u16 i; + + qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd; + core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf; + qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio; + core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status; + qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio; + core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd; + num_q_full_sent = qpair->tgt_counters.num_q_full_sent; + num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed; + num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent; + + for (i = 0; i < vha->hw->max_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd; + core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf; + qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio; + core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status; + qla_core_ret_sta_ctio += + qpair->tgt_counters.qla_core_ret_sta_ctio; + 
core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd; + num_q_full_sent += qpair->tgt_counters.num_q_full_sent; + num_alloc_iocb_failed += + qpair->tgt_counters.num_alloc_iocb_failed; + num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent; + } seq_puts(s, "Target Counters\n"); seq_printf(s, "qla_core_sbt_cmd = %lld\n", - vha->tgt_counters.qla_core_sbt_cmd); + qla_core_sbt_cmd); seq_printf(s, "qla_core_ret_sta_ctio = %lld\n", - vha->tgt_counters.qla_core_ret_sta_ctio); + qla_core_ret_sta_ctio); seq_printf(s, "qla_core_ret_ctio = %lld\n", - vha->tgt_counters.qla_core_ret_ctio); + qla_core_ret_ctio); seq_printf(s, "core_qla_que_buf = %lld\n", - vha->tgt_counters.core_qla_que_buf); + core_qla_que_buf); seq_printf(s, "core_qla_snd_status = %lld\n", - vha->tgt_counters.core_qla_snd_status); + core_qla_snd_status); seq_printf(s, "core_qla_free_cmd = %lld\n", - vha->tgt_counters.core_qla_free_cmd); + core_qla_free_cmd); seq_printf(s, "num alloc iocb failed = %lld\n", - vha->tgt_counters.num_alloc_iocb_failed); + num_alloc_iocb_failed); seq_printf(s, "num term exchange sent = %lld\n", - vha->tgt_counters.num_term_xchg_sent); + num_term_xchg_sent); seq_printf(s, "num Q full sent = %lld\n", - vha->tgt_counters.num_q_full_sent); + num_q_full_sent); /* DIF stats */ seq_printf(s, "DIF Inp Bytes = %lld\n", diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 6c710313adce..ac49febbac76 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2189,7 +2189,7 @@ skip_cmd_array: } queuing_error: - vha->tgt_counters.num_alloc_iocb_failed++; + qpair->tgt_counters.num_alloc_iocb_failed++; return pkt; } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 84e813002982..12e76cc37fb0 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3078,9 +3078,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, spin_lock_irqsave(qpair->qp_lock_ptr, flags); if (xmit_type == QLA_TGT_XMIT_STATUS) - vha->tgt_counters.core_qla_snd_status++; + qpair->tgt_counters.core_qla_snd_status++; else - vha->tgt_counters.core_qla_que_buf++; + qpair->tgt_counters.core_qla_que_buf++; if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { /* @@ -3500,7 +3500,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair, ret = 1; } - vha->tgt_counters.num_term_xchg_sent++; + qpair->tgt_counters.num_term_xchg_sent++; pkt->entry_count = 1; pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; @@ -5103,7 +5103,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair, return -ENOMEM; } - vha->tgt_counters.num_q_full_sent++; + qpair->tgt_counters.num_q_full_sent++; pkt->entry_count = 1; pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; @@ -5466,13 +5466,12 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, static void qlt_response_pkt(struct scsi_qla_host *vha, struct rsp_que *rsp, response_t *pkt) { - struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0xe05d, - "qla_target(%d): Response pkt %x received, but no " - "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); + "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", + vha->vp_idx, pkt->entry_type, vha->hw); return; } diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 00ae1f4090e2..c4b414833b86 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ 
b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -283,7 +283,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) WARN_ON(cmd->trc_flags & TRC_CMD_FREE); - cmd->vha->tgt_counters.qla_core_ret_sta_ctio++; + cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++; cmd->trc_flags |= TRC_CMD_FREE; transport_generic_free_cmd(&cmd->se_cmd, 0); } @@ -295,7 +295,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) */ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) { - cmd->vha->tgt_counters.core_qla_free_cmd++; + cmd->qpair->tgt_counters.core_qla_free_cmd++; cmd->cmd_in_wq = 1; WARN_ON(cmd->trc_flags & TRC_CMD_DONE); @@ -491,7 +491,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, } #endif - cmd->vha->tgt_counters.qla_core_sbt_cmd++; + cmd->qpair->tgt_counters.qla_core_sbt_cmd++; return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], cmd->unpacked_lun, data_length, fcp_task_attr, data_dir, flags); @@ -519,7 +519,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) } spin_unlock_irqrestore(&cmd->cmd_lock, flags); - cmd->vha->tgt_counters.qla_core_ret_ctio++; + cmd->qpair->tgt_counters.qla_core_ret_ctio++; if (!cmd->write_data_transferred) { /* * Check if se_cmd has already been aborted via LUN_RESET, and -- cgit From b945e777f95aa00c000fd2376bec85618897dd04 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 13 Jun 2017 20:47:29 -0700 Subject: scsi: qla2xxx: Include Exchange offload/Extended Login into FW dump Add missing memory dump of Exchange Offload and Extended login into FW dump. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_dbg.c | 49 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_dbg.h | 11 +++++++++ drivers/scsi/qla2xxx/qla_init.c | 7 ++++++ drivers/scsi/qla2xxx/qla_os.c | 3 --- 4 files changed, 67 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index c0c90dcc7c7b..f91ee717202d 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -497,6 +497,50 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) return (char *)iter_reg + ntohl(fcec->size); } +static inline void * +qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) +{ + struct qla2xxx_offld_chain *c = ptr; + + if (!ha->exlogin_buf) + return ptr; + + *last_chain = &c->type; + + c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN); + c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + + ha->exlogin_size); + c->size = cpu_to_be32(ha->exlogin_size); + c->addr = cpu_to_be64(ha->exlogin_buf_dma); + + ptr += sizeof(struct qla2xxx_offld_chain); + memcpy(ptr, ha->exlogin_buf, ha->exlogin_size); + + return (char *)ptr + cpu_to_be32(c->size); +} + +static inline void * +qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) +{ + struct qla2xxx_offld_chain *c = ptr; + + if (!ha->exchoffld_buf) + return ptr; + + *last_chain = &c->type; + + c->type = cpu_to_be32(DUMP_CHAIN_EXCHG); + c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + + ha->exchoffld_size); + c->size = cpu_to_be32(ha->exchoffld_size); + c->addr = cpu_to_be64(ha->exchoffld_buf_dma); + + ptr += sizeof(struct qla2xxx_offld_chain); + memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size); + + return (char *)ptr + cpu_to_be32(c->size); +} + static inline void * qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, uint32_t 
**last_chain) @@ -1606,6 +1650,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); *last_chain |= htonl(DUMP_CHAIN_LAST); @@ -1932,6 +1977,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); + nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); *last_chain |= htonl(DUMP_CHAIN_LAST); @@ -2443,6 +2490,8 @@ copy_queue: nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); + nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); *last_chain |= htonl(DUMP_CHAIN_LAST); diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index f60138f66dce..8877aa97d829 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -232,6 +232,15 @@ struct qla2xxx_fce_chain { uint32_t eregs[8]; }; +/* used by exchange off load and extended login offload */ +struct qla2xxx_offld_chain { + uint32_t type; + uint32_t chain_size; + + uint32_t size; + u64 addr; +}; + struct qla2xxx_mq_chain { uint32_t type; uint32_t chain_size; @@ -258,6 +267,8 @@ struct qla2xxx_mqueue_chain { #define DUMP_CHAIN_FCE 0x7FFFFAF0 #define DUMP_CHAIN_MQ 0x7FFFFAF1 #define DUMP_CHAIN_QUEUE 0x7FFFFAF2 +#define DUMP_CHAIN_EXLOGIN 0x7FFFFAF3 +#define DUMP_CHAIN_EXCHG 0x7FFFFAF4 #define DUMP_CHAIN_LAST 0x80000000 struct qla2xxx_fw_dump { diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4a659460d6e6..1d815ab779ea 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2538,6 +2538,13 @@ cont_alloc: ha->chain_offset = dump_size; dump_size += mq_size + fce_size; + if (ha->exchoffld_buf) + dump_size += sizeof(struct qla2xxx_offld_chain) + + ha->exchoffld_size; + if (ha->exlogin_buf) + dump_size += sizeof(struct qla2xxx_offld_chain) + + ha->exlogin_size; + allocate: ha->fw_dump = vmalloc(dump_size); if (!ha->fw_dump) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 88e115fcea60..f7c4f723c405 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4176,9 +4176,6 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) if (!IS_EXCHG_OFFLD_CAPABLE(ha)) return QLA_SUCCESS; - ql_log(ql_log_info, vha, 0xd014, - "Exchange offload count: %d.\n", ql2xexlogins); - max_cnt = 0; rval = qla_get_exchoffld_status(vha, &size, &max_cnt); if (rval != QLA_SUCCESS) { -- cgit From 34e0badda448f798e932db76723334ffbf8bc06b Mon Sep 17 00:00:00 2001 From: "himanshu.madhani@cavium.com" Date: Tue, 13 Jun 2017 20:47:30 -0700 Subject: scsi: qla2xxx: Update driver version to 9.01.00.00-k Signed-off-by: Himanshu Madhani Signed-off-by: Giridhar Malavali Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 45bc84e8e3bf..dcbb9bb05e99 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "9.00.00.00-k" +#define QLA2XXX_VERSION "9.01.00.00-k" #define QLA_DRIVER_MAJOR_VER 9 -#define QLA_DRIVER_MINOR_VER 0 +#define QLA_DRIVER_MINOR_VER 1 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 -- cgit From a5d42f4cffa58d0e80d92dd11c810a22f14d41b3 Mon Sep 17 00:00:00 2001 From: Duane Grigsby Date: Wed, 21 Jun 2017 13:48:41 -0700 Subject: scsi: qla2xxx: Add FC-NVMe port discovery and PRLI handling Added logic to change the login process into an optional PRLI step for FC-NVMe ports as a separate operation, such that we can change the type to 0x28 (NVMe). Currently, the driver performs the PLOGI/PRLI together as one operation, but if the discovered port is an NVMe port then we first issue the PLOGI and then we issue the PRLI. Also, the fabric discovery logic was changed to mark each discovered FC NVMe port, so that we can register them with the FC-NVMe transport later. Signed-off-by: Darren Trapp Signed-off-by: Duane Grigsby Signed-off-by: Anil Gurumurthy Signed-off-by: Giridhar Malavali Signed-off-by: Himanshu Madhani Reviewed-by: Johannes Thumshirn Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_dbg.c | 9 +- drivers/scsi/qla2xxx/qla_def.h | 30 ++++++- drivers/scsi/qla2xxx/qla_fw.h | 13 ++- drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 168 ++++++++++++++++++++++++++++++++++++-- drivers/scsi/qla2xxx/qla_iocb.c | 21 +++++ drivers/scsi/qla2xxx/qla_mbx.c | 33 +++++--- drivers/scsi/qla2xxx/qla_os.c | 4 + drivers/scsi/qla2xxx/qla_target.c | 4 +- 9 files changed, 256 insertions(+), 27 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index f91ee717202d..c8057c798125 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -15,9 +15,10 @@ * | | | 0x015b-0x0160 | * | | | 0x016e | * | Mailbox commands | 0x1199 | 0x1193 | - * | Device Discovery | 0x2004 | 0x2016 | - * | | | 0x2011-0x2012, | - * | | | 0x2099-0x20a4 | + * | Device Discovery | 0x2131 | 0x210e-0x2116 | + * | | | 0x211a | + * | | | 0x211c-0x2128 | + * | | | 0x212a-0x2130 | * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | @@ -59,7 +60,7 @@ * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc010 | | - * | Misc | 0xd301 | 0xd031-0xd0ff | + * | Misc | 0xd302 | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | * | Target Mode | 0xe081 | | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e1af9db3691d..6f8df9cea8ff 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -343,6 +343,7 @@ struct srb_iocb { #define SRB_LOGIN_RETRIED BIT_0 #define SRB_LOGIN_COND_PLOGI BIT_1 #define SRB_LOGIN_SKIP_PRLI BIT_2 +#define SRB_LOGIN_NVME_PRLI BIT_3 uint16_t data[2]; u32 iop[2]; } logio; @@ -436,6 +437,7 @@ struct srb_iocb { #define SRB_NACK_PLOGI 16 #define SRB_NACK_PRLI 17 #define SRB_NACK_LOGO 18 +#define SRB_PRLI_CMD 21 enum { TYPE_SRB, @@ -1088,6 +1090,7 @@ struct mbx_cmd_32 { #define MBX_1 BIT_1 #define MBX_0 BIT_0 +#define RNID_TYPE_PORT_LOGIN 0x7 #define RNID_TYPE_SET_VERSION 0x9 #define 
RNID_TYPE_ASIC_TEMP 0xC @@ -2152,6 +2155,7 @@ typedef struct { uint8_t fabric_port_name[WWN_SIZE]; uint16_t fp_speed; uint8_t fc4_type; + uint8_t fc4f_nvme; /* nvme fc4 feature bits */ } sw_info_t; /* FCP-4 types */ @@ -2180,7 +2184,8 @@ typedef enum { FCT_SWITCH, FCT_BROADCAST, FCT_INITIATOR, - FCT_TARGET + FCT_TARGET, + FCT_NVME } fc_port_type_t; enum qla_sess_deletion { @@ -2237,10 +2242,12 @@ enum fcport_mgt_event { FCME_RSCN, FCME_GIDPN_DONE, FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */ + FCME_PRLI_DONE, FCME_GNL_DONE, FCME_GPSC_DONE, FCME_GPDB_DONE, FCME_GPNID_DONE, + FCME_GFFID_DONE, FCME_DELETE_DONE, }; @@ -2274,6 +2281,16 @@ typedef struct fc_port { unsigned int login_pause:1; unsigned int login_succ:1; + struct work_struct nvme_del_work; + atomic_t nvme_ref_count; + uint32_t nvme_prli_service_param; +#define NVME_PRLI_SP_CONF BIT_7 +#define NVME_PRLI_SP_INITIATOR BIT_5 +#define NVME_PRLI_SP_TARGET BIT_4 +#define NVME_PRLI_SP_DISCOVERY BIT_3 + uint8_t nvme_flag; +#define NVME_FLAG_REGISTERED 4 + struct fc_port *conflict; unsigned char logout_completed; int generation; @@ -2306,6 +2323,7 @@ typedef struct fc_port { u32 supported_classes; uint8_t fc4_type; + uint8_t fc4f_nvme; uint8_t scan_state; unsigned long last_queue_full; @@ -2313,6 +2331,8 @@ typedef struct fc_port { uint16_t port_id; + struct nvme_fc_remote_port *nvme_remote_port; + unsigned long retry_delay_timestamp; struct qla_tgt_sess *tgt_session; struct ct_sns_desc ct_desc; @@ -2745,7 +2765,7 @@ struct ct_sns_req { struct { uint8_t reserved; - uint8_t port_name[3]; + uint8_t port_id[3]; } gff_id; struct { @@ -3052,6 +3072,7 @@ enum qla_work_type { QLA_EVT_GPNID_DONE, QLA_EVT_NEW_SESS, QLA_EVT_GPDB, + QLA_EVT_PRLI, QLA_EVT_GPSC, QLA_EVT_UPD_FCPORT, QLA_EVT_GNL, @@ -4007,6 +4028,7 @@ typedef struct scsi_qla_host { uint32_t qpairs_available:1; uint32_t qpairs_req_created:1; uint32_t qpairs_rsp_created:1; + uint32_t nvme_enabled:1; } flags; atomic_t loop_state; @@ -4085,6 +4107,10 @@ typedef struct scsi_qla_host { uint8_t port_name[WWN_SIZE]; uint8_t fabric_node_name[WWN_SIZE]; + struct nvme_fc_local_port *nvme_local_port; + atomic_t nvme_ref_count; + struct list_head nvme_rport_list; + uint16_t fcoe_vlan_id; uint16_t fcoe_fcf_idx; uint8_t fcoe_vn_port_mac[6]; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 1f808928763b..dcae62d4cbeb 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -37,6 +37,12 @@ struct port_database_24xx { #define PDF_CLASS_2 BIT_4 #define PDF_HARD_ADDR BIT_1 + /* + * for NVMe, the login_state field has been + * split into nibbles. + * The lower nibble is for FCP. + * The upper nibble is for NVMe. + */ uint8_t current_login_state; uint8_t last_login_state; #define PDS_PLOGI_PENDING 0x03 @@ -69,7 +75,11 @@ struct port_database_24xx { uint8_t port_name[WWN_SIZE]; uint8_t node_name[WWN_SIZE]; - uint8_t reserved_3[24]; + uint8_t reserved_3[4]; + uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */ + uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */ + uint16_t nvme_first_burst_size; + uint8_t reserved_4[14]; }; /* @@ -819,6 +829,7 @@ struct logio_entry_24xx { #define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */ #define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */ #define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */ +#define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */ #define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */ #define LCF_IMPL_LOGO_ALL BIT_5 /* Implicit LOGO to all ports. 
*/ #define LCF_COND_PLOGI BIT_4 /* PLOGI only if not logged-in. */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index beebf96c23d4..6fbee11c1a18 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -99,6 +99,7 @@ extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *); int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, struct imm_ntfy_from_isp *, int); int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 1d815ab779ea..ed898b7a32c8 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -37,8 +37,11 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); +static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *); static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *, struct event_arg *); +static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, + struct event_arg *); /* SRB Extensions ---------------------------------------------------------- */ @@ -191,6 +194,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, lio->timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_login_sp_done; lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; + + if (fcport->fc4f_nvme) + lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; + if (data[1] & QLA_LOGIO_LOGIN_RETRIED) lio->u.logio.flags |= SRB_LOGIN_RETRIED; rval = qla2x00_start_sp(sp); @@ -327,7 +334,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, u16 i, n, found = 0, loop_id; port_id_t id; u64 wwn; - u8 opt = 0; + u8 opt = 0, current_login_state; fcport = ea->fcport; @@ -414,7 +421,12 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport->login_pause = 1; } - switch (e->current_login_state) { + if (fcport->fc4f_nvme) + current_login_state = e->current_login_state >> 4; + else + current_login_state = e->current_login_state & 0xf; + + switch (current_login_state) { case DSC_LS_PRLI_COMP: ql_dbg(ql_dbg_disc, vha, 0x20e4, "%s %d %8phC post gpdb\n", @@ -422,7 +434,6 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, opt = PDO_FORCE_ADISC; qla24xx_post_gpdb_work(vha, fcport, opt); break; - case DSC_LS_PORT_UNAVAIL: default: if (fcport->loop_id == FC_NO_LOOP_ID) { @@ -665,6 +676,104 @@ gpd_error_out: sp->free(sp); } +static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_PRLI); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + + return qla2x00_post_work(vha, e); +} + +static void +qla2x00_async_prli_sp_done(void *ptr, int res) +{ + srb_t *sp = ptr; + struct scsi_qla_host *vha = sp->vha; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0x2129, + "%s %8phC res %d \n", __func__, + sp->fcport->port_name, res); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + + if (!test_bit(UNLOADING, &vha->dpc_flags)) { + 
memset(&ea, 0, sizeof(ea)); + ea.event = FCME_PRLI_DONE; + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.iop[0] = lio->u.logio.iop[0]; + ea.iop[1] = lio->u.logio.iop[1]; + ea.sp = sp; + + qla2x00_fcport_event_handler(vha, &ea); + } + + sp->free(sp); +} + +int +qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + struct srb_iocb *lio; + int rval = QLA_FUNCTION_FAILED; + + if (!vha->flags.online) + return rval; + + if (fcport->fw_login_state == DSC_LS_PLOGI_PEND || + fcport->fw_login_state == DSC_LS_PLOGI_COMP || + fcport->fw_login_state == DSC_LS_PRLI_PEND) + return rval; + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + return rval; + + fcport->flags |= FCF_ASYNC_SENT; + fcport->logout_completed = 0; + + sp->type = SRB_PRLI_CMD; + sp->name = "prli"; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + lio = &sp->u.iocb_cmd; + lio->timeout = qla2x00_async_iocb_timeout; + sp->done = qla2x00_async_prli_sp_done; + lio->u.logio.flags = 0; + + if (fcport->fc4f_nvme) + lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags |= FCF_LOGIN_NEEDED; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + goto done_free_sp; + } + + ql_dbg(ql_dbg_disc, vha, 0x211b, + "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n", + fcport->port_name, sp->handle, fcport->loop_id, + fcport->d_id.b24, fcport->login_retry); + + return rval; + +done_free_sp: + sp->free(sp); + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) { @@ -1126,6 +1235,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */ qla24xx_handle_plogi_done_event(vha, ea); break; + case FCME_PRLI_DONE: + qla24xx_handle_prli_done_event(vha, ea); + break; case FCME_GPDB_DONE: qla24xx_handle_gpdb_event(vha, ea); break; @@ -1307,6 +1419,27 @@ qla24xx_async_abort_command(srb_t *sp) return qla24xx_async_abort_cmd(sp); } +static void +qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) +{ + switch (ea->data[0]) { + case MBS_COMMAND_COMPLETE: + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, ea->fcport->port_name); + + ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; + ea->fcport->logout_on_delete = 1; + qla24xx_post_gpdb_work(vha, ea->fcport, 0); + break; + default: + ql_dbg(ql_dbg_disc, vha, 0x2119, + "%s %d %8phC unhandle event of %x\n", + __func__, __LINE__, ea->fcport->port_name, ea->data[0]); + break; + } +} + static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { @@ -1319,12 +1452,19 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. 
*/ - ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC post gpdb\n", - __func__, __LINE__, ea->fcport->port_name); - ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; - ea->fcport->logout_on_delete = 1; - qla24xx_post_gpdb_work(vha, ea->fcport, 0); + if (ea->fcport->fc4f_nvme) { + ql_dbg(ql_dbg_disc, vha, 0x2117, + "%s %d %8phC post prli\n", + __func__, __LINE__, ea->fcport->port_name); + qla24xx_post_prli_work(vha, ea->fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ea, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, ea->fcport->port_name); + ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; + ea->fcport->logout_on_delete = 1; + qla24xx_post_gpdb_work(vha, ea->fcport, 0); + } break; case MBS_COMMAND_ERROR: ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", @@ -4646,6 +4786,16 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) new_fcport->fp_speed = swl[swl_idx].fp_speed; new_fcport->fc4_type = swl[swl_idx].fc4_type; + new_fcport->nvme_flag = 0; + if (vha->flags.nvme_enabled && + swl[swl_idx].fc4f_nvme) { + new_fcport->fc4f_nvme = + swl[swl_idx].fc4f_nvme; + ql_log(ql_log_info, vha, 0x2131, + "FOUND: NVME port %8phC as FC Type 28h\n", + new_fcport->port_name); + } + if (swl[swl_idx].d_id.b.rsvd_1 != 0) { last_dev = 1; } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index ac49febbac76..daa53235a28a 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2210,6 +2210,23 @@ qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp); } +static void +qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) +{ + struct srb_iocb *lio = &sp->u.iocb_cmd; + + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); + if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) + logio->control_flags |= LCF_NVME_PRLI; + + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->port_id[0] = sp->fcport->d_id.b.al_pa; + logio->port_id[1] = sp->fcport->d_id.b.area; + logio->port_id[2] = sp->fcport->d_id.b.domain; + logio->vp_index = sp->vha->vp_idx; +} + static void qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) { @@ -2217,6 +2234,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); + if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) @@ -3162,6 +3180,9 @@ qla2x00_start_sp(srb_t *sp) qla24xx_login_iocb(sp, pkt) : qla2x00_login_iocb(sp, pkt); break; + case SRB_PRLI_CMD: + qla24xx_prli_iocb(sp, pkt); + break; case SRB_LOGOUT_CMD: IS_FWI2_CAPABLE(ha) ? qla24xx_logout_iocb(sp, pkt) : diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f02a2baffb5b..1eac67e8fdfd 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -5968,14 +5968,22 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, { int rval = QLA_SUCCESS; uint64_t zero = 0; + u8 current_login_state, last_login_state; + + if (fcport->fc4f_nvme) { + current_login_state = pd->current_login_state >> 4; + last_login_state = pd->last_login_state >> 4; + } else { + current_login_state = pd->current_login_state & 0xf; + last_login_state = pd->last_login_state & 0xf; + } /* Check for logged in state. 
*/ - if (pd->current_login_state != PDS_PRLI_COMPLETE && - pd->last_login_state != PDS_PRLI_COMPLETE) { + if (current_login_state != PDS_PRLI_COMPLETE && + last_login_state != PDS_PRLI_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x119a, "Unable to verify login-state (%x/%x) for loop_id %x.\n", - pd->current_login_state, pd->last_login_state, - fcport->loop_id); + current_login_state, last_login_state, fcport->loop_id); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } @@ -5998,12 +6006,17 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0; - /* If not target must be initiator or unknown type. */ - if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) - fcport->port_type = FCT_INITIATOR; - else - fcport->port_type = FCT_TARGET; - + if (fcport->fc4f_nvme) { + fcport->nvme_prli_service_param = + pd->prli_nvme_svc_param_word_3; + fcport->port_type = FCT_NVME; + } else { + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + } /* Passback COS information. */ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? FC_COS_CLASS2 : FC_COS_CLASS3; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f7c4f723c405..aee884d63e55 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4428,6 +4428,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->plogi_ack_list); INIT_LIST_HEAD(&vha->qp_list); INIT_LIST_HEAD(&vha->gnl.fcports); + INIT_LIST_HEAD(&vha->nvme_rport_list); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); @@ -4717,6 +4718,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla24xx_async_gpdb(vha, e->u.fcport.fcport, e->u.fcport.opt); break; + case QLA_EVT_PRLI: + qla24xx_async_prli(vha, e->u.fcport.fcport); + break; case QLA_EVT_GPSC: qla24xx_async_gpsc(vha, e->u.fcport.fcport); break; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 12e76cc37fb0..2a0173e5d10e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -963,7 +963,6 @@ static void qlt_free_session_done(struct work_struct *work) sess->logout_on_delete, sess->keep_nport_handle, sess->send_els_logo); - if (!IS_SW_RESV_ADDR(sess->d_id)) { if (sess->send_els_logo) { qlt_port_logo_t logo; @@ -1118,6 +1117,9 @@ void qlt_unreg_sess(struct fc_port *sess) sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; + if (sess->nvme_flag & NVME_FLAG_REGISTERED) + schedule_work(&sess->nvme_del_work); + INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); } -- cgit From 7401bc18d1ee39e46b668341097e0183975c02eb Mon Sep 17 00:00:00 2001 From: Duane Grigsby Date: Wed, 21 Jun 2017 13:48:42 -0700 Subject: scsi: qla2xxx: Add FC-NVMe command handling This patch adds logic to handle the completion of FC-NVMe commands and creates a sub-command in the SRB command structure to manage NVMe commands. Signed-off-by: Darren Trapp Signed-off-by: Duane Grigsby Signed-off-by: Anil Gurumurthy Signed-off-by: Giridhar Malavali Signed-off-by: Himanshu Madhani Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 17 +++++++++ drivers/scsi/qla2xxx/qla_fw.h | 22 ++++++++++-- drivers/scsi/qla2xxx/qla_isr.c | 79 ++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_os.c | 18 ++++++++-- 4 files changed, 131 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6f8df9cea8ff..4d889eb2993e 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -412,6 +412,20 @@ struct srb_iocb { struct { struct imm_ntfy_from_isp *ntfy; } nack; + struct { + __le16 comp_status; + uint16_t rsp_pyld_len; + uint8_t aen_op; + void *desc; + + /* These are only used with ls4 requests */ + int cmd_len; + int rsp_len; + dma_addr_t cmd_dma; + dma_addr_t rsp_dma; + uint32_t dl; + uint32_t timeout_sec; + } nvme; } u; struct timer_list timer; @@ -437,6 +451,7 @@ struct srb_iocb { #define SRB_NACK_PLOGI 16 #define SRB_NACK_PRLI 17 #define SRB_NACK_LOGO 18 +#define SRB_NVME_CMD 19 #define SRB_PRLI_CMD 21 enum { @@ -4110,6 +4125,8 @@ typedef struct scsi_qla_host { struct nvme_fc_local_port *nvme_local_port; atomic_t nvme_ref_count; struct list_head nvme_rport_list; + atomic_t nvme_active_aen_cnt; + uint16_t nvme_last_rptd_aen; uint16_t fcoe_vlan_id; uint16_t fcoe_fcf_idx; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index dcae62d4cbeb..b9c9886e8b1d 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -7,6 +7,9 @@ #ifndef __QLA_FW_H #define __QLA_FW_H +#include +#include + #define MBS_CHECKSUM_ERROR 0x4010 #define MBS_INVALID_PRODUCT_KEY 0x4020 @@ -603,9 +606,14 @@ struct sts_entry_24xx { uint32_t residual_len; /* FW calc residual transfer length. */ - uint16_t reserved_1; + union { + uint16_t reserved_1; + uint16_t nvme_rsp_pyld_len; + }; + uint16_t state_flags; /* State flags. */ #define SF_TRANSFERRED_DATA BIT_11 +#define SF_NVME_ERSP BIT_6 #define SF_FCP_RSP_DMA BIT_0 uint16_t retry_delay; @@ -615,8 +623,16 @@ struct sts_entry_24xx { uint32_t rsp_residual_count; /* FCP RSP residual count. */ uint32_t sense_len; /* FCP SENSE length. */ - uint32_t rsp_data_len; /* FCP response data length. */ - uint8_t data[28]; /* FCP response/sense information. 
*/ + + union { + struct { + uint32_t rsp_data_len; /* FCP response data length */ + uint8_t data[28]; /* FCP rsp/sense information */ + }; + struct nvme_fc_ersp_iu nvme_ersp; + uint8_t nvme_ersp_data[32]; + }; + /* * If DIF Error is set in comp_status, these additional fields are * defined: diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 40385bc1d1fa..477aea7c9a88 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1799,6 +1799,79 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) sp->done(sp, 0); } +static void +qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) +{ + const char func[] = "NVME-IOCB"; + fc_port_t *fcport; + srb_t *sp; + struct srb_iocb *iocb; + struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; + uint16_t state_flags; + struct nvmefc_fcp_req *fd; + uint16_t ret = 0; + struct srb_iocb *nvme; + + sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); + if (!sp) + return; + + iocb = &sp->u.iocb_cmd; + fcport = sp->fcport; + iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status); + state_flags = le16_to_cpu(sts->state_flags); + fd = iocb->u.nvme.desc; + nvme = &sp->u.iocb_cmd; + + if (unlikely(nvme->u.nvme.aen_op)) + atomic_dec(&sp->vha->nvme_active_aen_cnt); + + /* + * State flags: Bit 6 and 0. + * If 0 is set, we don't care about 6. + * both cases resp was dma'd to host buffer + * if both are 0, that is good path case. + * if six is set and 0 is clear, we need to + * copy resp data from status iocb to resp buffer. + */ + if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { + iocb->u.nvme.rsp_pyld_len = 0; + } else if ((state_flags & SF_FCP_RSP_DMA)) { + iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); + } else if (state_flags & SF_NVME_ERSP) { + uint32_t *inbuf, *outbuf; + uint16_t iter; + + inbuf = (uint32_t *)&sts->nvme_ersp_data; + outbuf = (uint32_t *)fd->rspaddr; + iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); + iter = iocb->u.nvme.rsp_pyld_len >> 2; + for (; iter; iter--) + *outbuf++ = swab32(*inbuf++); + } else { /* unhandled case */ + ql_log(ql_log_warn, fcport->vha, 0x503a, + "NVME-%s error. Unhandled state_flags of %x\n", + sp->name, state_flags); + } + + fd->transferred_length = fd->payload_length - + le32_to_cpu(sts->residual_len); + + if (sts->entry_status) { + ql_log(ql_log_warn, fcport->vha, 0x5038, + "NVME-%s error - hdl=%x entry-status(%x).\n", + sp->name, sp->handle, sts->entry_status); + ret = QLA_FUNCTION_FAILED; + } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { + ql_log(ql_log_warn, fcport->vha, 0x5039, + "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n", + sp->name, sp->handle, sts->comp_status, + le32_to_cpu(sts->residual_len), sts->ox_id); + ret = QLA_FUNCTION_FAILED; + } + sp->done(sp, ret); +} + /** * qla2x00_process_response_queue() - Process response queue entries. * @ha: SCSI driver HA context @@ -2289,6 +2362,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } + /* NVME completion. 
*/ + if (sp->type == SRB_NVME_CMD) { + qla24xx_nvme_iocb_entry(vha, req, pkt); + return; + } + if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); return; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index aee884d63e55..5ad6328e5c33 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -695,8 +695,10 @@ qla2x00_sp_free_dma(void *ptr) } end: - CMD_SP(cmd) = NULL; - qla2x00_rel_sp(sp); + if (sp->type != SRB_NVME_CMD) { + CMD_SP(cmd) = NULL; + qla2x00_rel_sp(sp); + } } void @@ -5993,6 +5995,18 @@ qla2x00_timer(scsi_qla_host_t *vha) if (!list_empty(&vha->work_list)) start_dpc++; + /* + * FC-NVME + * see if the active AEN count has changed from what was last reported. + */ + if (atomic_read(&vha->nvme_active_aen_cnt) != vha->nvme_last_rptd_aen) { + vha->nvme_last_rptd_aen = + atomic_read(&vha->nvme_active_aen_cnt); + ql_log(ql_log_info, vha, 0x3002, + "reporting new aen count of %d to the fw\n", + vha->nvme_last_rptd_aen); + } + /* Schedule the DPC routine if needed */ if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || -- cgit From e84067d7430107a982858f11c5239542b56a8449 Mon Sep 17 00:00:00 2001 From: Duane Grigsby Date: Wed, 21 Jun 2017 13:48:43 -0700 Subject: scsi: qla2xxx: Add FC-NVMe F/W initialization and transport registration This code provides the interfaces to register remote and local ports of FC4 type 0x28 with the FC-NVMe transport and transports the requests (FC-NVMe FC link services and FC-NVMe command IUs) to the fabric. It also provides support for allocating h/w queues and aborting FC-NVMe FC requests. Signed-off-by: Darren Trapp Signed-off-by: Duane Grigsby Signed-off-by: Anil Gurumurthy Signed-off-by: Giridhar Malavali Signed-off-by: Himanshu Madhani Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/Makefile | 2 +- drivers/scsi/qla2xxx/qla_dbg.c | 2 +- drivers/scsi/qla2xxx/qla_def.h | 6 + drivers/scsi/qla2xxx/qla_gbl.h | 11 + drivers/scsi/qla2xxx/qla_init.c | 8 + drivers/scsi/qla2xxx/qla_iocb.c | 36 ++ drivers/scsi/qla2xxx/qla_isr.c | 19 + drivers/scsi/qla2xxx/qla_mbx.c | 21 ++ drivers/scsi/qla2xxx/qla_nvme.c | 756 ++++++++++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_nvme.h | 132 +++++++ drivers/scsi/qla2xxx/qla_os.c | 40 ++- 11 files changed, 1024 insertions(+), 9 deletions(-) create mode 100644 drivers/scsi/qla2xxx/qla_nvme.c create mode 100644 drivers/scsi/qla2xxx/qla_nvme.h diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile index 44def6bb4bb0..0b767a0bb308 100644 --- a/drivers/scsi/qla2xxx/Makefile +++ b/drivers/scsi/qla2xxx/Makefile @@ -1,6 +1,6 @@ qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ - qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o + qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index c8057c798125..26751d34bcf2 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -15,7 +15,7 @@ * | | | 0x015b-0x0160 | * | | | 0x016e | * | Mailbox commands | 0x1199 | 0x1193 | - * | Device Discovery | 0x2131 | 0x210e-0x2116 | + * | Device Discovery | 0x2134 | 0x210e-0x2116 | * | | | 0x211a | * | | | 0x211c-0x2128 | * | | | 0x212a-0x2130 | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 4d889eb2993e..0dbcb84011b0 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -37,6 +37,7 @@ #include "qla_bsg.h" #include "qla_nx.h" #include "qla_nx2.h" +#include "qla_nvme.h" #define QLA2XXX_DRIVER_NAME "qla2xxx" #define QLA2XXX_APIDEV "ql2xapidev" #define QLA2XXX_MANUFACTURER "QLogic Corporation" @@ -423,6 +424,7 @@ struct srb_iocb { int rsp_len; dma_addr_t cmd_dma; dma_addr_t rsp_dma; + enum nvmefc_fcp_datadir dir; uint32_t dl; uint32_t timeout_sec; } nvme; @@ -452,6 +454,7 @@ struct srb_iocb { #define SRB_NACK_PRLI 17 #define SRB_NACK_LOGO 18 #define SRB_NVME_CMD 19 +#define SRB_NVME_LS 20 #define SRB_PRLI_CMD 21 enum { @@ -467,6 +470,7 @@ typedef struct srb { uint8_t cmd_type; uint8_t pad[3]; atomic_t ref_count; + wait_queue_head_t nvme_ls_waitQ; struct fc_port *fcport; struct scsi_qla_host *vha; uint32_t handle; @@ -2298,6 +2302,7 @@ typedef struct fc_port { struct work_struct nvme_del_work; atomic_t nvme_ref_count; + wait_queue_head_t nvme_waitQ; uint32_t nvme_prli_service_param; #define NVME_PRLI_SP_CONF BIT_7 #define NVME_PRLI_SP_INITIATOR BIT_5 @@ -4124,6 +4129,7 @@ typedef struct scsi_qla_host { struct nvme_fc_local_port *nvme_local_port; atomic_t nvme_ref_count; + wait_queue_head_t nvme_waitQ; struct list_head nvme_rport_list; atomic_t nvme_active_aen_cnt; uint16_t nvme_last_rptd_aen; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 6fbee11c1a18..c6af45f7d5d6 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -9,6 +9,16 @@ #include +/* + * Global functions prototype in qla_nvme.c source file. 
+ */ +extern void qla_nvme_register_hba(scsi_qla_host_t *); +extern int qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *); +extern void qla_nvme_delete(scsi_qla_host_t *); +extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp); +extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *, + struct req_que *); + /* * Global Function Prototypes in qla_init.c source file. */ @@ -141,6 +151,7 @@ extern int ql2xiniexchg; extern int ql2xfwholdabts; extern int ql2xmvasynctoatio; extern int ql2xuctrlirq; +extern int ql2xnvmeenable; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index ed898b7a32c8..d06c3c758322 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4520,6 +4520,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->deleted = 0; fcport->logout_on_delete = 1; + if (fcport->fc4f_nvme) { + qla_nvme_register_remote(vha, fcport); + return; + } + qla2x00_set_fcport_state(fcport, FCS_ONLINE); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); @@ -4669,6 +4674,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) break; } while (0); + if (!vha->nvme_local_port && vha->flags.nvme_enabled) + qla_nvme_register_hba(vha); + if (rval) ql_dbg(ql_dbg_disc, vha, 0x2068, "Configure fabric error exit rval=%d.\n", rval); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index daa53235a28a..d40fa000615c 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -3155,6 +3155,39 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp, nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; } +/* + * Build NVME LS request + */ +static int +qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) +{ + struct srb_iocb *nvme; + int rval = QLA_SUCCESS; + + nvme = &sp->u.iocb_cmd; + cmd_pkt->entry_type = PT_LS4_REQUEST; + cmd_pkt->entry_count = 1; + cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT; + + cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + + cmd_pkt->tx_dseg_count = 1; + cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; + cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len; + cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma)); + cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma)); + + cmd_pkt->rx_dseg_count = 1; + cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; + cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len; + cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma)); + cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma)); + + return rval; +} + int qla2x00_start_sp(srb_t *sp) { @@ -3211,6 +3244,9 @@ qla2x00_start_sp(srb_t *sp) case SRB_FXIOCB_BCMD: qlafx00_fxdisc_iocb(sp, pkt); break; + case SRB_NVME_LS: + qla_nvme_ls(sp, pkt); + break; case SRB_ABT_CMD: IS_QLAFX00(ha) ? 
qlafx00_abort_iocb(sp, pkt) : diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 477aea7c9a88..011faa1dc618 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2828,6 +2828,21 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, sp->done(sp, 0); } +void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt, + struct req_que *req) +{ + srb_t *sp; + const char func[] = "LS4_IOCB"; + uint16_t comp_status; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + comp_status = le16_to_cpu(pkt->status); + sp->done(sp, comp_status); +} + /** * qla24xx_process_response_queue() - Process response queue entries. * @ha: SCSI driver HA context @@ -2901,6 +2916,10 @@ process_err: case CTIO_CRC2: qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); break; + case PT_LS4_REQUEST: + qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, + rsp->req); + break; case NOTIFY_ACK_TYPE: if (pkt->handle == QLA_TGT_SKIP_HANDLE) qlt_response_pkt_all_vps(vha, rsp, diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 1eac67e8fdfd..0764b6172ed1 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -560,6 +560,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, } #define EXTENDED_BB_CREDITS BIT_0 +#define NVME_ENABLE_FLAG BIT_3 + /* * qla2x00_execute_fw * Start adapter firmware. @@ -601,6 +603,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) } else mcp->mb[4] = 0; + if (ql2xnvmeenable && IS_QLA27XX(ha)) + mcp->mb[4] |= NVME_ENABLE_FLAG; + if (ha->flags.exlogins_enabled) mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; @@ -941,6 +946,22 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, "%s: Firmware supports Exchange Offload 0x%x\n", __func__, ha->fw_attributes_h); + + /* bit 26 of fw_attributes */ + if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) { + struct init_cb_24xx *icb; + + icb = (struct init_cb_24xx *)ha->init_cb; + /* + * fw supports nvme and driver load + * parameter requested nvme + */ + vha->flags.nvme_enabled = 1; + icb->firmware_options_2 &= cpu_to_le32(~0xf); + ha->zio_mode = 0; + ha->zio_timer = 0; + } + } if (IS_QLA27XX(ha)) { diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c new file mode 100644 index 000000000000..1da8fa8f641d --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -0,0 +1,756 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2017 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. 
+ */ +#include "qla_nvme.h" +#include "qla_def.h" +#include +#include +#include +#include + +static struct nvme_fc_port_template qla_nvme_fc_transport; + +static void qla_nvme_unregister_remote_port(struct work_struct *); + +int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct nvme_rport *rport; + int ret; + + if (fcport->nvme_flag & NVME_FLAG_REGISTERED) + return 0; + + if (!vha->flags.nvme_enabled) { + ql_log(ql_log_info, vha, 0x2100, + "%s: Not registering target since Host NVME is not enabled\n", + __func__); + return 0; + } + + if (!(fcport->nvme_prli_service_param & + (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY))) + return 0; + + INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port); + rport = kzalloc(sizeof(*rport), GFP_KERNEL); + if (!rport) { + ql_log(ql_log_warn, vha, 0x2101, + "%s: unable to alloc memory\n", __func__); + return -ENOMEM; + } + + rport->req.port_name = wwn_to_u64(fcport->port_name); + rport->req.node_name = wwn_to_u64(fcport->node_name); + rport->req.port_role = 0; + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) + rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR; + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET) + rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET; + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY) + rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; + + rport->req.port_id = fcport->d_id.b24; + + ql_log(ql_log_info, vha, 0x2102, + "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n", + __func__, rport->req.port_name, rport->req.node_name, + rport->req.port_id); + + ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req, + &fcport->nvme_remote_port); + if (ret) { + ql_log(ql_log_warn, vha, 0x212e, + "Failed to register remote port. Transport returned %d\n", + ret); + return ret; + } + + fcport->nvme_remote_port->private = fcport; + fcport->nvme_flag |= NVME_FLAG_REGISTERED; + atomic_set(&fcport->nvme_ref_count, 1); + init_waitqueue_head(&fcport->nvme_waitQ); + rport->fcport = fcport; + list_add_tail(&rport->list, &vha->nvme_rport_list); +#endif + return 0; +} + +/* Allocate a queue for NVMe traffic */ +static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx, + u16 qsize, void **handle) +{ + struct scsi_qla_host *vha; + struct qla_hw_data *ha; + struct qla_qpair *qpair; + + if (!qidx) + qidx++; + + vha = (struct scsi_qla_host *)lport->private; + ha = vha->hw; + + ql_log(ql_log_info, vha, 0x2104, + "%s: handle %p, idx =%d, qsize %d\n", + __func__, handle, qidx, qsize); + + if (qidx > qla_nvme_fc_transport.max_hw_queues) { + ql_log(ql_log_warn, vha, 0x212f, + "%s: Illegal qidx=%d. 
Max=%d\n", + __func__, qidx, qla_nvme_fc_transport.max_hw_queues); + return -EINVAL; + } + + if (ha->queue_pair_map[qidx]) { + *handle = ha->queue_pair_map[qidx]; + ql_log(ql_log_info, vha, 0x2121, + "Returning existing qpair of %p for idx=%x\n", + *handle, qidx); + return 0; + } + + ql_log(ql_log_warn, vha, 0xffff, + "allocating q for idx=%x w/o cpu mask\n", qidx); + qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); + if (qpair == NULL) { + ql_log(ql_log_warn, vha, 0x2122, + "Failed to allocate qpair\n"); + return -EINVAL; + } + *handle = qpair; + + return 0; +} + +static void qla_nvme_sp_ls_done(void *ptr, int res) +{ + srb_t *sp = ptr; + struct srb_iocb *nvme; + struct nvmefc_ls_req *fd; + struct nvme_private *priv; + + if (atomic_read(&sp->ref_count) == 0) { + ql_log(ql_log_warn, sp->fcport->vha, 0x2123, + "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp); + return; + } + + if (!atomic_dec_and_test(&sp->ref_count)) + return; + + if (res) + res = -EINVAL; + + nvme = &sp->u.iocb_cmd; + fd = nvme->u.nvme.desc; + priv = fd->private; + priv->comp_status = res; + schedule_work(&priv->ls_work); + /* work schedule doesn't need the sp */ + qla2x00_rel_sp(sp); +} + +static void qla_nvme_sp_done(void *ptr, int res) +{ + srb_t *sp = ptr; + struct srb_iocb *nvme; + struct nvmefc_fcp_req *fd; + + nvme = &sp->u.iocb_cmd; + fd = nvme->u.nvme.desc; + + if (!atomic_dec_and_test(&sp->ref_count)) + return; + + if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED)) + goto rel; + + if (unlikely(nvme->u.nvme.comp_status || res)) + fd->status = -EINVAL; + else + fd->status = 0; + + fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; + fd->done(fd); +rel: + qla2xxx_rel_qpair_sp(sp->qpair, sp); +} + +static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) +{ + struct nvme_private *priv = fd->private; + fc_port_t *fcport = rport->private; + srb_t *sp = priv->sp; + int rval; + struct qla_hw_data *ha = fcport->vha->hw; + + rval = ha->isp_ops->abort_command(sp); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, fcport->vha, 0x2125, + "%s: failed to abort LS command for SP:%p rval=%x\n", + __func__, sp, rval); + + ql_dbg(ql_dbg_io, fcport->vha, 0x212b, + "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport); +} + +static void qla_nvme_ls_complete(struct work_struct *work) +{ + struct nvme_private *priv = + container_of(work, struct nvme_private, ls_work); + struct nvmefc_ls_req *fd = priv->fd; + + fd->done(fd, priv->comp_status); +} + +static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) +{ + fc_port_t *fcport = (fc_port_t *)rport->private; + struct srb_iocb *nvme; + struct nvme_private *priv = fd->private; + struct scsi_qla_host *vha; + int rval = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha; + srb_t *sp; + + if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) + return rval; + + vha = fcport->vha; + ha = vha->hw; + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + return rval; + + sp->type = SRB_NVME_LS; + sp->name = "nvme_ls"; + sp->done = qla_nvme_sp_ls_done; + atomic_set(&sp->ref_count, 1); + init_waitqueue_head(&sp->nvme_ls_waitQ); + nvme = &sp->u.iocb_cmd; + priv->sp = sp; + priv->fd = fd; + INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); + nvme->u.nvme.desc = fd; + nvme->u.nvme.dir = 0; + nvme->u.nvme.dl = 0; + nvme->u.nvme.cmd_len = fd->rqstlen; + nvme->u.nvme.rsp_len = fd->rsplen; + nvme->u.nvme.rsp_dma = fd->rspdma; + 
nvme->u.nvme.timeout_sec = fd->timeout; + nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, + fd->rqstlen, DMA_TO_DEVICE); + dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, + fd->rqstlen, DMA_TO_DEVICE); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x700e, + "qla2x00_start_sp failed = %d\n", rval); + atomic_dec(&sp->ref_count); + wake_up(&sp->nvme_ls_waitQ); + return rval; + } + + return rval; +} + +static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, void *hw_queue_handle, + struct nvmefc_fcp_req *fd) +{ + struct nvme_private *priv = fd->private; + srb_t *sp = priv->sp; + int rval; + fc_port_t *fcport = rport->private; + struct qla_hw_data *ha = fcport->vha->hw; + + rval = ha->isp_ops->abort_command(sp); + if (!rval) + ql_log(ql_log_warn, fcport->vha, 0x2127, + "%s: failed to abort command for SP:%p rval=%x\n", + __func__, sp, rval); + + ql_dbg(ql_dbg_io, fcport->vha, 0x2126, + "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport); +} + +static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle) +{ + struct scsi_qla_host *vha = lport->private; + unsigned long flags; + struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle; + + /* Acquire ring specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + qla24xx_process_response_queue(vha, qpair->rsp); + spin_unlock_irqrestore(&qpair->qp_lock, flags); +} + +static int qla2x00_start_nvme_mq(srb_t *sp) +{ + unsigned long flags; + uint32_t *clr_ptr; + uint32_t index; + uint32_t handle; + struct cmd_nvme *cmd_pkt; + uint16_t cnt, i; + uint16_t req_cnt; + uint16_t tot_dsds; + uint16_t avail_dsds; + uint32_t *cur_dsd; + struct req_que *req = NULL; + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair = sp->qpair; + struct srb_iocb *nvme = &sp->u.iocb_cmd; + struct scatterlist *sgl, *sg; + struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; + uint32_t rval = QLA_SUCCESS; + + /* Setup qpair pointers */ + req = qpair->req; + tot_dsds = fd->sg_cnt; + + /* Acquire qpair specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + + if (index == req->num_outstanding_cmds) { + rval = -1; + goto queuing_error; + } + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - (req->ring_index - cnt); + + if (req->cnt < (req_cnt + 2)){ + rval = -1; + goto queuing_error; + } + } + + if (unlikely(!fd->sqid)) { + struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; + if (cmd->sqe.common.opcode == nvme_admin_async_event) { + nvme->u.nvme.aen_op = 1; + atomic_inc(&vha->nvme_active_aen_cnt); + } + } + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_nvme *)req->ring_ptr; + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); + + /* Zero out remaining portion of packet. 
*/ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + + cmd_pkt->entry_status = 0; + + /* Update entry type to indicate Command NVME IOCB */ + cmd_pkt->entry_type = COMMAND_NVME; + + /* No data transfer how do we check buffer len == 0?? */ + if (fd->io_dir == NVMEFC_FCP_READ) { + cmd_pkt->control_flags = + cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE); + vha->qla_stats.input_bytes += fd->payload_length; + vha->qla_stats.input_requests++; + } else if (fd->io_dir == NVMEFC_FCP_WRITE) { + cmd_pkt->control_flags = + cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE); + vha->qla_stats.output_bytes += fd->payload_length; + vha->qla_stats.output_requests++; + } else if (fd->io_dir == 0) { + cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE); + } + + /* Set NPORT-ID */ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + + /* NVME RSP IU */ + cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen); + cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma)); + cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma)); + + /* NVME CNMD IU */ + cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen); + cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma)); + cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma)); + + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + cmd_pkt->byte_count = cpu_to_le32(fd->payload_length); + + /* One DSD is available in the Command Type NVME IOCB */ + avail_dsds = 1; + cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0]; + sgl = fd->first_sgl; + + /* Load data segments */ + for_each_sg(sgl, sg, tot_dsds, i) { + dma_addr_t sle_dma; + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Continuation + * Type 1 IOCB. + */ + + /* Adjust ring index */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + cont_pkt = (cont_a64_entry_t *)req->ring_ptr; + cont_pkt->entry_type = cpu_to_le32(CONTINUE_A64_TYPE); + + cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; + avail_dsds = 5; + } + + sle_dma = sg_dma_address(sg); + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); + avail_dsds--; + } + + /* Set total entry count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + wmb(); + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + /* Set chip new ring index. 
*/ + WRT_REG_DWORD(req->req_q_in, req->ring_index); + +queuing_error: + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return rval; +} + +/* Post a command */ +static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, void *hw_queue_handle, + struct nvmefc_fcp_req *fd) +{ + fc_port_t *fcport; + struct srb_iocb *nvme; + struct scsi_qla_host *vha; + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle; + struct nvme_private *priv; + + if (!fd) { + ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP reqeust\n"); + return rval; + } + + priv = fd->private; + fcport = (fc_port_t *)rport->private; + if (!fcport) { + ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n"); + return rval; + } + + vha = fcport->vha; + if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))) + return -EBUSY; + + /* Alloc SRB structure */ + sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC); + if (!sp) + return -EIO; + + atomic_set(&sp->ref_count, 1); + init_waitqueue_head(&sp->nvme_ls_waitQ); + priv->sp = sp; + sp->type = SRB_NVME_CMD; + sp->name = "nvme_cmd"; + sp->done = qla_nvme_sp_done; + sp->qpair = qpair; + nvme = &sp->u.iocb_cmd; + nvme->u.nvme.desc = fd; + + rval = qla2x00_start_nvme_mq(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x212d, + "qla2x00_start_nvme_mq failed = %d\n", rval); + atomic_dec(&sp->ref_count); + wake_up(&sp->nvme_ls_waitQ); + return -EIO; + } + + return rval; +} + +static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) +{ + struct scsi_qla_host *vha = lport->private; + + atomic_dec(&vha->nvme_ref_count); + wake_up_all(&vha->nvme_waitQ); + + ql_log(ql_log_info, vha, 0x210f, + "localport delete of %p completed.\n", vha->nvme_local_port); + vha->nvme_local_port = NULL; +} + +static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) +{ + fc_port_t *fcport; + struct nvme_rport *r_port, *trport; + + fcport = (fc_port_t *)rport->private; + fcport->nvme_remote_port = NULL; + fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; + atomic_dec(&fcport->nvme_ref_count); + wake_up_all(&fcport->nvme_waitQ); + + list_for_each_entry_safe(r_port, trport, + &fcport->vha->nvme_rport_list, list) { + if (r_port->fcport == fcport) { + list_del(&r_port->list); + break; + } + } + kfree(r_port); + + ql_log(ql_log_info, fcport->vha, 0x2110, + "remoteport_delete of %p completed.\n", fcport); +} + +static struct nvme_fc_port_template qla_nvme_fc_transport = { + .localport_delete = qla_nvme_localport_delete, + .remoteport_delete = qla_nvme_remoteport_delete, + .create_queue = qla_nvme_alloc_queue, + .delete_queue = NULL, + .ls_req = qla_nvme_ls_req, + .ls_abort = qla_nvme_ls_abort, + .fcp_io = qla_nvme_post_cmd, + .fcp_abort = qla_nvme_fcp_abort, + .poll_queue = qla_nvme_poll, + .max_hw_queues = 8, + .max_sgl_segments = 128, + .max_dif_sgl_segments = 64, + .dma_boundary = 0xFFFFFFFF, + .local_priv_sz = 8, + .remote_priv_sz = 0, + .lsrqst_priv_sz = sizeof(struct nvme_private), + .fcprqst_priv_sz = sizeof(struct nvme_private), +}; + +#define NVME_ABORT_POLLING_PERIOD 2 +static int qla_nvme_wait_on_command(srb_t *sp) +{ + int ret = QLA_SUCCESS; + + wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) > 1), + NVME_ABORT_POLLING_PERIOD*HZ); + + if (atomic_read(&sp->ref_count) > 1) + ret = QLA_FUNCTION_FAILED; + + return ret; +} + +static int qla_nvme_wait_on_rport_del(fc_port_t *fcport) +{ + int ret = QLA_SUCCESS; + + wait_event_timeout(fcport->nvme_waitQ, + 
atomic_read(&fcport->nvme_ref_count), + NVME_ABORT_POLLING_PERIOD*HZ); + + if (atomic_read(&fcport->nvme_ref_count)) { + ret = QLA_FUNCTION_FAILED; + ql_log(ql_log_info, fcport->vha, 0x2111, + "timed out waiting for fcport=%p to delete\n", fcport); + } + + return ret; +} + +void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp) +{ + int rval; + + rval = ha->isp_ops->abort_command(sp); + if (!rval) { + if (!qla_nvme_wait_on_command(sp)) + ql_log(ql_log_warn, NULL, 0x2112, + "nvme_wait_on_comand timed out waiting on sp=%p\n", + sp); + } +} + +static void qla_nvme_abort_all(fc_port_t *fcport) +{ + int que, cnt; + unsigned long flags; + srb_t *sp; + struct qla_hw_data *ha = fcport->vha->hw; + struct req_que *req; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (que = 0; que < ha->max_req_queues; que++) { + req = ha->req_q_map[que]; + if (!req) + continue; + if (!req->outstanding_cmds) + continue; + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if ((sp) && ((sp->type == SRB_NVME_CMD) || + (sp->type == SRB_NVME_LS)) && + (sp->fcport == fcport)) { + atomic_inc(&sp->ref_count); + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + qla_nvme_abort(ha, sp); + spin_lock_irqsave(&ha->hardware_lock, flags); + req->outstanding_cmds[cnt] = NULL; + sp->done(sp, 1); + } + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static void qla_nvme_unregister_remote_port(struct work_struct *work) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct fc_port *fcport = container_of(work, struct fc_port, + nvme_del_work); + struct nvme_rport *rport, *trport; + + list_for_each_entry_safe(rport, trport, + &fcport->vha->nvme_rport_list, list) { + if (rport->fcport == fcport) { + ql_log(ql_log_info, fcport->vha, 0x2113, + "%s: fcport=%p\n", __func__, fcport); + nvme_fc_unregister_remoteport( + fcport->nvme_remote_port); + } + } +#endif +} + +void qla_nvme_delete(scsi_qla_host_t *vha) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct nvme_rport *rport, *trport; + fc_port_t *fcport; + int nv_ret; + + list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) { + fcport = rport->fcport; + + ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n", + __func__, fcport); + + nvme_fc_unregister_remoteport(fcport->nvme_remote_port); + qla_nvme_wait_on_rport_del(fcport); + qla_nvme_abort_all(fcport); + } + + if (vha->nvme_local_port) { + nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); + if (nv_ret == 0) + ql_log(ql_log_info, vha, 0x2116, + "unregistered localport=%p\n", + vha->nvme_local_port); + else + ql_log(ql_log_info, vha, 0x2115, + "Unregister of localport failed\n"); + } +#endif +} + +void qla_nvme_register_hba(scsi_qla_host_t *vha) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct nvme_fc_port_template *tmpl; + struct qla_hw_data *ha; + struct nvme_fc_port_info pinfo; + int ret; + + ha = vha->hw; + tmpl = &qla_nvme_fc_transport; + + WARN_ON(vha->nvme_local_port); + WARN_ON(ha->max_req_queues < 3); + + qla_nvme_fc_transport.max_hw_queues = + min((uint8_t)(qla_nvme_fc_transport.max_hw_queues), + (uint8_t)(ha->max_req_queues - 2)); + + pinfo.node_name = wwn_to_u64(vha->node_name); + pinfo.port_name = wwn_to_u64(vha->port_name); + pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR; + pinfo.port_id = vha->d_id.b24; + + ql_log(ql_log_info, vha, 0xffff, + "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n", + pinfo.port_name, pinfo.node_name, pinfo.port_id); + qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary; + + ret = 
nvme_fc_register_localport(&pinfo, tmpl, + get_device(&ha->pdev->dev), &vha->nvme_local_port); + if (ret) { + ql_log(ql_log_warn, vha, 0xffff, + "register_localport failed: ret=%x\n", ret); + return; + } + atomic_set(&vha->nvme_ref_count, 1); + vha->nvme_local_port->private = vha; + init_waitqueue_head(&vha->nvme_waitQ); +#endif +} diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h new file mode 100644 index 000000000000..dfe56f207b28 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nvme.h @@ -0,0 +1,132 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2017 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ +#ifndef __QLA_NVME_H +#define __QLA_NVME_H + +#include +#include +#include +#include + +#define NVME_ATIO_CMD_OFF 32 +#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF) +#define Q2T_NVME_NUM_TAGS 2048 +#define QLA_MAX_FC_SEGMENTS 64 + +struct srb; +struct nvme_private { + struct srb *sp; + struct nvmefc_ls_req *fd; + struct work_struct ls_work; + int comp_status; +}; + +struct nvme_rport { + struct nvme_fc_port_info req; + struct list_head list; + struct fc_port *fcport; +}; + +#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */ +struct cmd_nvme { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint16_t nport_handle; /* N_PORT handle. */ + uint16_t timeout; /* Command timeout. */ + + uint16_t dseg_count; /* Data segment count. */ + uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */ + + uint64_t rsvd; + + uint16_t control_flags; /* Control Flags */ +#define CF_NVME_ENABLE BIT_9 +#define CF_DIF_SEG_DESCR_ENABLE BIT_3 +#define CF_DATA_SEG_DESCR_ENABLE BIT_2 +#define CF_READ_DATA BIT_1 +#define CF_WRITE_DATA BIT_0 + + uint16_t nvme_cmnd_dseg_len; /* Data segment length. */ + uint32_t nvme_cmnd_dseg_address[2]; /* Data segment address. */ + uint32_t nvme_rsp_dseg_address[2]; /* Data segment address. */ + + uint32_t byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; + + uint32_t nvme_data_dseg_address[2]; /* Data segment address. */ + uint32_t nvme_data_dseg_len; /* Data segment length. 
*/ +}; + +#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */ +struct pt_ls4_request { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_define; + uint8_t entry_status; + uint32_t handle; + uint16_t status; + uint16_t nport_handle; + uint16_t tx_dseg_count; + uint8_t vp_index; + uint8_t rsvd; + uint16_t timeout; + uint16_t control_flags; +#define CF_LS4_SHIFT 13 +#define CF_LS4_ORIGINATOR 0 +#define CF_LS4_RESPONDER 1 +#define CF_LS4_RESPONDER_TERM 2 + + uint16_t rx_dseg_count; + uint16_t rsvd2; + uint32_t exchange_address; + uint32_t rsvd3; + uint32_t rx_byte_count; + uint32_t tx_byte_count; + uint32_t dseg0_address[2]; + uint32_t dseg0_len; + uint32_t dseg1_address[2]; + uint32_t dseg1_len; +}; + +#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */ +struct pt_ls4_rx_unsol { + uint8_t entry_type; + uint8_t entry_count; + uint16_t rsvd0; + uint16_t rsvd1; + uint8_t vp_index; + uint8_t rsvd2; + uint16_t rsvd3; + uint16_t nport_handle; + uint16_t frame_size; + uint16_t rsvd4; + uint32_t exchange_address; + uint8_t d_id[3]; + uint8_t r_ctl; + uint8_t s_id[3]; + uint8_t cs_ctl; + uint8_t f_ctl[3]; + uint8_t type; + uint16_t seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; + uint16_t rx_id; + uint16_t ox_id; + uint32_t param; + uint32_t desc0; +#define PT_LS4_PAYLOAD_OFFSET 0x2c +#define PT_LS4_FIRST_PACKET_LEN 20 + uint32_t desc_len; + uint32_t payload[3]; +}; +#endif diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5ad6328e5c33..df57655779ed 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -120,7 +120,11 @@ MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to set for each LUN. " "Default is 32."); +#if (IS_ENABLED(CONFIG_NVME_FC)) +int ql2xenabledif; +#else int ql2xenabledif = 2; +#endif module_param(ql2xenabledif, int, S_IRUGO); MODULE_PARM_DESC(ql2xenabledif, " Enable T10-CRC-DIF:\n" @@ -129,6 +133,16 @@ MODULE_PARM_DESC(ql2xenabledif, " 1 -- Enable DIF for all types\n" " 2 -- Enable DIF for all types, except Type 0.\n"); +#if (IS_ENABLED(CONFIG_NVME_FC)) +int ql2xnvmeenable = 1; +#else +int ql2xnvmeenable; +#endif +module_param(ql2xnvmeenable, int, 0644); +MODULE_PARM_DESC(ql2xnvmeenable, + "Enables NVME support. " + "0 - no NVMe. Default is Y"); + int ql2xenablehba_err_chk = 2; module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xenablehba_err_chk, @@ -267,6 +281,7 @@ static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); static void qla83xx_disable_laser(scsi_qla_host_t *vha); static int qla2xxx_map_queues(struct Scsi_Host *shost); +static void qla2x00_destroy_deferred_work(struct qla_hw_data *); struct scsi_host_template qla2xxx_driver_template = { .module = THIS_MODULE, @@ -695,7 +710,7 @@ qla2x00_sp_free_dma(void *ptr) } end: - if (sp->type != SRB_NVME_CMD) { + if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) { CMD_SP(cmd) = NULL; qla2x00_rel_sp(sp); } @@ -1700,15 +1715,23 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) if (sp) { req->outstanding_cmds[cnt] = NULL; if (sp->cmd_type == TYPE_SRB) { - /* - * Don't abort commands in adapter - * during EEH recovery as it's not - * accessible/responding. 
- */ - if (GET_CMD_SP(sp) && + if ((sp->type == SRB_NVME_CMD) || + (sp->type == SRB_NVME_LS)) { + sp_get(sp); + spin_unlock_irqrestore( + &ha->hardware_lock, flags); + qla_nvme_abort(ha, sp); + spin_lock_irqsave( + &ha->hardware_lock, flags); + } else if (GET_CMD_SP(sp) && !ha->flags.eeh_busy && (sp->type == SRB_SCSI_CMD)) { /* + * Don't abort commands in + * adapter during EEH + * recovery as it's not + * accessible/responding. + * * Get a reference to the sp * and drop the lock. The * reference ensures this @@ -3534,6 +3557,9 @@ qla2x00_remove_one(struct pci_dev *pdev) return; set_bit(UNLOADING, &base_vha->dpc_flags); + + qla_nvme_delete(base_vha); + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); -- cgit From d3bae931172eb94af7d21b05f6e9bf79cccf8fa0 Mon Sep 17 00:00:00 2001 From: Duane Grigsby Date: Wed, 21 Jun 2017 13:48:44 -0700 Subject: scsi: qla2xxx: Send FC4 type NVMe to the management server This patch adds switch command support for FC-4 type of FC-NVMe (0x28) for resgistering HBA port to the management server. RFT_ID command is used to register FC-4 type of 0x28 and RFF_ID is used to register FC-4 features bits for FC-NVMe port. Signed-off-by: Darren Trapp Signed-off-by: Duane Grigsby Signed-off-by: Anil Gurumurthy Signed-off-by: Giridhar Malavali Signed-off-by: Himanshu Madhani Reviewed-By: James Smart Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_gbl.h | 6 +- drivers/scsi/qla2xxx/qla_gs.c | 118 +++++++++++++++++++++++++++++++++++++++- drivers/scsi/qla2xxx/qla_init.c | 11 +++- 4 files changed, 131 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 0dbcb84011b0..0730b10b4280 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2867,6 +2867,7 @@ struct ct_sns_rsp { } gpsc; #define GFF_FCP_SCSI_OFFSET 7 +#define GFF_NVME_OFFSET 23 /* type = 28h */ struct { uint8_t fc4_features[128]; } gff_id; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index c6af45f7d5d6..cadb6e3baacc 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -18,6 +18,7 @@ extern void qla_nvme_delete(scsi_qla_host_t *); extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp); extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *, struct req_que *); +extern void qla24xx_async_gffid_sp_done(void *, int); /* * Global Function Prototypes in qla_init.c source file. 
@@ -618,7 +619,7 @@ extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *); extern void qla2x00_gff_id(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_rft_id(scsi_qla_host_t *); -extern int qla2x00_rff_id(scsi_qla_host_t *); +extern int qla2x00_rff_id(scsi_qla_host_t *, u8); extern int qla2x00_rnn_id(scsi_qla_host_t *); extern int qla2x00_rsnn_nn(scsi_qla_host_t *); extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); @@ -644,7 +645,8 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *); int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); int qla2x00_mgmt_svr_login(scsi_qla_host_t *); - +void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea); +int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport); /* * Global Function Prototypes in qla_attr.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 540fec524ccb..c91478529b51 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -550,6 +550,8 @@ qla2x00_rft_id(scsi_qla_host_t *vha) ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ + if (vha->flags.nvme_enabled) + ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); @@ -575,7 +577,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha) * Returns 0 on success. */ int -qla2x00_rff_id(scsi_qla_host_t *vha) +qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) { int rval; struct qla_hw_data *ha = vha->hw; @@ -613,7 +615,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha) qlt_rff_id(vha, ct_req); - ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ + ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */ /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ -2754,6 +2756,10 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) list[i].fc4_type = FC4_TYPE_FCP_SCSI; else list[i].fc4_type = FC4_TYPE_OTHER; + + list[i].fc4f_nvme = + ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; + list[i].fc4f_nvme &= 0xf; } /* Last device exit. */ @@ -3305,3 +3311,111 @@ done_free_sp: done: return rval; } + +void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + qla24xx_post_gnl_work(vha, fcport); +} + +void qla24xx_async_gffid_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + fc_port_t *fcport = sp->fcport; + struct ct_sns_rsp *ct_rsp; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0x2133, + "Async done-%s res %x ID %x. 
%8phC\n", + sp->name, res, fcport->d_id.b24, fcport->port_name); + + fcport->flags &= ~FCF_ASYNC_SENT; + ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + /* + * FC-GS-7, 5.2.3.12 FC-4 Features - format + * The format of the FC-4 Features object, as defined by the FC-4, + * Shall be an array of 4-bit values, one for each type code value + */ + if (!res) { + if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) { + /* w1 b00:03 */ + fcport->fc4_type = + ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; + fcport->fc4_type &= 0xf; + } + + if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) { + /* w5 [00:03]/28h */ + fcport->fc4f_nvme = + ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; + fcport->fc4f_nvme &= 0xf; + } + } + + memset(&ea, 0, sizeof(ea)); + ea.sp = sp; + ea.fcport = sp->fcport; + ea.rc = res; + ea.event = FCME_GFFID_DONE; + + qla2x00_fcport_event_handler(vha, &ea); + sp->free(sp); +} + +/* Get FC4 Feature with Nport ID. */ +int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online) + return rval; + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + return rval; + + fcport->flags |= FCF_ASYNC_SENT; + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gffid"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD, + GFF_ID_RSP_SIZE); + + ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain; + ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area; + ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa; + + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla24xx_async_gffid_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0x2132, + "Async-%s hdl=%x %8phC.\n", sp->name, + sp->handle, fcport->port_name); + + return rval; +done_free_sp: + sp->free(sp); + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d06c3c758322..8a5f5ef069ae 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1244,6 +1244,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) case FCME_GPNID_DONE: qla24xx_handle_gpnid_event(vha, ea); break; + case FCME_GFFID_DONE: + qla24xx_handle_gffid_event(vha, ea); + break; case FCME_DELETE_DONE: qla24xx_handle_delete_done_event(vha, ea); break; @@ -4633,7 +4636,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) &vha->dpc_flags)) break; } - if (qla2x00_rff_id(vha)) { + if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x209a, "Register FC-4 Features failed.\n"); @@ -4641,6 +4644,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) &vha->dpc_flags)) break; } + if (vha->flags.nvme_enabled) { + if (qla2x00_rff_id(vha, FC_TYPE_NVME)) { + ql_dbg(ql_dbg_disc, vha, 0x2049, + "Register NVME FC Type Features failed.\n"); + } + } if 
(qla2x00_rnn_id(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x2104, -- cgit From 7d2364809f0a6d04137bc628d72a6f382ee155c1 Mon Sep 17 00:00:00 2001 From: Duane Grigsby Date: Wed, 21 Jun 2017 13:48:45 -0700 Subject: scsi: qla2xxx: Use FC-NVMe FC4 type for FDMI registration [mkp: fixed typo] Signed-off-by: Duane Grigsby Signed-off-by: Darren Trapp Signed-off-by: Anil Gurumurthy Signed-off-by: Giridhar Malavali Signed-off-by: Himanshu Madhani Reviewed-by: Johannes Thumshirn Reviewed-by: James Smart Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_gs.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index c91478529b51..b323a7c71eda 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -2166,6 +2166,13 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) eiter->a.fc4_types[2], eiter->a.fc4_types[1]); + if (vha->flags.nvme_enabled) { + eiter->a.fc4_types[6] = 1; /* NVMe type 28h */ + ql_dbg(ql_dbg_disc, vha, 0x211f, + "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n", + eiter->a.fc4_types[6]); + } + /* Supported speed. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); @@ -2363,6 +2370,15 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) "Port Active FC4 Type = %02x %02x.\n", eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]); + if (vha->flags.nvme_enabled) { + eiter->a.port_fc4_type[4] = 0; + eiter->a.port_fc4_type[5] = 0; + eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */ + ql_dbg(ql_dbg_disc, vha, 0x2120, + "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n", + eiter->a.port_fc4_type[6]); + } + /* Port State */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_STATE); -- cgit From d06c587dd723882cd99c89d335b89c535079cdb1 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Wed, 21 Jun 2017 13:48:46 -0700 Subject: scsi: qla2xxx: Update Driver version to 10.00.00.00-k Signed-off-by: Himanshu Madhani Reviewed-by: Johannes Thumshirn Reviewed-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_version.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index dcbb9bb05e99..005a378f7fab 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "9.01.00.00-k" +#define QLA2XXX_VERSION "10.00.00.00-k" -#define QLA_DRIVER_MAJOR_VER 9 -#define QLA_DRIVER_MINOR_VER 1 +#define QLA_DRIVER_MAJOR_VER 10 +#define QLA_DRIVER_MINOR_VER 0 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 -- cgit From 6d311fa7d2c18659d040b9beba5e41fe24c2a6f5 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 14 Jun 2017 13:52:43 +0200 Subject: scsi: sas: scsi_queue_work can fail, so make callers aware libsas uses scsi_queue_work() to queue its internal event notifications. scsi_queue_work() can return -EINVAL if the work queue doesn't exist and it does call queue_work() which can return false if the work is already queued. Make the SAS event code capable of returning errors up to the caller, which is handy when changing to dynamically allocated work in libsas as well, as discussed here: https://lkml.org/lkml/2017/6/14/121. [mkp: fixed typo] Signed-off-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/libsas/sas_event.c | 36 ++++++++++++++++++++++-------------- drivers/scsi/libsas/sas_internal.h | 4 ++-- include/scsi/libsas.h | 6 +++--- 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index aadbd5314c5c..c0d0d979b76d 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c @@ -27,30 +27,38 @@ #include "sas_internal.h" #include "sas_dump.h" -void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) +int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) { + int rc = 0; + if (!test_bit(SAS_HA_REGISTERED, &ha->state)) - return; + return 0; if (test_bit(SAS_HA_DRAINING, &ha->state)) { /* add it to the defer list, if not already pending */ if (list_empty(&sw->drain_node)) list_add(&sw->drain_node, &ha->defer_q); } else - scsi_queue_work(ha->core.shost, &sw->work); + rc = scsi_queue_work(ha->core.shost, &sw->work); + + return rc; } -static void sas_queue_event(int event, unsigned long *pending, +static int sas_queue_event(int event, unsigned long *pending, struct sas_work *work, struct sas_ha_struct *ha) { + int rc = 0; + if (!test_and_set_bit(event, pending)) { unsigned long flags; spin_lock_irqsave(&ha->lock, flags); - sas_queue_work(ha, work); + rc = sas_queue_work(ha, work); spin_unlock_irqrestore(&ha->lock, flags); } + + return rc; } @@ -116,32 +124,32 @@ void sas_enable_revalidation(struct sas_ha_struct *ha) mutex_unlock(&ha->disco_mutex); } -static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) +static int notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) { BUG_ON(event >= HA_NUM_EVENTS); - sas_queue_event(event, &sas_ha->pending, - &sas_ha->ha_events[event].work, sas_ha); + return sas_queue_event(event, &sas_ha->pending, + &sas_ha->ha_events[event].work, sas_ha); } -static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) +static int notify_port_event(struct asd_sas_phy *phy, enum port_event event) { struct sas_ha_struct *ha = phy->ha; BUG_ON(event >= PORT_NUM_EVENTS); - sas_queue_event(event, &phy->port_events_pending, - &phy->port_events[event].work, ha); + return sas_queue_event(event, &phy->port_events_pending, + &phy->port_events[event].work, ha); } -void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) +int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) { struct sas_ha_struct *ha = phy->ha; BUG_ON(event >= PHY_NUM_EVENTS); - sas_queue_event(event, &phy->phy_events_pending, - &phy->phy_events[event].work, ha); + return sas_queue_event(event, &phy->phy_events_pending, + &phy->phy_events[event].work, ha); } int sas_init_events(struct sas_ha_struct *sas_ha) diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index b306b7843d99..a216c957b639 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -76,7 +76,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work); void sas_porte_link_reset_err(struct work_struct *work); void sas_porte_timer_event(struct work_struct *work); void sas_porte_hard_reset(struct work_struct *work); -void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); +int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); int sas_notify_lldd_dev_found(struct domain_device *); void sas_notify_lldd_dev_gone(struct domain_device *); @@ -85,7 +85,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func 
phy_func, struct sas_phy_linkrates *); int sas_smp_get_phy_events(struct sas_phy *phy); -void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event); +int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event); void sas_device_set_phy(struct domain_device *dev, struct sas_port *port); struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index dd0f72c95abe..cfaeed256ab2 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -415,9 +415,9 @@ struct sas_ha_struct { * their siblings when forming wide ports */ /* LLDD calls these to notify the class of an event. */ - void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event); - void (*notify_port_event)(struct asd_sas_phy *, enum port_event); - void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event); + int (*notify_ha_event)(struct sas_ha_struct *, enum ha_event); + int (*notify_port_event)(struct asd_sas_phy *, enum port_event); + int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event); void *lldd_ha; /* not touched by sas class code */ -- cgit From 5c279bd9e40624f4ab6e688671026d6005b066fa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jun 2017 10:27:55 +0200 Subject: scsi: default to scsi-mq Remove the SCSI_MQ_DEFAULT config option and default to the blk-mq I/O path now that we had plenty of testing, and have I/O schedulers for blk-mq. The module option to disable the blk-mq path is kept around for now. Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. Petersen --- drivers/scsi/Kconfig | 11 ----------- drivers/scsi/scsi.c | 4 ---- 2 files changed, 15 deletions(-) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 3c52867dfe28..d384f4f86c26 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -47,17 +47,6 @@ config SCSI_NETLINK default n depends on NET -config SCSI_MQ_DEFAULT - bool "SCSI: use blk-mq I/O path by default" - depends on SCSI - ---help--- - This option enables the new blk-mq based I/O path for SCSI - devices by default. With the option the scsi_mod.use_blk_mq - module/boot option defaults to Y, without it to N, but it can - still be overridden either way. - - If unsure say N. - config SCSI_PROC_FS bool "legacy /proc/scsi/ support" depends on SCSI && PROC_FS diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1bf274e3b2b6..3d38c6d463b8 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -800,11 +800,7 @@ MODULE_LICENSE("GPL"); module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); -#ifdef CONFIG_SCSI_MQ_DEFAULT bool scsi_use_blk_mq = true; -#else -bool scsi_use_blk_mq = false; -#endif module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); static int __init init_scsi(void) -- cgit From 56ffd3a0500e3b0c133da451036074fd0a8c3f1c Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Mon, 26 Jun 2017 08:59:32 -0700 Subject: scsi: bnx2fc: Add filters to the non-offload FCoE receive path. Add the following filters to bnx2fc_recv_frame(): 1. Filter out invalid packets - eth->dest_mac[3] matches FC frame's D_ID 2. Filter out packets that are not from our connected target - In FIP_ST_ENABLED mode - eth->src_mac matches fcoe_ctlr->dest_addr 3. 
Filter out packets where if d_id of the packet doesn't belong to the device when one is already assigned a port_id, only then this packet is dropped Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 902722dc4ce3..342a316d9341 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -522,10 +522,12 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) struct fcoe_crc_eof crc_eof; struct fc_frame *fp; struct fc_lport *vn_port; - struct fcoe_port *port; + struct fcoe_port *port, *phys_port; u8 *mac = NULL; u8 *dest_mac = NULL; struct fcoe_hdr *hp; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; fr = fcoe_dev_from_skb(skb); lport = fr->fr_dev; @@ -561,8 +563,19 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } + phys_port = lport_priv(lport); + interface = phys_port->priv; + ctlr = bnx2fc_to_ctlr(interface); + fh = fc_frame_header_get(fp); + if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { + BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n", + dest_mac); + kfree_skb(skb); + return; + } + vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); if (vn_port) { port = lport_priv(vn_port); @@ -572,6 +585,14 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } } + if (ctlr->state) { + if (!ether_addr_equal(mac, ctlr->dest_addr)) { + BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n", + mac, ctlr->dest_addr); + kfree_skb(skb); + return; + } + } if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. We dont this in L2 path */ @@ -597,6 +618,18 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } + /* + * If the destination ID from the frame header does not match what we + * have on record for lport and the search for a NPIV port came up + * empty then this is not addressed to our port so simply drop it. + */ + if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { + BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", + lport->port_id, ntoh24(fh->fh_d_id)); + kfree_skb(skb); + return; + } + stats = per_cpu_ptr(lport->stats, smp_processor_id()); stats->RxFrames++; stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; -- cgit From de08f46b90d3b353f53d273604e6544186564e59 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Mon, 26 Jun 2017 08:59:33 -0700 Subject: scsi: bnx2fc: Check for connection offload before sending RRQ. If the connection is not offloaded then the backpointers from the tgt pointer are undefined. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. 
Petersen --- drivers/scsi/bnx2fc/bnx2fc_els.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index 68ca518d34b0..fee12c3d67cd 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -61,13 +61,20 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) struct fc_els_rrq rrq; struct bnx2fc_rport *tgt = aborted_io_req->tgt; - struct fc_lport *lport = tgt->rdata->local_port; + struct fc_lport *lport = NULL; struct bnx2fc_els_cb_arg *cb_arg = NULL; - u32 sid = tgt->sid; - u32 r_a_tov = lport->r_a_tov; + u32 sid = 0; + u32 r_a_tov = 0; unsigned long start = jiffies; int rc; + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) + return -EINVAL; + + lport = tgt->rdata->local_port; + sid = tgt->sid; + r_a_tov = lport->r_a_tov; + BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n", aborted_io_req->xid); memset(&rrq, 0, sizeof(rrq)); -- cgit From 21144b80619fc953bcf3615252960f2cdf846883 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Mon, 26 Jun 2017 08:59:34 -0700 Subject: scsi: bnx2fc: Update copyright for 2017. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | 3 ++- drivers/scsi/bnx2fc/bnx2fc.h | 3 ++- drivers/scsi/bnx2fc/bnx2fc_constants.h | 3 ++- drivers/scsi/bnx2fc/bnx2fc_debug.c | 3 ++- drivers/scsi/bnx2fc/bnx2fc_debug.h | 3 ++- drivers/scsi/bnx2fc/bnx2fc_els.c | 3 ++- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 3 ++- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 3 ++- drivers/scsi/bnx2fc/bnx2fc_io.c | 3 ++- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 3 ++- 10 files changed, 20 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h index ac1c0b631aca..e4469df9c469 100644 --- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h @@ -3,7 +3,8 @@ * session resources such as connection id and qp resources. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 1f424e40afdf..c19284040952 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -1,7 +1,8 @@ /* bnx2fc.h: QLogic Linux FCoE offload driver. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h index 5b20efb661a5..9ed150307a39 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_constants.h +++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h @@ -3,7 +3,8 @@ * session resources such as connection id and qp resources. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c index c9e0bc7fad3b..47ba3ba1e03b 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_debug.c +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c @@ -3,7 +3,8 @@ * session resources such as connection id and qp resources. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h index 34fda3e04d27..76717acee3ab 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_debug.h +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h @@ -3,7 +3,8 @@ * session resources such as connection id and qp resources. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index fee12c3d67cd..76e65a32f38c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -4,7 +4,8 @@ * and responses. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 342a316d9341..0d313a15e677 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -4,7 +4,8 @@ * FIP/FCoE packets, listen to link events etc. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 5ff9f89c17c7..913c750205ce 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -3,7 +3,8 @@ * with 57712 FCoE firmware. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 898461b146cc..6e8b9bea89fc 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -2,7 +2,8 @@ * IO manager and SCSI IO processing. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 739bfb62aff6..59a2dfbcbc69 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -3,7 +3,8 @@ * session resources such as connection id and qp resources. * * Copyright (c) 2008-2013 Broadcom Corporation - * Copyright (c) 2014-2015 QLogic Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by -- cgit From 59fb870f0ed4da64cbbc44f6a4dc1038be9f8904 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Mon, 26 Jun 2017 08:59:35 -0700 Subject: scsi: bnx2fc: Adjust bnx2fc_npiv_create_vports for WWNNs from NVRAM that are zero. Some vports addresses stored in NVRAM may have zero for the WWNN. Adjust the WWNN that we'll use to be the same as the WWPN. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 0d313a15e677..71dd36756ca4 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -2139,6 +2139,9 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport, { struct fc_vport_identifiers vpid; uint i, created = 0; + u64 wwnn = 0; + char wwpn_str[32]; + char wwnn_str[32]; if (npiv_tbl->count > MAX_NPIV_ENTRIES) { BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n"); @@ -2157,11 +2160,23 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport, vpid.disable = false; for (i = 0; i < npiv_tbl->count; i++) { - vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]); + wwnn = wwn_to_u64(npiv_tbl->wwnn[i]); + if (wwnn == 0) { + /* + * If we get a 0 element from for the WWNN then assume + * the WWNN should be the same as the physical port. + */ + wwnn = lport->wwnn; + } + vpid.node_name = wwnn; vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]); scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name), "NPIV[%u]:%016llx-%016llx", created, vpid.port_name, vpid.node_name); + fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str)); + fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str)); + BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str, + wwpn_str); if (fc_vport_create(lport->host, 0, &vpid)) created++; else -- cgit From 5c63daf67d4753f80883672d429ff7b6595ec2f4 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Mon, 26 Jun 2017 08:59:36 -0700 Subject: scsi: bnx2fc: If IO is still in cleanup then do not return to SCSI layer. In eh_abort, driver is calling scsi->done() for a IO for which cleanup is pending. As the IO is outstanding with the firmware, it may do DMA associated with the IO. This may lead to heap corruption. Do not complete the IO for which cleanup is still pending. Return failure from eh_abort and let the SCSI-ml retry the IO. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. 
Petersen --- drivers/scsi/bnx2fc/bnx2fc_io.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 6e8b9bea89fc..5b6153f23f01 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1167,16 +1167,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " "not on active_q\n", io_req->xid); /* - * This condition can happen only due to the FW bug, - * where we do not receive cleanup response from - * the FW. Handle this case gracefully by erroring - * back the IO request to SCSI-ml + * The IO is still with the FW. + * Return failure and let SCSI-ml retry eh_abort. */ - bnx2fc_scsi_done(io_req, DID_ABORT); - - kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); - return SUCCESS; + return FAILED; } /* -- cgit From 5eddc3358f6e2e2e7c56a24a75566c90ca207303 Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Mon, 26 Jun 2017 08:59:37 -0700 Subject: scsi: bnx2fc: Make rport_terminate_io callback a NOOP. Do not call the stock libfc terminate rport i/o handler so we won't reset the libfc exchange manager and kill any outstanding discovery requests. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 71dd36756ca4..b1c105f4130d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -2573,6 +2573,11 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) bnx2fc_hba_destroy(hba); } +static void bnx2fc_rport_terminate_io(struct fc_rport *rport) +{ + /* This is a no-op */ +} + /** * bnx2fc_fcoe_reset - Resets the fcoe * @@ -2909,7 +2914,7 @@ static struct fc_function_template bnx2fc_transport_function = { .issue_fc_host_lip = bnx2fc_fcoe_reset, - .terminate_rport_io = fc_rport_terminate_io, + .terminate_rport_io = bnx2fc_rport_terminate_io, .vport_create = bnx2fc_vport_create, .vport_delete = bnx2fc_vport_destroy, -- cgit From 3655a286ca5c244612f586cbb034cab433a20a1a Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Mon, 26 Jun 2017 08:59:38 -0700 Subject: scsi: bnx2fc: Update version number to 2.11.8. Signed-off-by: Chad Dupuis Signed-off-by: Martin K. Petersen --- drivers/scsi/bnx2fc/bnx2fc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index c19284040952..7e007e142aab 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -66,7 +66,7 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "2.10.3" +#define BNX2FC_VERSION "2.11.8" #define PFX "bnx2fc: " -- cgit From 1cdf8bc18f1ee43a39e543506fff8d5db3020ae1 Mon Sep 17 00:00:00 2001 From: Satish Kharat Date: Mon, 26 Jun 2017 17:46:23 -0700 Subject: scsi: fnic: Zero io_cmpl_skip on fw reset completion io_cmpl_skip keep track of number of completions to skip when stats are reset. If a fw_reset happens immediately after stats reset it could put it out of sync so need to reset io_cmpl_skip when fw reset is completed. Signed-off-by: Satish Kharat Signed-off-by: Sesidhar Baddela Signed-off-by: Martin K. 
Petersen --- drivers/scsi/fnic/fnic_scsi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index d048f3b5006f..beea14c40a95 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -633,6 +633,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); + atomic64_set(&fnic->io_cmpl_skip, 0); spin_lock_irqsave(&fnic->fnic_lock, flags); -- cgit From 43caa03fec79d062c5f7a959a823770d72717b24 Mon Sep 17 00:00:00 2001 From: Satish Kharat Date: Mon, 26 Jun 2017 17:48:08 -0700 Subject: scsi: fnic: added timestamp reporting in fnic debug stats Added the timestamps for 1. current timestamp 2. last fnic stats read timestamp 3. last fnic stats reset timestamp and the deltas since last stats read and last reset in fnic stats. fnic stats uses debugfs Signed-off-by: Sesidhar Baddela Signed-off-by: Satish Kharat Signed-off-by: Martin K. Petersen --- drivers/scsi/fnic/fnic_debugfs.c | 1 + drivers/scsi/fnic/fnic_stats.h | 7 +++++++ drivers/scsi/fnic/fnic_trace.c | 24 ++++++++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index d6498fabe628..5e3d909cfc53 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -632,6 +632,7 @@ static ssize_t fnic_reset_stats_write(struct file *file, sizeof(struct io_path_stats) - sizeof(u64)); memset(fw_stats_p+1, 0, sizeof(struct fw_stats) - sizeof(u64)); + getnstimeofday(&stats->stats_timestamps.last_reset_time); } (*ppos)++; diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h index 88c73cccb015..e007feedbf72 100644 --- a/drivers/scsi/fnic/fnic_stats.h +++ b/drivers/scsi/fnic/fnic_stats.h @@ -16,6 +16,12 @@ */ #ifndef _FNIC_STATS_H_ #define _FNIC_STATS_H_ + +struct stats_timestamps { + struct timespec last_reset_time; + struct timespec last_read_time; +}; + struct io_path_stats { atomic64_t active_ios; atomic64_t max_active_ios; @@ -110,6 +116,7 @@ struct misc_stats { }; struct fnic_stats { + struct stats_timestamps stats_timestamps; struct io_path_stats io_stats; struct abort_stats abts_stats; struct terminate_stats term_stats; diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index b5ac5381a0d7..4826f596cb31 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -219,7 +219,31 @@ int fnic_get_stats_data(struct stats_debug_info *debug, int buf_size = debug->buf_size; struct timespec val1, val2; + getnstimeofday(&val1); len = snprintf(debug->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\tTime\n" + "------------------------------------------\n"); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Current time : [%ld:%ld]\n" + "Last stats reset time: [%ld:%ld]\n" + "Last stats read time: [%ld:%ld]\n" + "delta since last reset: [%ld:%ld]\n" + "delta since last read: [%ld:%ld]\n", + val1.tv_sec, val1.tv_nsec, + stats->stats_timestamps.last_reset_time.tv_sec, + stats->stats_timestamps.last_reset_time.tv_nsec, + stats->stats_timestamps.last_read_time.tv_sec, + stats->stats_timestamps.last_read_time.tv_nsec, + timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec, + timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec, + timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec, + 
timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec); + + stats->stats_timestamps.last_read_time = val1; + + len += snprintf(debug->debug_buffer + len, buf_size - len, "------------------------------------------\n" "\t\tIO Statistics\n" "------------------------------------------\n"); -- cgit From c22fa50b2d41f3091d2ab0acf60ffdedb7ccd765 Mon Sep 17 00:00:00 2001 From: Satish Kharat Date: Mon, 26 Jun 2017 17:48:44 -0700 Subject: scsi: fnic: correct speed display and add support for 25,40 and 100G Setting speed based on the vinc device parameter read during linkup. Also adding support to display 25,40 and 100G Signed-off-by: Satish Kharat Signed-off-by: Sesidhar Baddela Signed-off-by: Martin K. Petersen --- drivers/scsi/fnic/fnic_fcs.c | 24 ++++++++++++++++++++++++ drivers/scsi/fnic/fnic_io.h | 9 +++++++++ drivers/scsi/fnic/fnic_main.c | 14 ++++++++++++-- 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 245dcd95e11f..d212eb574c94 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -65,6 +65,30 @@ void fnic_handle_link(struct work_struct *work) fnic->link_status = vnic_dev_link_status(fnic->vdev); fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + switch (vnic_dev_port_speed(fnic->vdev)) { + case DCEM_PORTSPEED_10G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; + break; + case DCEM_PORTSPEED_25G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; + break; + case DCEM_PORTSPEED_40G: + case DCEM_PORTSPEED_4x10G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; + break; + case DCEM_PORTSPEED_100G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; + break; + default: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; + fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; + break; + } + if (old_link_status == fnic->link_status) { if (!fnic->link_status) { /* DOWN -> DOWN */ diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index c35b8f1889ea..e0bc659ed71f 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h @@ -66,4 +66,13 @@ struct fnic_io_req { struct completion *dr_done; /* completion for device reset */ }; +enum fnic_port_speeds { + DCEM_PORTSPEED_NONE = 0, + DCEM_PORTSPEED_1G = 1000, + DCEM_PORTSPEED_10G = 10000, + DCEM_PORTSPEED_40G = 40000, + DCEM_PORTSPEED_4x10G = 41000, + DCEM_PORTSPEED_25G = 25000, + DCEM_PORTSPEED_100G = 100000, +}; #endif /* _FNIC_IO_H_ */ diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index ba58b7953263..aacadbf20b69 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -176,11 +176,21 @@ static void fnic_get_host_speed(struct Scsi_Host *shost) /* Add in other values as they get defined in fw */ switch (port_speed) { - case 10000: + case DCEM_PORTSPEED_10G: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case DCEM_PORTSPEED_25G: + fc_host_speed(shost) = FC_PORTSPEED_25GBIT; + break; + case DCEM_PORTSPEED_40G: + case DCEM_PORTSPEED_4x10G: + fc_host_speed(shost) = FC_PORTSPEED_40GBIT; + break; + case DCEM_PORTSPEED_100G: + fc_host_speed(shost) = FC_PORTSPEED_100GBIT; + break; default: - fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + fc_host_speed(shost) 
= FC_PORTSPEED_UNKNOWN; break; } } -- cgit From 4a1108d6caf7cd0f15d439bee6163ba00c7c1b33 Mon Sep 17 00:00:00 2001 From: Satish Kharat Date: Mon, 26 Jun 2017 17:49:06 -0700 Subject: scsi: fnic: changing queue command to return result DID_IMM_RETRY when rport is init Currently the queue command returns DID_NO_CONNECT anytime the rport is not in RPORT_ST_READY state. Changing it to return DID_NO_CONNECT only when the rport is in RPORT_ST_DELETE state. When the rport is in one of the init states retruning DID_IMM_RETRY. Signed-off-by: Sesidhar Baddela Signed-off-by: Satish Kharat Signed-off-by: Martin K. Petersen --- drivers/scsi/fnic/fnic_scsi.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index beea14c40a95..6c0646d62dfb 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -466,15 +466,27 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ } rp = rport->dd_data; - if (!rp || rp->rp_state != RPORT_ST_READY) { + if (!rp || rp->rp_state == RPORT_ST_DELETE) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "returning DID_NO_CONNECT for IO as rport is removed\n"); + "rport 0x%x removed, returning DID_NO_CONNECT\n", + rport->port_id); + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); sc->result = DID_NO_CONNECT<<16; done(sc); return 0; } + if (rp->rp_state != RPORT_ST_READY) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", + rport->port_id, rp->rp_state); + + sc->result = DID_IMM_RETRY << 16; + done(sc); + return 0; + } + if (lp->state != LPORT_ST_READY || !(lp->link_up)) return SCSI_MLQUEUE_HOST_BUSY; -- cgit From f62f9ffdb5ef683ef8cffb43932fa72cc3713e94 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 21 Jun 2017 11:35:09 +0200 Subject: scsi: sun_esp: fix device reference leaks Make sure to drop the reference to the dma device taken by of_find_device_by_node() on probe errors and on driver unbind. Fixes: 334ae614772b ("sparc: Kill SBUS DVMA layer.") Signed-off-by: Johan Hovold Signed-off-by: Martin K. Petersen --- drivers/scsi/sun_esp.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 7b6d4c2087d7..747ee64a78e1 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -566,6 +566,7 @@ static int esp_sbus_probe(struct platform_device *op) struct device_node *dp = op->dev.of_node; struct platform_device *dma_of = NULL; int hme = 0; + int ret; if (dp->parent && (!strcmp(dp->parent->name, "espdma") || @@ -580,7 +581,11 @@ static int esp_sbus_probe(struct platform_device *op) if (!dma_of) return -ENODEV; - return esp_sbus_probe_one(op, dma_of, hme); + ret = esp_sbus_probe_one(op, dma_of, hme); + if (ret) + put_device(&dma_of->dev); + + return ret; } static int esp_sbus_remove(struct platform_device *op) @@ -613,6 +618,8 @@ static int esp_sbus_remove(struct platform_device *op) dev_set_drvdata(&op->dev, NULL); + put_device(&dma_of->dev); + return 0; } -- cgit From 578079fab3f89eda73d7f3bb3bdfdf650ab1b8ec Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 23 Jun 2017 09:10:11 +0200 Subject: scsi: qla2xxx: Protect access to qpair members with qpair->qp_lock In qla2xx_start_scsi_mq() and qla2xx_dif_start_scsi_mq() we grab the qpair->qp_lock but do access members of the qpair before having the lock. 
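In rough terms, the corrected pattern looks like the sketch below (an editor's illustration using simplified stand-in types and a hypothetical start_io() helper, not the actual qla2xxx code): take qp_lock before touching any qpair member, and drop it on every early-return path.

#include <linux/spinlock.h>
#include <linux/errno.h>

/* Simplified stand-in for the qpair; illustration only. */
struct qpair_sketch {
	spinlock_t qp_lock;
	void *rsp;
	void *req;
};

static int start_io(struct qpair_sketch *qpair)
{
	unsigned long flags;
	void *rsp, *req;

	spin_lock_irqsave(&qpair->qp_lock, flags);	/* acquire the lock first */
	rsp = qpair->rsp;				/* only then read qpair members */
	req = qpair->req;

	if (!rsp || !req) {
		/* every early return must release the lock it now holds */
		spin_unlock_irqrestore(&qpair->qp_lock, flags);
		return -EIO;
	}

	/* ... build and queue the request while still holding qp_lock ... */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return 0;
}
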
Re-order the locking sequence to have all read and write access to qpair members under the qpair->qp_lock. Signed-off-by: Johannes Thumshirn Acked-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_iocb.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index d40fa000615c..a36c485fae50 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1770,6 +1770,9 @@ qla2xxx_start_scsi_mq(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; + /* Acquire qpair specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + /* Setup qpair pointers */ rsp = qpair->rsp; req = qpair->req; @@ -1779,15 +1782,14 @@ qla2xxx_start_scsi_mq(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != - QLA_SUCCESS) + if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { + spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; + } vha->marker_needed = 0; } - /* Acquire qpair specific lock */ - spin_lock_irqsave(&qpair->qp_lock, flags); - /* Check for room in outstanding command list. */ handle = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { @@ -1942,6 +1944,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) return qla2xxx_start_scsi_mq(sp); } + spin_lock_irqsave(&qpair->qp_lock, flags); + /* Setup qpair pointers */ rsp = qpair->rsp; req = qpair->req; @@ -1951,15 +1955,14 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != - QLA_SUCCESS) + if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { + spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; + } vha->marker_needed = 0; } - /* Acquire ring specific lock */ - spin_lock_irqsave(&qpair->qp_lock, flags); - /* Check for room in outstanding command list. */ handle = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { -- cgit From 8dc0da7981a3f9680c873e92e3ec17308377e3b9 Mon Sep 17 00:00:00 2001 From: Zang Leigang Date: Sat, 24 Jun 2017 19:14:32 +0800 Subject: scsi: ufs: flush eh_work when eh_work scheduled. Forget a condition: eh_work scheduled but do not start to work. Signed-off-by: Zang Leigang Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 88ccd63f83c1..5bc9dc14e075 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5809,7 +5809,8 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) do { spin_lock_irqsave(hba->host->host_lock, flags); if (!(work_pending(&hba->eh_work) || - hba->ufshcd_state == UFSHCD_STATE_RESET)) + hba->ufshcd_state == UFSHCD_STATE_RESET || + hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED)) break; spin_unlock_irqrestore(hba->host->host_lock, flags); dev_dbg(hba->dev, "%s: reset in progress\n", __func__); -- cgit From 62e62ffd95539b9220894a7900a619e0f3ef4756 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Tue, 27 Jun 2017 11:53:27 +0200 Subject: scsi: ses: do not add a device to an enclosure if enclosure_add_links() fails. 
The enclosure_add_device() function should fail if it can't create the relevant sysfs links. Cc: Signed-off-by: Maurizio Lombardi Tested-by: Douglas Miller Acked-by: James Bottomley Signed-off-by: Martin K. Petersen --- drivers/misc/enclosure.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index d3fe3ea902d4..eb29113e0bac 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component, struct device *dev) { struct enclosure_component *cdev; + int err; if (!edev || component >= edev->components) return -EINVAL; @@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component, if (cdev->dev == dev) return -EEXIST; - if (cdev->dev) + if (cdev->dev) { enclosure_remove_links(cdev); - - put_device(cdev->dev); + put_device(cdev->dev); + } cdev->dev = get_device(dev); - return enclosure_add_links(cdev); + err = enclosure_add_links(cdev); + if (err) { + put_device(cdev->dev); + cdev->dev = NULL; + } + return err; } EXPORT_SYMBOL_GPL(enclosure_add_device); -- cgit From f9279c968c257ee39b0d7bd2571a4d231a67bcc1 Mon Sep 17 00:00:00 2001 From: "Ewan D. Milne" Date: Tue, 27 Jun 2017 14:55:58 -0400 Subject: scsi: Add STARGET_CREATED_REMOVE state to scsi_target_state The addition of the STARGET_REMOVE state had the side effect of introducing a race condition that can cause a crash. scsi_target_reap_ref_release() checks the starget->state to see if it still in STARGET_CREATED, and if so, skips calling transport_remove_device() and device_del(), because the starget->state is only set to STARGET_RUNNING after scsi_target_add() has called device_add() and transport_add_device(). However, if an rport loss occurs while a target is being scanned, it can happen that scsi_remove_target() will be called while the starget is still in the STARGET_CREATED state. In this case, the starget->state will be set to STARGET_REMOVE, and as a result, scsi_target_reap_ref_release() will take the wrong path. The end result is a panic: [ 1255.356653] Oops: 0000 [#1] SMP [ 1255.360154] Modules linked in: x86_pkg_temp_thermal kvm_intel kvm irqbypass crc32c_intel ghash_clmulni_i [ 1255.393234] CPU: 5 PID: 149 Comm: kworker/u96:4 Tainted: G W 4.11.0+ #8 [ 1255.401879] Hardware name: Dell Inc. PowerEdge R320/08VT7V, BIOS 2.0.22 11/19/2013 [ 1255.410327] Workqueue: scsi_wq_6 fc_scsi_scan_rport [scsi_transport_fc] [ 1255.417720] task: ffff88060ca8c8c0 task.stack: ffffc900048a8000 [ 1255.424331] RIP: 0010:kernfs_find_ns+0x13/0xc0 [ 1255.429287] RSP: 0018:ffffc900048abbf0 EFLAGS: 00010246 [ 1255.435123] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 [ 1255.443083] RDX: 0000000000000000 RSI: ffffffff8188d659 RDI: 0000000000000000 [ 1255.451043] RBP: ffffc900048abc10 R08: 0000000000000000 R09: 0000012433fe0025 [ 1255.459005] R10: 0000000025e5a4b5 R11: 0000000025e5a4b5 R12: ffffffff8188d659 [ 1255.466972] R13: 0000000000000000 R14: ffff8805f55e5088 R15: 0000000000000000 [ 1255.474931] FS: 0000000000000000(0000) GS:ffff880616b40000(0000) knlGS:0000000000000000 [ 1255.483959] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1255.490370] CR2: 0000000000000068 CR3: 0000000001c09000 CR4: 00000000000406e0 [ 1255.498332] Call Trace: [ 1255.501058] kernfs_find_and_get_ns+0x31/0x60 [ 1255.505916] sysfs_unmerge_group+0x1d/0x60 [ 1255.510498] dpm_sysfs_remove+0x22/0x60 [ 1255.514783] device_del+0xf4/0x2e0 [ 1255.518577] ? 
device_remove_file+0x19/0x20 [ 1255.523241] attribute_container_class_device_del+0x1a/0x20 [ 1255.529457] transport_remove_classdev+0x4e/0x60 [ 1255.534607] ? transport_add_class_device+0x40/0x40 [ 1255.540046] attribute_container_device_trigger+0xb0/0xc0 [ 1255.546069] transport_remove_device+0x15/0x20 [ 1255.551025] scsi_target_reap_ref_release+0x25/0x40 [ 1255.556467] scsi_target_reap+0x2e/0x40 [ 1255.560744] __scsi_scan_target+0xaa/0x5b0 [ 1255.565312] scsi_scan_target+0xec/0x100 [ 1255.569689] fc_scsi_scan_rport+0xb1/0xc0 [scsi_transport_fc] [ 1255.576099] process_one_work+0x14b/0x390 [ 1255.580569] worker_thread+0x4b/0x390 [ 1255.584651] kthread+0x109/0x140 [ 1255.588251] ? rescuer_thread+0x330/0x330 [ 1255.592730] ? kthread_park+0x60/0x60 [ 1255.596815] ret_from_fork+0x29/0x40 [ 1255.600801] Code: 24 08 48 83 42 40 01 5b 41 5c 5d c3 66 66 66 2e 0f 1f 84 00 00 00 00 00 66 66 66 66 90 [ 1255.621876] RIP: kernfs_find_ns+0x13/0xc0 RSP: ffffc900048abbf0 [ 1255.628479] CR2: 0000000000000068 [ 1255.632756] ---[ end trace 34a69ba0477d036f ]--- Fix this by adding another scsi_target state STARGET_CREATED_REMOVE to distinguish this case. Fixes: f05795d3d771 ("scsi: Add intermediate STARGET_REMOVE state to scsi_target_state") Reported-by: David Jeffery Signed-off-by: Ewan D. Milne Cc: Reviewed-by: Laurence Oberman Tested-by: Laurence Oberman Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_scan.c | 5 +++-- drivers/scsi/scsi_sysfs.c | 8 ++++++-- include/scsi/scsi_device.h | 1 + 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 3c4403210a1a..3832ba57151b 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -385,11 +385,12 @@ static void scsi_target_reap_ref_release(struct kref *kref) = container_of(kref, struct scsi_target, reap_ref); /* - * if we get here and the target is still in the CREATED state that + * if we get here and the target is still in a CREATED state that * means it was allocated but never made visible (because a scan * turned up no LUNs), so don't call device_del() on it. 
*/ - if (starget->state != STARGET_CREATED) { + if ((starget->state != STARGET_CREATED) && + (starget->state != STARGET_CREATED_REMOVE)) { transport_remove_device(&starget->dev); device_del(&starget->dev); } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index ce470f62a8ae..d6984df71f1c 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1394,11 +1394,15 @@ restart: spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { if (starget->state == STARGET_DEL || - starget->state == STARGET_REMOVE) + starget->state == STARGET_REMOVE || + starget->state == STARGET_CREATED_REMOVE) continue; if (starget->dev.parent == dev || &starget->dev == dev) { kref_get(&starget->reap_ref); - starget->state = STARGET_REMOVE; + if (starget->state == STARGET_CREATED) + starget->state = STARGET_CREATED_REMOVE; + else + starget->state = STARGET_REMOVE; spin_unlock_irqrestore(shost->host_lock, flags); __scsi_remove_target(starget); scsi_target_reap(starget); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index d3fb98f72a03..b41ee9d8a042 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -248,6 +248,7 @@ enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING, STARGET_REMOVE, + STARGET_CREATED_REMOVE, STARGET_DEL, }; -- cgit From acfeb23b29894deaee65d63c55bea09183f6b538 Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 28 Jun 2017 12:14:21 -0500 Subject: scsi: cxlflash: Avoid double free of character device The device_unregister() service used when cleaning up the character device is already responsible for the internal state associated with the device upon successful creation. As the cxlflash driver does not obtain a second reference to the character device, the explicit call to put_device() is not required and can lead to an inconsistent sysfs among other issues as the reference is no longer valid after the first put_device() is performed. Remove the unnecessary put_device() to remedy this issue. Fixes: a834a36b57d9 ("scsi: cxlflash: Create character device to provide host management interface") Signed-off-by: Matthew R. Ochs Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 7a787b6e21c4..455564fb6c4d 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -923,7 +923,6 @@ static void cxlflash_put_minor(int minor) */ static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg) { - put_device(cfg->chardev); device_unregister(cfg->chardev); cfg->chardev = NULL; cdev_del(&cfg->cdev); -- cgit From 32abbedaafde5a0c1edfd07369dde73a4fda2554 Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 28 Jun 2017 12:14:30 -0500 Subject: scsi: cxlflash: Update send_tmf() parameters The current send_tmf() implementation is based on the caller providing a SCSI command reference. In reality all that is needed is a SCSI device reference as the routine uses a private command. Refactor send_tmf() to pass the private adapter configuration reference and a SCSI device reference. As a nice side effect, this will ease the burden of converting caller routines to be based solely off of a SCSI device reference. Signed-off-by: Matthew R. Ochs Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/cxlflash/main.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 455564fb6c4d..7054e11f732a 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -459,21 +459,20 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp, /** * send_tmf() - sends a Task Management Function (TMF) - * @afu: AFU to checkout from. - * @scp: SCSI command from stack describing target. + * @cfg: Internal structure associated with the host. + * @sdev: SCSI device destined for TMF. * @tmfcmd: TMF command to send. * * Return: * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure */ -static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) +static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev, + u64 tmfcmd) { - struct Scsi_Host *host = scp->device->host; - struct cxlflash_cfg *cfg = shost_priv(host); + struct afu *afu = cfg->afu; struct afu_cmd *cmd = NULL; struct device *dev = &cfg->dev->dev; - int hwq_index = cmd_to_target_hwq(host, scp, afu); - struct hwq *hwq = get_hwq(afu, hwq_index); + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); char *buf = NULL; ulong lock_flags; int rc = 0; @@ -500,12 +499,12 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) cmd->parent = afu; cmd->cmd_tmf = true; - cmd->hwq_index = hwq_index; + cmd->hwq_index = hwq->index; cmd->rcb.ctx_id = hwq->ctx_hndl; cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; - cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); - cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); + cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel); + cmd->rcb.lun_id = lun_to_lunid(sdev->lun); cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_SUP_UNDERRUN | SISL_REQ_FLAGS_TMF_CMD); @@ -2430,15 +2429,15 @@ out: static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) { int rc = SUCCESS; - struct Scsi_Host *host = scp->device->host; + struct scsi_device *sdev = scp->device; + struct Scsi_Host *host = sdev->host; struct cxlflash_cfg *cfg = shost_priv(host); struct device *dev = &cfg->dev->dev; - struct afu *afu = cfg->afu; int rcr = 0; dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, - scp->device->channel, scp->device->id, scp->device->lun, + sdev->channel, sdev->id, sdev->lun, get_unaligned_be32(&((u32 *)scp->cmnd)[0]), get_unaligned_be32(&((u32 *)scp->cmnd)[1]), get_unaligned_be32(&((u32 *)scp->cmnd)[2]), @@ -2447,7 +2446,7 @@ static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) retry: switch (cfg->state) { case STATE_NORMAL: - rcr = send_tmf(afu, scp, TMF_LUN_RESET); + rcr = send_tmf(cfg, sdev, TMF_LUN_RESET); if (unlikely(rcr)) rc = FAILED; break; -- cgit From 5a4d9d7790422c4a92d8ca52e37c1e2b45d42c27 Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 28 Jun 2017 12:14:37 -0500 Subject: scsi: cxlflash: Update debug prints in reset handlers The device and host reset handler contain debug prints to help identify the entities being reset. Today these reset handlers are based on a SCSI EH design that uses a SCSI command reference as a means of identifying the target entity. As such, the debug trace includes the SCSI command pointer and associated CDB. This is not necessary as the SCSI command is simply the messenger in these scenarios. 
Refactor the debug prints in the host and reset handlers to only present information that is applicable given the function scope. Signed-off-by: Matthew R. Ochs Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/main.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 7054e11f732a..077f62e208aa 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -2435,14 +2435,8 @@ static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) struct device *dev = &cfg->dev->dev; int rcr = 0; - dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " - "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, - sdev->channel, sdev->id, sdev->lun, - get_unaligned_be32(&((u32 *)scp->cmnd)[0]), - get_unaligned_be32(&((u32 *)scp->cmnd)[1]), - get_unaligned_be32(&((u32 *)scp->cmnd)[2]), - get_unaligned_be32(&((u32 *)scp->cmnd)[3])); - + dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__, + host->host_no, sdev->channel, sdev->id, sdev->lun); retry: switch (cfg->state) { case STATE_NORMAL: @@ -2483,13 +2477,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) struct cxlflash_cfg *cfg = shost_priv(host); struct device *dev = &cfg->dev->dev; - dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " - "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, - scp->device->channel, scp->device->id, scp->device->lun, - get_unaligned_be32(&((u32 *)scp->cmnd)[0]), - get_unaligned_be32(&((u32 *)scp->cmnd)[1]), - get_unaligned_be32(&((u32 *)scp->cmnd)[2]), - get_unaligned_be32(&((u32 *)scp->cmnd)[3])); + dev_dbg(dev, "%s: %d\n", __func__, host->host_no); switch (cfg->state) { case STATE_NORMAL: -- cgit From e6b53a752ba76e1268b04052c9ec06555e3f6995 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Thu, 29 Jun 2017 13:13:55 +0530 Subject: scsi: ibmvscsi: constify dev_pm_ops structures. dev_pm_ops are not supposed to change at runtime. All functions working with dev_pm_ops provided by work with const dev_pm_ops. So mark the non-const structs as const. File size before: text data bss dec hex filename 17956 1456 8 19420 4bdc drivers/scsi/ibmvscsi/ibmvscsi.o File size After adding 'const': text data bss dec hex filename 18164 1264 8 19436 4bec drivers/scsi/ibmvscsi/ibmvscsi.o Signed-off-by: Arvind Yadav Acked-by: Tyrel Datwyler Signed-off-by: Martin K. Petersen --- drivers/scsi/ibmvscsi/ibmvscsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 1deb0a9f14a6..da22b3665cb0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -2336,7 +2336,7 @@ static struct vio_device_id ibmvscsi_device_table[] = { }; MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); -static struct dev_pm_ops ibmvscsi_pm_ops = { +static const struct dev_pm_ops ibmvscsi_pm_ops = { .resume = ibmvscsi_resume }; -- cgit From e4a80c31f2e863295cb2351f23ae486b0513a705 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Thu, 29 Jun 2017 13:24:41 +0530 Subject: scsi: ibmvfc: constify dev_pm_ops structures. dev_pm_ops are not supposed to change at runtime. All functions working with dev_pm_ops provided by work with const dev_pm_ops. So mark the non-const structs as const. 
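The general shape of the change is sketched below (a generic illustration with made-up names, not the ibmvfc code itself); declaring the ops table const lets the compiler place it in read-only data instead of writable data:

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical driver-local resume callback, for illustration only. */
static int example_resume(struct device *dev)
{
	return 0;
}

/* const: the table is never modified at runtime, so it can live in .rodata. */
static const struct dev_pm_ops example_pm_ops = {
	.resume = example_resume,
};
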
File size before: text data bss dec hex filename 41937 1296 20 43253 a8f5 drivers/scsi/ibmvscsi/ibmvfc.o File size After adding 'const': text data bss dec hex filename 42129 1104 20 43253 a8f5 drivers/scsi/ibmvscsi/ibmvfc.o Signed-off-by: Arvind Yadav Acked-by: Tyrel Datwyler Signed-off-by: Martin K. Petersen --- drivers/scsi/ibmvscsi/ibmvfc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 26cd3c28186a..cc4e05be8d4a 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4935,7 +4935,7 @@ static struct vio_device_id ibmvfc_device_table[] = { }; MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); -static struct dev_pm_ops ibmvfc_pm_ops = { +static const struct dev_pm_ops ibmvfc_pm_ops = { .resume = ibmvfc_resume }; -- cgit From f557e32c0023ea0d67cdaa81b3398550dc1e4876 Mon Sep 17 00:00:00 2001 From: Xiaofei Tan Date: Thu, 29 Jun 2017 21:02:14 +0800 Subject: scsi: hisi_sas: optimise DMA slot memory Currently we allocate 3 sets of DMA memories from separate pools for each slot. This is inefficient in terms of memory usage (buffers are less than 1 page in size, so we lose due to alignment), and also time spent in doing 3 allocations + de-allocations per slot, instead of 1. To optimise, combine the 3 DMA buffers into a single buffer from a single pool. Signed-off-by: Xiaofei Tan Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 47 ++++++++++++++------- drivers/scsi/hisi_sas/hisi_sas_main.c | 75 +++++++++------------------------- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 31 +++++++------- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 37 ++++++++--------- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 36 ++++++++-------- 5 files changed, 104 insertions(+), 122 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 22dd48bc2fa0..a722f2bd72ab 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -34,10 +34,24 @@ #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES #define HISI_SAS_RESET_BIT 0 -#define HISI_SAS_STATUS_BUF_SZ \ - (sizeof(struct hisi_sas_err_record) + 1024) -#define HISI_SAS_COMMAND_TABLE_SZ \ - (((sizeof(union hisi_sas_command_table)+3)/4)*4) +#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer)) +#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) + +#define hisi_sas_status_buf_addr(buf) \ + (buf + offsetof(struct hisi_sas_slot_buf_table, status_buffer)) +#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr(slot->buf) +#define hisi_sas_status_buf_addr_dma(slot) \ + hisi_sas_status_buf_addr(slot->buf_dma) + +#define hisi_sas_cmd_hdr_addr(buf) \ + (buf + offsetof(struct hisi_sas_slot_buf_table, command_header)) +#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr(slot->buf) +#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr(slot->buf_dma) + +#define hisi_sas_sge_addr(buf) \ + (buf + offsetof(struct hisi_sas_slot_buf_table, sge_page)) +#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf) +#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma) #define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024) #define HISI_SAS_MAX_SMP_RESP_SZ 1028 @@ -139,14 +153,10 @@ struct hisi_sas_slot { int cmplt_queue_slot; int idx; int abort; + void *buf; + dma_addr_t buf_dma; void *cmd_hdr; dma_addr_t cmd_hdr_dma; - void *status_buffer; - dma_addr_t status_buffer_dma; 
- void *command_table; - dma_addr_t command_table_dma; - struct hisi_sas_sge_page *sge_page; - dma_addr_t sge_page_dma; struct work_struct abort_slot; struct timer_list internal_abort_timer; }; @@ -232,10 +242,8 @@ struct hisi_hba { int queue_count; - struct dma_pool *sge_page_pool; + struct dma_pool *buffer_pool; struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES]; - struct dma_pool *command_table_pool; - struct dma_pool *status_buffer_pool; struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES]; dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES]; void *complete_hdr[HISI_SAS_MAX_QUEUES]; @@ -347,7 +355,7 @@ struct hisi_sas_command_table_stp { #define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE struct hisi_sas_sge_page { struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; -}; +} __aligned(16); struct hisi_sas_command_table_ssp { struct ssp_frame_hdr hdr; @@ -366,6 +374,17 @@ union hisi_sas_command_table { struct hisi_sas_command_table_ssp ssp; struct hisi_sas_command_table_smp smp; struct hisi_sas_command_table_stp stp; +} __aligned(16); + +struct hisi_sas_status_buffer { + struct hisi_sas_err_record err; + u8 iu[1024]; +} __aligned(16); + +struct hisi_sas_slot_buf_table { + struct hisi_sas_status_buffer status_buffer; + union hisi_sas_command_table command_header; + struct hisi_sas_sge_page sge_page; }; extern struct scsi_transport_template *hisi_sas_stt; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index e2f8d928e579..4022c3f8295f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -87,8 +87,10 @@ void hisi_sas_sata_done(struct sas_task *task, { struct task_status_struct *ts = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; - struct dev_to_host_fis *d2h = slot->status_buffer + - sizeof(struct hisi_sas_err_record); + struct hisi_sas_status_buffer *status_buf = + hisi_sas_status_buf_addr_mem(slot); + u8 *iu = &status_buf->iu[0]; + struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; resp->frame_len = sizeof(struct dev_to_host_fis); memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); @@ -183,17 +185,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, atomic64_dec(&sas_dev->running_req); } - if (slot->command_table) - dma_pool_free(hisi_hba->command_table_pool, - slot->command_table, slot->command_table_dma); + if (slot->buf) + dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); - if (slot->status_buffer) - dma_pool_free(hisi_hba->status_buffer_pool, - slot->status_buffer, slot->status_buffer_dma); - - if (slot->sge_page) - dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page, - slot->sge_page_dma); list_del_init(&slot->entry); slot->task = NULL; @@ -362,24 +356,15 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq task->lldd_task = slot; INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort); - slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool, - GFP_ATOMIC, - &slot->status_buffer_dma); - if (!slot->status_buffer) { + slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, + GFP_ATOMIC, &slot->buf_dma); + if (!slot->buf) { rc = -ENOMEM; goto err_out_slot_buf; } - memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ); - - slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool, - GFP_ATOMIC, - &slot->command_table_dma); - if (!slot->command_table) { - rc = -ENOMEM; - goto err_out_status_buf; - } - memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ); memset(slot->cmd_hdr, 0, 
sizeof(struct hisi_sas_cmd_hdr)); + memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); + memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); switch (task->task_proto) { case SAS_PROTOCOL_SMP: @@ -402,9 +387,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq if (rc) { dev_err(dev, "task prep: rc = 0x%x\n", rc); - if (slot->sge_page) - goto err_out_sge; - goto err_out_command_table; + goto err_out_buf; } list_add_tail(&slot->entry, &sas_dev->list); @@ -419,15 +402,9 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq return 0; -err_out_sge: - dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page, - slot->sge_page_dma); -err_out_command_table: - dma_pool_free(hisi_hba->command_table_pool, slot->command_table, - slot->command_table_dma); -err_out_status_buf: - dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer, - slot->status_buffer_dma); +err_out_buf: + dma_pool_free(hisi_hba->buffer_pool, slot->buf, + slot->buf_dma); err_out_slot_buf: /* Nothing to be done */ err_out_tag: @@ -1608,16 +1585,9 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) goto err_out; } - s = HISI_SAS_STATUS_BUF_SZ; - hisi_hba->status_buffer_pool = dma_pool_create("status_buffer", - dev, s, 16, 0); - if (!hisi_hba->status_buffer_pool) - goto err_out; - - s = HISI_SAS_COMMAND_TABLE_SZ; - hisi_hba->command_table_pool = dma_pool_create("command_table", - dev, s, 16, 0); - if (!hisi_hba->command_table_pool) + s = sizeof(struct hisi_sas_slot_buf_table); + hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0); + if (!hisi_hba->buffer_pool) goto err_out; s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); @@ -1652,11 +1622,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) if (!hisi_hba->slot_index_tags) goto err_out; - hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev, - sizeof(struct hisi_sas_sge_page), 16, 0); - if (!hisi_hba->sge_page_pool) - goto err_out; - s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; hisi_hba->initial_fis = dma_alloc_coherent(dev, s, &hisi_hba->initial_fis_dma, GFP_KERNEL); @@ -1703,9 +1668,7 @@ void hisi_sas_free(struct hisi_hba *hisi_hba) hisi_hba->complete_hdr_dma[i]); } - dma_pool_destroy(hisi_hba->status_buffer_pool); - dma_pool_destroy(hisi_hba->command_table_pool); - dma_pool_destroy(hisi_hba->sge_page_pool); + dma_pool_destroy(hisi_hba->buffer_pool); s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); if (hisi_hba->itct) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index afa87d4c367a..08eca20b0b81 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -939,6 +939,7 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, struct scatterlist *scatter, int n_elem) { + struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; @@ -949,13 +950,8 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, return -EINVAL; } - slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC, - &slot->sge_page_dma); - if (!slot->sge_page) - return -ENOMEM; - for_each_sg(scatter, sg, n_elem, i) { - struct hisi_sas_sge *entry = &slot->sge_page->sge[i]; + struct hisi_sas_sge *entry = &sge_page->sge[i]; entry->addr = cpu_to_le64(sg_dma_address(sg)); entry->page_ctrl_0 = entry->page_ctrl_1 = 0; @@ -963,7 +959,7 @@ static int 
prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, entry->data_off = 0; } - hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma); + hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); @@ -1026,7 +1022,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); return 0; @@ -1107,10 +1103,11 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); - hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr); if (task->ssp_task.enable_first_burst) { fburst = (1 << 7); dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF; @@ -1147,7 +1144,8 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct task_status_struct *ts = &task->task_status; - struct hisi_sas_err_record_v1 *err_record = slot->status_buffer; + struct hisi_sas_err_record_v1 *err_record = + hisi_sas_status_buf_addr_mem(slot); struct device *dev = hisi_hba->dev; switch (task->task_proto) { @@ -1364,8 +1362,11 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, switch (task->task_proto) { case SAS_PROTOCOL_SSP: { - struct ssp_response_iu *iu = slot->status_buffer + - sizeof(struct hisi_sas_err_record); + struct hisi_sas_status_buffer *status_buffer = + hisi_sas_status_buf_addr_mem(slot); + struct ssp_response_iu *iu = (struct ssp_response_iu *) + &status_buffer->iu[0]; + sas_ssp_task_response(dev, task, iu); break; } @@ -1382,7 +1383,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); memcpy(to + sg_resp->offset, - slot->status_buffer + + hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), sg_dma_len(sg_resp)); kunmap_atomic(to); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 341a0bf6c667..551d103c27f1 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -1492,6 +1492,7 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, struct scatterlist *scatter, int n_elem) { + struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; @@ -1502,13 +1503,8 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, return -EINVAL; } - slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC, - &slot->sge_page_dma); - if (!slot->sge_page) - return -ENOMEM; - for_each_sg(scatter, sg, n_elem, i) { - struct hisi_sas_sge *entry = &slot->sge_page->sge[i]; + struct hisi_sas_sge *entry = &sge_page->sge[i]; entry->addr = cpu_to_le64(sg_dma_address(sg)); entry->page_ctrl_0 = entry->page_ctrl_1 = 0; @@ -1516,7 +1512,7 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, entry->data_off = 0; } - hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma); + hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len 
= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); @@ -1580,7 +1576,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); return 0; @@ -1654,10 +1650,11 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); - hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (!is_tmf) { @@ -1884,7 +1881,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, int err_phase) { struct task_status_struct *ts = &task->task_status; - struct hisi_sas_err_record_v2 *err_record = slot->status_buffer; + struct hisi_sas_err_record_v2 *err_record = + hisi_sas_status_buf_addr_mem(slot); u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type); u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type); u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type); @@ -2273,8 +2271,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) switch (task->task_proto) { case SAS_PROTOCOL_SSP: { - struct ssp_response_iu *iu = slot->status_buffer + - sizeof(struct hisi_sas_err_record); + struct hisi_sas_status_buffer *status_buffer = + hisi_sas_status_buf_addr_mem(slot); + struct ssp_response_iu *iu = (struct ssp_response_iu *) + &status_buffer->iu[0]; sas_ssp_task_response(dev, task, iu); break; @@ -2292,7 +2292,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); memcpy(to + sg_resp->offset, - slot->status_buffer + + hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), sg_dma_len(sg_resp)); kunmap_atomic(to); @@ -2398,12 +2398,11 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, return rc; } - hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); - hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - buf_cmd = slot->command_table; + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot); if (likely(!task->ata_task.device_control_reg_update)) task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index c998b81f33de..83d2dca1c650 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -743,6 +743,7 @@ static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, struct scatterlist *scatter, int n_elem) { + struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; @@ -753,13 +754,8 @@ static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, return -EINVAL; } - slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC, - 
&slot->sge_page_dma); - if (!slot->sge_page) - return -ENOMEM; - for_each_sg(scatter, sg, n_elem, i) { - struct hisi_sas_sge *entry = &slot->sge_page->sge[i]; + struct hisi_sas_sge *entry = &sge_page->sge[i]; entry->addr = cpu_to_le64(sg_dma_address(sg)); entry->page_ctrl_0 = entry->page_ctrl_1 = 0; @@ -767,7 +763,8 @@ static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, entry->data_off = 0; } - hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma); + hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); + hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); return 0; @@ -833,12 +830,13 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); - hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); - memcpy(buf_cmd, ssp_task->LUN, 8); + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr); + memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (!is_tmf) { buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3); memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); @@ -917,7 +915,7 @@ static int prep_smp_v3_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); return 0; @@ -1012,10 +1010,10 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); - hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); - hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - buf_cmd = slot->command_table; + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot); if (likely(!task->ata_task.device_control_reg_update)) task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ @@ -1283,7 +1281,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v3_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; - struct hisi_sas_err_record_v3 *record = slot->status_buffer; + struct hisi_sas_err_record_v3 *record = + hisi_sas_status_buf_addr_mem(slot); u32 dma_rx_err_type = record->dma_rx_err_type; u32 trans_tx_fail_type = record->trans_tx_fail_type; @@ -1402,7 +1401,8 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) switch (task->task_proto) { case SAS_PROTOCOL_SSP: { - struct ssp_response_iu *iu = slot->status_buffer + + struct ssp_response_iu *iu = + hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record); sas_ssp_task_response(dev, task, iu); @@ -1420,7 +1420,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); memcpy(to + sg_resp->offset, - slot->status_buffer + + hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), sg_dma_len(sg_resp)); kunmap_atomic(to); -- cgit From c4031db72b4fd2640ff3a7240701397abaacf048 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 30 
Jun 2017 11:02:51 +0300 Subject: scsi: lpfc: spin_lock_irq() is not nestable We're calling spin_lock_irq() multiple times, the problem is that on the first spin_unlock_irq() then we will re-enable IRQs and we don't want that. Fixes: 966bb5b71196 ("scsi: lpfc: Break up IO ctx list into a separate get and put list") Signed-off-by: Dan Carpenter Acked-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 7dc061a14f95..afc523209845 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -866,44 +866,44 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) unsigned long flags; spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags); - spin_lock_irq(&phba->sli4_hba.nvmet_ctx_put_lock); + spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); list_for_each_entry_safe(ctx_buf, next_ctx_buf, &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) { - spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); list_del_init(&ctx_buf->list); - spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); ctx_buf->sglq->state = SGL_FREED; ctx_buf->sglq->ndlp = NULL; - spin_lock_irq(&phba->sli4_hba.sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); list_add_tail(&ctx_buf->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); - spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); } list_for_each_entry_safe(ctx_buf, next_ctx_buf, &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) { - spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); list_del_init(&ctx_buf->list); - spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); ctx_buf->sglq->state = SGL_FREED; ctx_buf->sglq->ndlp = NULL; - spin_lock_irq(&phba->sli4_hba.sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); list_add_tail(&ctx_buf->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); - spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); } - spin_unlock_irq(&phba->sli4_hba.nvmet_ctx_put_lock); + spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags); } -- cgit From 88e96174206e09e8eef7636067f4e24a9e67139a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 30 Jun 2017 11:03:53 +0300 Subject: scsi: lpfc: don't double count abort errors If lpfc_nvmet_unsol_fcp_issue_abort() fails then we accidentally increment "tgtp->xmt_abort_rsp_error" and then two lines later we increment it a second time. Fixes: 547077a44b3b ("scsi: lpfc: Adding additional stats counters for nvme.") Signed-off-by: Dan Carpenter Acked-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index afc523209845..fbeec344c6cc 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2583,7 +2583,6 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, } aerr: - atomic_inc(&tgtp->xmt_abort_rsp_error); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, -- cgit From 0bf0efa1f1a5230794618e633e54d68e088dfcf5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 30 Jun 2017 14:47:41 +0100 Subject: scsi: qla2xxx: fix a bunch of typos and spelling mistakes Fix the following typos/spelling mistakes: "attribure" -> "attribute" "suppored" -> "supported" "Symobilic" -> "Symbolic" "iteself" -> "itself" "reqeust" -> "request" "nvme_wait_on_comand" -> "nvme_wait_on_command" "bount" -> "bound" "captrue_mask" -> "capture_mask" "tempelate" -> "template" ..and also unwrap a line to fix a checkpatch warning. Signed-off-by: Colin Ian King Acked-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_attr.c | 2 +- drivers/scsi/qla2xxx/qla_bsg.c | 2 +- drivers/scsi/qla2xxx/qla_init.c | 2 +- drivers/scsi/qla2xxx/qla_isr.c | 3 +-- drivers/scsi/qla2xxx/qla_mbx.c | 2 +- drivers/scsi/qla2xxx/qla_nvme.c | 4 ++-- drivers/scsi/qla2xxx/qla_nx.c | 4 ++-- drivers/scsi/qla2xxx/qla_nx2.c | 2 +- 8 files changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 6dd984203666..08a1feb3a195 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -929,7 +929,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) iter->name, ret); else ql_dbg(ql_dbg_init, vha, 0x00f4, - "Successfully created sysfs %s binary attribure.\n", + "Successfully created sysfs %s binary attribute.\n", iter->name); } } diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index e093795a0371..2ea0ef93f5cb 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -293,7 +293,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) if (bsg_job->request_payload.sg_cnt > 1 || bsg_job->reply_payload.sg_cnt > 1) { ql_dbg(ql_dbg_user, vha, 0x7002, - "Multiple SG's are not suppored for ELS requests, " + "Multiple SG's are not supported for ELS requests, " "request_sg_cnt=%x reply_sg_cnt=%x.\n", bsg_job->request_payload.sg_cnt, bsg_job->reply_payload.sg_cnt); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 8a5f5ef069ae..072ad1aa5505 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4660,7 +4660,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } else if (qla2x00_rsnn_nn(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x209b, - "Register Symobilic Node Name failed.\n"); + "Register Symbolic Node Name failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 011faa1dc618..6c6e624a5aa6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -432,8 +432,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) "Register: 0x%x%x.\n", mb[7], mb[3]); if (err_level == ERR_LEVEL_NON_FATAL) { ql_log(ql_log_warn, vha, 0x5063, - "Not a fatal error, f/w has recovered " - "iteself.\n"); + "Not a fatal error, f/w has recovered itself.\n"); } else 
if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { ql_log(ql_log_fatal, vha, 0x5064, "Recoverable Fatal error: Chip reset " diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 0764b6172ed1..7c6d1a404011 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3893,7 +3893,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) rval = QLA_FUNCTION_FAILED; } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10c5, - "Failed to complet IOCB -- completion status (%x).\n", + "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(vce->comp_status)); rval = QLA_FUNCTION_FAILED; } else { diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 1da8fa8f641d..53e58e9daba8 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -489,7 +489,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct nvme_private *priv; if (!fd) { - ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP reqeust\n"); + ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n"); return rval; } @@ -626,7 +626,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp) if (!rval) { if (!qla_nvme_wait_on_command(sp)) ql_log(ql_log_warn, NULL, 0x2112, - "nvme_wait_on_comand timed out waiting on sp=%p\n", + "nvme_wait_on_command timed out waiting on sp=%p\n", sp); } } diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 0a1723cc08cf..a77c33987703 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -782,7 +782,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb009, - "%s out of bount memory " + "%s out of bound memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; @@ -4250,7 +4250,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha) ql_dbg(ql_dbg_p3p, vha, 0xb040, "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" - "entry_type: 0x%x, captrue_mask: 0x%x\n", + "entry_type: 0x%x, capture_mask: 0x%x\n", __func__, i, data_ptr, entry_hdr, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index dc1ec9b61027..0aa9c38bf347 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -1572,7 +1572,7 @@ qla8044_read_reset_template(struct scsi_qla_host *vha) /* Copy rest of the template */ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) { ql_log(ql_log_fatal, vha, 0xb0bd, - "%s: Failed to read reset tempelate\n", __func__); + "%s: Failed to read reset template\n", __func__); goto exit_read_template_error; } -- cgit From 9f80efda8f09283bb0fb7f8342a3e5c93857a7f2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 30 Jun 2017 14:59:50 +0100 Subject: scsi: snic: fix a couple of spelling mistakes/typos Trivial fix to spelling mistakes/typos: "requrest_irq" -> "request_irq" "Firmwqre" -> "Firmware" Signed-off-by: Colin Ian King Signed-off-by: Martin K. 
Petersen --- drivers/scsi/snic/snic_isr.c | 4 ++-- drivers/scsi/snic/snic_scsi.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c index d859501e4ccd..c4da3673f2ae 100644 --- a/drivers/scsi/snic/snic_isr.c +++ b/drivers/scsi/snic/snic_isr.c @@ -141,7 +141,7 @@ snic_request_intr(struct snic *snic) snic->msix[i].devid); if (ret) { SNIC_HOST_ERR(snic->shost, - "MSI-X: requrest_irq(%d) failed %d\n", + "MSI-X: request_irq(%d) failed %d\n", i, ret); snic_free_intr(snic); @@ -151,7 +151,7 @@ snic_request_intr(struct snic *snic) } return ret; -} /* end of snic_requrest_intr */ +} /* end of snic_request_intr */ int snic_set_intr_mode(struct snic *snic) diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c index 05c3a7282d4a..d8a376b7882d 100644 --- a/drivers/scsi/snic/snic_scsi.c +++ b/drivers/scsi/snic/snic_scsi.c @@ -1260,7 +1260,7 @@ snic_io_cmpl_handler(struct vnic_dev *vdev, default: SNIC_BUG_ON(1); SNIC_SCSI_DBG(snic->shost, - "Unknown Firmwqre completion request type %d\n", + "Unknown Firmware completion request type %d\n", fwreq->hdr.type); break; } -- cgit From bcda771b2f7e4f2a7577eef62ddeb61394e03b69 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 30 Jun 2017 18:10:40 +0200 Subject: scsi: qla2xxx: avoid unused-function warning When NVMe support is disabled, we get a couple of harmless warnings: drivers/scsi/qla2xxx/qla_nvme.c:667:13: error: 'qla_nvme_unregister_remote_port' defined but not used [-Werror=unused-function] drivers/scsi/qla2xxx/qla_nvme.c:634:13: error: 'qla_nvme_abort_all' defined but not used [-Werror=unused-function] drivers/scsi/qla2xxx/qla_nvme.c:604:12: error: 'qla_nvme_wait_on_rport_del' defined but not used [-Werror=unused-function] This replaces the preprocessor checks in the code with equivalent compiler conditionals, which lets gcc drop the unused functions without warning, and is nicer to read. Fixes: e84067d74301 ("scsi: qla2xxx: Add FC-NVMe F/W initialization and transport registration") Signed-off-by: Arnd Bergmann Acked-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/Kconfig | 1 + drivers/scsi/qla2xxx/qla_nvme.c | 20 ++++++++++++-------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index de952935b5d2..036cc3f217b1 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -2,6 +2,7 @@ config SCSI_QLA_FC tristate "QLogic QLA2XXX Fibre Channel Support" depends on PCI && SCSI depends on SCSI_FC_ATTRS + depends on NVME_FC || !NVME_FC select FW_LOADER select BTREE ---help--- diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 53e58e9daba8..ec15a49532a6 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -17,10 +17,12 @@ static void qla_nvme_unregister_remote_port(struct work_struct *); int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport) { -#if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_rport *rport; int ret; + if (!IS_ENABLED(CONFIG_NVME_FC)) + return 0; + if (fcport->nvme_flag & NVME_FLAG_REGISTERED) return 0; @@ -78,7 +80,6 @@ int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport) init_waitqueue_head(&fcport->nvme_waitQ); rport->fcport = fcport; list_add_tail(&rport->list, &vha->nvme_rport_list); -#endif return 0; } @@ -666,11 +667,13 @@ static void qla_nvme_abort_all(fc_port_t *fcport) static void qla_nvme_unregister_remote_port(struct work_struct *work) { -#if (IS_ENABLED(CONFIG_NVME_FC)) struct fc_port *fcport = container_of(work, struct fc_port, nvme_del_work); struct nvme_rport *rport, *trport; + if (!IS_ENABLED(CONFIG_NVME_FC)) + return; + list_for_each_entry_safe(rport, trport, &fcport->vha->nvme_rport_list, list) { if (rport->fcport == fcport) { @@ -680,16 +683,17 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work) fcport->nvme_remote_port); } } -#endif } void qla_nvme_delete(scsi_qla_host_t *vha) { -#if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_rport *rport, *trport; fc_port_t *fcport; int nv_ret; + if (!IS_ENABLED(CONFIG_NVME_FC)) + return; + list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) { fcport = rport->fcport; @@ -711,17 +715,18 @@ void qla_nvme_delete(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0x2115, "Unregister of localport failed\n"); } -#endif } void qla_nvme_register_hba(scsi_qla_host_t *vha) { -#if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_fc_port_template *tmpl; struct qla_hw_data *ha; struct nvme_fc_port_info pinfo; int ret; + if (!IS_ENABLED(CONFIG_NVME_FC)) + return; + ha = vha->hw; tmpl = &qla_nvme_fc_transport; @@ -752,5 +757,4 @@ void qla_nvme_register_hba(scsi_qla_host_t *vha) atomic_set(&vha->nvme_ref_count, 1); vha->nvme_local_port->private = vha; init_waitqueue_head(&vha->nvme_waitQ); -#endif } -- cgit From c345c6ca13825d1a15de5399226802433dd30f8c Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Fri, 30 Jun 2017 19:32:53 -0700 Subject: qla2xxx: Fix NVMe entry_type for iocb packet on BE system This patch fixes incorrect assignment for entry_type field for Continuation Type iocb packet on BE system. This was caught by -Woverflow warning on BE system compilation. For Continuation Type iocb driver needs to write complete 32 bit value to initialize other field members in structure to 0. 
Following warning is seen on BE system compile: drivers/scsi/qla2xxx/qla_nvme.c: In function 'qla2x00_start_nvme_mq': include/uapi/linux/byteorder/big_endian.h:32:26: warning: large integer implicitly truncated to unsigned type [-Woverflow] #define __cpu_to_le32(x) ((__force __le32)__swab32((x))) [mkp: fixed typo] Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_nvme.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index ec15a49532a6..f3710a75fe1f 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -442,7 +442,8 @@ static int qla2x00_start_nvme_mq(srb_t *sp) req->ring_ptr++; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; - cont_pkt->entry_type = cpu_to_le32(CONTINUE_A64_TYPE); + *((uint32_t *)(&cont_pkt->entry_type)) = + cpu_to_le32(CONTINUE_A64_TYPE); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; -- cgit
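A note on the last patch: why the full-dword store works on both byte orders is easier to see outside the driver. The sketch below is a stand-alone user-space demo, not qla2xxx code; the four-byte header layout mirrors the start of an IOCB, the 0x0A value for CONTINUE_A64_TYPE is used purely for illustration, and cpu_to_le32_demo() stands in for the kernel's cpu_to_le32():

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define CONTINUE_A64_TYPE 0x0A  /* illustrative value for the continuation entry */

  /* First dword of an IOCB header: four single-byte fields. */
  struct iocb_hdr {
          uint8_t entry_type;
          uint8_t entry_count;
          uint8_t sys_define;
          uint8_t entry_status;
  };

  /* Stand-in for the kernel's cpu_to_le32(). */
  static uint32_t cpu_to_le32_demo(uint32_t x)
  {
  #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
          return __builtin_bswap32(x);
  #else
          return x;
  #endif
  }

  int main(void)
  {
          struct iocb_hdr hdr;
          uint32_t word;

          /*
           * Problematic form: on big-endian, cpu_to_le32(0x0A) evaluates to
           * 0x0a000000, which cannot fit in the single-byte entry_type
           * field, hence the -Woverflow warning:
           *
           *      hdr.entry_type = cpu_to_le32_demo(CONTINUE_A64_TYPE);
           */

          /*
           * Fixed form: store the whole 32-bit header word.  The first byte
           * in memory is 0x0a on both byte orders, and entry_count,
           * sys_define and entry_status are cleared to 0 by the same store
           * (the driver uses a u32 pointer cast; memcpy expresses the
           * identical store here).
           */
          word = cpu_to_le32_demo(CONTINUE_A64_TYPE);
          memcpy(&hdr, &word, sizeof(word));

          printf("type=%#x count=%u sys=%u status=%u\n",
                 hdr.entry_type, hdr.entry_count, hdr.sys_define,
                 hdr.entry_status);
          return 0;
  }

On either endianness this prints type=0xa with the remaining three fields zeroed, which is exactly what the Continuation Type IOCB initialization relies on.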
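The qla2xxx unused-function patch earlier in this series relies on IS_ENABLED() keeping the NVMe code visible to the compiler in every configuration, so gcc can fold the dead branch away instead of warning about unreferenced static functions. A minimal sketch of that pattern follows; CONFIG_EXAMPLE_FEATURE and the helper names are made up for illustration and are not part of the driver:

  #include <linux/kconfig.h>

  /* Still compiled in every configuration, so never "defined but not used". */
  static int example_feature_setup(void)
  {
          return 0;
  }

  int example_init(void)
  {
          /*
           * Unlike "#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)", this leaves the
           * call below visible to the compiler; when the option is off the
           * condition is a compile-time constant, the branch is folded away
           * and example_feature_setup() is eliminated as dead code.
           */
          if (!IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
                  return 0;

          return example_feature_setup();
  }

Because the code is now always compiled, the referenced symbols must be resolvable in every allowed configuration; that is what the "depends on NVME_FC || !NVME_FC" line in the Kconfig hunk enforces, by preventing qla2xxx from being built in while NVME_FC is a module.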
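Finally, the locking rule behind the first lpfc patch in this section: once an outer spin_lock_irqsave() has disabled interrupts, any lock nested inside it must use the plain spin_lock()/spin_unlock() variants, because a nested spin_unlock_irq() would re-enable interrupts while the outer lock is still held. A schematic kernel-style sketch with illustrative structure and lock names (not lpfc's):

  #include <linux/spinlock.h>

  struct two_list_ctx {
          spinlock_t get_lock;    /* outer lock */
          spinlock_t put_lock;    /* inner lock, only taken under get_lock here */
  };

  void drain_both_lists(struct two_list_ctx *ctx)
  {
          unsigned long flags;

          spin_lock_irqsave(&ctx->get_lock, flags);       /* IRQs disabled from here on */
          spin_lock(&ctx->put_lock);                      /* nested: plain variant, no _irq */

          /* ... walk and free entries protected by both locks ... */

          spin_unlock(&ctx->put_lock);                    /* IRQs remain disabled */
          spin_unlock_irqrestore(&ctx->get_lock, flags);  /* IRQ state restored last */
  }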