Diffstat (limited to 'drivers/scsi')
358 files changed, 20838 insertions, 21156 deletions
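Most of the churn in this diff is a mechanical conversion of the scsi_host_template hooks: ->slave_configure() becomes ->sdev_configure() and gains a struct queue_limits argument, and likewise ->slave_alloc()/->slave_destroy() become ->sdev_init()/->sdev_destroy(). A minimal sketch of the converted shape follows; the example_* names are hypothetical and not taken from any driver in this series:

#include <linux/blkdev.h>
#include <linux/module.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* New-style hook: also receives the queue_limits being set up for the device. */
static int example_sdev_configure(struct scsi_device *sdev,
				  struct queue_limits *lim)
{
	/* Same body the old slave_configure() callbacks carry over unchanged. */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
	return 0;
}

static const struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.sdev_configure	= example_sdev_configure,	/* was .slave_configure */
};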
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 6fb61c88ea11..883d4a12a172 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1968,13 +1968,14 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code) } /* End twa_string_lookup() */ /* This function gets called when a disk is coming on-line */ -static int twa_slave_configure(struct scsi_device *sdev) +static int twa_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { /* Force 60 second timeout */ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); return 0; -} /* End twa_slave_configure() */ +} /* End twa_sdev_configure() */ static const struct scsi_host_template driver_template = { .module = THIS_MODULE, @@ -1984,7 +1985,7 @@ static const struct scsi_host_template driver_template = { .bios_param = twa_scsi_biosparam, .change_queue_depth = scsi_change_queue_depth, .can_queue = TW_Q_LENGTH-2, - .slave_configure = twa_slave_configure, + .sdev_configure = twa_sdev_configure, .this_id = -1, .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, .max_sectors = TW_MAX_SECTORS, @@ -2260,7 +2261,7 @@ out_disable_device: } /* End twa_resume() */ /* PCI Devices supported by this driver */ -static struct pci_device_id twa_pci_tbl[] = { +static const struct pci_device_id twa_pci_tbl[] = { { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX, diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index caa6713a62a4..e057ab9c7b90 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -96,7 +96,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res /* This function returns AENs through sysfs */ static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *outbuf, loff_t offset, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); @@ -116,18 +116,18 @@ static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj, } /* End twl_sysfs_aen_read() */ /* aen_read sysfs attribute initializer */ -static struct bin_attribute twl_sysfs_aen_read_attr = { +static const struct bin_attribute twl_sysfs_aen_read_attr = { .attr = { .name = "3ware_aen_read", .mode = S_IRUSR, }, .size = 0, - .read = twl_sysfs_aen_read + .read_new = twl_sysfs_aen_read }; /* This function returns driver compatibility info through sysfs */ static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *outbuf, loff_t offset, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); @@ -147,13 +147,13 @@ static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj, } /* End twl_sysfs_compat_info() */ /* compat_info sysfs attribute initializer */ -static struct bin_attribute twl_sysfs_compat_info_attr = { +static const struct bin_attribute twl_sysfs_compat_info_attr = { .attr = { .name = "3ware_compat_info", .mode = S_IRUSR, }, .size = 0, - .read = twl_sysfs_compat_info + .read_new = twl_sysfs_compat_info }; /* Show some statistics about the card */ @@ -1523,13 +1523,14 @@ static void twl_shutdown(struct pci_dev *pdev) } /* End twl_shutdown() */ /* This function configures unit settings when a unit is coming on-line */ -static int twl_slave_configure(struct scsi_device *sdev) +static int twl_sdev_configure(struct scsi_device *sdev, + struct queue_limits 
*lim) { /* Force 60 second timeout */ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); return 0; -} /* End twl_slave_configure() */ +} /* End twl_sdev_configure() */ static const struct scsi_host_template driver_template = { .module = THIS_MODULE, @@ -1539,7 +1540,7 @@ static const struct scsi_host_template driver_template = { .bios_param = twl_scsi_biosparam, .change_queue_depth = scsi_change_queue_depth, .can_queue = TW_Q_LENGTH-2, - .slave_configure = twl_slave_configure, + .sdev_configure = twl_sdev_configure, .this_id = -1, .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH, .max_sectors = TW_MAX_SECTORS, @@ -1821,7 +1822,7 @@ out_disable_device: } /* End twl_resume() */ /* PCI Devices supported by this driver */ -static struct pci_device_id twl_pci_tbl[] = { +static const struct pci_device_id twl_pci_tbl[] = { { PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) }, { } }; diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 2c0fb6da0e60..89bd56f78ef9 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -172,7 +172,7 @@ Initialize queues correctly when loading with no valid units. 1.02.00.034 - Fix tw_decode_bits() to handle multiple errors. Add support for user configurable cmd_per_lun. - Add support for sht->slave_configure(). + Add support for sht->sdev_configure(). 1.02.00.035 - Improve tw_allocate_memory() memory allocation. Fix tw_chrdev_ioctl() to sleep correctly. 1.02.00.036 - Increase character ioctl timeout to 60 seconds. @@ -2221,13 +2221,13 @@ static void tw_shutdown(struct pci_dev *pdev) } /* End tw_shutdown() */ /* This function gets called when a disk is coming online */ -static int tw_slave_configure(struct scsi_device *sdev) +static int tw_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { /* Force 60 second timeout */ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); return 0; -} /* End tw_slave_configure() */ +} /* End tw_sdev_configure() */ static const struct scsi_host_template driver_template = { .module = THIS_MODULE, @@ -2237,7 +2237,7 @@ static const struct scsi_host_template driver_template = { .bios_param = tw_scsi_biosparam, .change_queue_depth = scsi_change_queue_depth, .can_queue = TW_Q_LENGTH-2, - .slave_configure = tw_slave_configure, + .sdev_configure = tw_sdev_configure, .this_id = -1, .sg_tablesize = TW_MAX_SGL_LENGTH, .max_sectors = TW_MAX_SECTORS, @@ -2393,7 +2393,7 @@ static void tw_remove(struct pci_dev *pdev) } /* End tw_remove() */ /* PCI Devices supported by this driver */ -static struct pci_device_id tw_pci_tbl[] = { +static const struct pci_device_id tw_pci_tbl[] = { { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000, diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 85439e976143..71b7ac027f48 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -158,9 +158,10 @@ STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); STATIC void NCR_700_chip_setup(struct Scsi_Host *host); STATIC void NCR_700_chip_reset(struct Scsi_Host *host); -STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); -STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); -STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); +STATIC int NCR_700_sdev_init(struct scsi_device *SDpnt); +STATIC int NCR_700_sdev_configure(struct scsi_device *SDpnt, + struct queue_limits *lim); +STATIC void NCR_700_sdev_destroy(struct scsi_device *SDpnt); static int 
NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); STATIC const struct attribute_group *NCR_700_dev_groups[]; @@ -330,9 +331,9 @@ NCR_700_detect(struct scsi_host_template *tpnt, tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST; tpnt->sg_tablesize = NCR_700_SG_SEGMENTS; tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN; - tpnt->slave_configure = NCR_700_slave_configure; - tpnt->slave_destroy = NCR_700_slave_destroy; - tpnt->slave_alloc = NCR_700_slave_alloc; + tpnt->sdev_configure = NCR_700_sdev_configure; + tpnt->sdev_destroy = NCR_700_sdev_destroy; + tpnt->sdev_init = NCR_700_sdev_init; tpnt->change_queue_depth = NCR_700_change_queue_depth; if(tpnt->name == NULL) @@ -2017,7 +2018,7 @@ NCR_700_set_offset(struct scsi_target *STp, int offset) } STATIC int -NCR_700_slave_alloc(struct scsi_device *SDp) +NCR_700_sdev_init(struct scsi_device *SDp) { SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters), GFP_KERNEL); @@ -2029,7 +2030,7 @@ NCR_700_slave_alloc(struct scsi_device *SDp) } STATIC int -NCR_700_slave_configure(struct scsi_device *SDp) +NCR_700_sdev_configure(struct scsi_device *SDp, struct queue_limits *lim) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; @@ -2052,7 +2053,7 @@ NCR_700_slave_configure(struct scsi_device *SDp) } STATIC void -NCR_700_slave_destroy(struct scsi_device *SDp) +NCR_700_sdev_destroy(struct scsi_device *SDp) { kfree(SDp->hostdata); SDp->hostdata = NULL; diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 72ceaf650b0d..1f100270cd38 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -78,6 +78,7 @@ static struct blogic_drvr_options blogic_drvr_options[BLOGIC_MAX_ADAPTERS]; BusLogic can be assigned a string by insmod. */ +MODULE_DESCRIPTION("BusLogic MultiMaster and FlashPoint SCSI Host Adapter driver"); MODULE_LICENSE("GPL"); #ifdef MODULE static char *BusLogic; @@ -2152,14 +2153,15 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter, } /* - blogic_slaveconfig will actually set the queue depth on individual + blogic_sdev_configure will actually set the queue depth on individual scsi devices as they are permanently added to the device chain. We shamelessly rip off the SelectQueueDepths code to make this work mostly like it used to. Since we don't get called once at the end of the scan but instead get called for each device, we have to do things a bit differently. 
*/ -static int blogic_slaveconfig(struct scsi_device *dev) +static int blogic_sdev_configure(struct scsi_device *dev, + struct queue_limits *lim) { struct blogic_adapter *adapter = (struct blogic_adapter *) dev->host->hostdata; @@ -3671,7 +3673,7 @@ static const struct scsi_host_template blogic_template = { .name = "BusLogic", .info = blogic_drvr_info, .queuecommand = blogic_qcmd, - .slave_configure = blogic_slaveconfig, + .sdev_configure = blogic_sdev_configure, .bios_param = blogic_diskparam, .eh_host_reset_handler = blogic_hostreset, #if 0 @@ -3714,7 +3716,7 @@ static void __exit blogic_exit(void) __setup("BusLogic=", blogic_setup); #ifdef MODULE -/*static struct pci_device_id blogic_pci_tbl[] = { +/*static const struct pci_device_id blogic_pci_tbl[] = { { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h index 7d1ec10f2430..61bf26d4fc10 100644 --- a/drivers/scsi/BusLogic.h +++ b/drivers/scsi/BusLogic.h @@ -1274,7 +1274,8 @@ static inline void blogic_incszbucket(unsigned int *cmdsz_buckets, static const char *blogic_drvr_info(struct Scsi_Host *); static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *); static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *); -static int blogic_slaveconfig(struct scsi_device *); +static int blogic_sdev_configure(struct scsi_device *, + struct queue_limits *lim); static void blogic_qcompleted_ccb(struct blogic_ccb *); static irqreturn_t blogic_inthandler(int, void *); static int blogic_resetadapter(struct blogic_adapter *, bool hard_reset); diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c index 3d9c56ac8224..9e77b8e1ea7c 100644 --- a/drivers/scsi/FlashPoint.c +++ b/drivers/scsi/FlashPoint.c @@ -2631,7 +2631,6 @@ static void FPT_sres(u32 port, unsigned char p_card, WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00); our_target = (unsigned char)(RD_HARPOON(port + hp_select_id) >> 4); - currTar_Info = &FPT_sccbMgrTbl[p_card][our_target]; msgRetryCount = 0; do { diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 634f2f501c6c..5522310bab8d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -53,7 +53,7 @@ config SCSI_ESP_PIO config SCSI_NETLINK bool - default n + default n depends on NET config SCSI_PROC_FS @@ -82,7 +82,6 @@ comment "SCSI support type (disk, tape, CD-ROM)" config BLK_DEV_SD tristate "SCSI disk support" depends on SCSI - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY help If you want to use SCSI hard disks, Fibre Channel disks, Serial ATA (SATA) or Parallel ATA (PATA) hard disks, @@ -304,9 +303,9 @@ if SCSI_LOWLEVEL && SCSI config ISCSI_TCP tristate "iSCSI Initiator over TCP/IP" depends on SCSI && INET + select CRC32 select CRYPTO select CRYPTO_MD5 - select CRYPTO_CRC32C select SCSI_ISCSI_ATTRS help The iSCSI Driver provides a host with the ability to access storage @@ -327,7 +326,7 @@ config ISCSI_TCP config ISCSI_BOOT_SYSFS tristate "iSCSI Boot Sysfs Interface" - default n + default n help This option enables support for exposing iSCSI boot information via sysfs to userspace. 
If you wish to export this information, @@ -337,7 +336,6 @@ source "drivers/scsi/cxgbi/Kconfig" source "drivers/scsi/bnx2i/Kconfig" source "drivers/scsi/bnx2fc/Kconfig" source "drivers/scsi/be2iscsi/Kconfig" -source "drivers/scsi/cxlflash/Kconfig" config SGIWD93_SCSI tristate "SGI WD93C93 SCSI Driver" @@ -405,6 +403,7 @@ config SCSI_ACARD config SCSI_AHA152X tristate "Adaptec AHA152X/2825 support" depends on ISA && SCSI + depends on !HIGHMEM select SCSI_SPI_ATTRS select CHECK_SIGNATURE help @@ -797,6 +796,7 @@ config SCSI_PPA tristate "IOMEGA parallel port (ppa - older drives)" depends on SCSI && PARPORT_PC depends on HAS_IOPORT + depends on !HIGHMEM help This driver supports older versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -824,6 +824,7 @@ config SCSI_PPA config SCSI_IMM tristate "IOMEGA parallel port (imm - newer drives)" depends on SCSI && PARPORT_PC + depends on !HIGHMEM help This driver supports newer versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 1313ddf2fd1a..16de3e41f94c 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -96,7 +96,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o obj-$(CONFIG_SCSI_DC395x) += dc395x.o obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o -obj-$(CONFIG_CXLFLASH) += cxlflash/ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/ diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index cea3a79d538e..0e10502660de 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -157,7 +157,6 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd) } ncmd->status = 0; - ncmd->message = 0; } static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd) @@ -199,7 +198,6 @@ static inline void set_resid_from_SCp(struct scsi_cmnd *cmd) * Polls the chip in a reasonably efficient manner waiting for an * event to occur. After a short quick poll we begin to yield the CPU * (if possible). In irq contexts the time-out is arbitrarily limited. - * Callers may hold locks as long as they are held in irq mode. * * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. */ @@ -1228,24 +1226,15 @@ out: return ret; } -/* - * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using polled I/O - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer, - * can_sleep - 1 or 0 when sleeping is permitted or not, respectively. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes are transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. +/** + * NCR5380_transfer_pio() - transfers data in given phase using polled I/O + * @instance: instance of driver + * @phase: pointer to what phase is expected + * @count: pointer to number of bytes to transfer + * @data: pointer to data pointer + * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively * - * XXX Note : handling for bus free may be useful. + * Returns: void. *phase, *count, *data are modified in place. 
*/ /* @@ -1254,9 +1243,9 @@ out: * counts, we will always do a pseudo DMA or DMA transfer. */ -static int NCR5380_transfer_pio(struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data, unsigned int can_sleep) +static void NCR5380_transfer_pio(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data, unsigned int can_sleep) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char p = *phase, tmp; @@ -1277,8 +1266,8 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, * valid */ - if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, - HZ * can_sleep) < 0) + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ | SR_BSY, + SR_REQ | SR_BSY, HZ * can_sleep) < 0) break; dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); @@ -1329,17 +1318,19 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); -/* - * We have several special cases to consider during REQ/ACK handshaking : - * 1. We were in MSGOUT phase, and we are on the last byte of the - * message. ATN must be dropped as ACK is dropped. - * - * 2. We are in a MSGIN phase, and we are on the last byte of the - * message. We must exit with ACK asserted, so that the calling - * code may raise ATN before dropping ACK to reject the message. - * - * 3. ACK and ATN are clear and the target may proceed as normal. - */ + /* + * We have several special cases to consider during REQ/ACK + * handshaking: + * + * 1. We were in MSGOUT phase, and we are on the last byte of + * the message. ATN must be dropped as ACK is dropped. + * + * 2. We are in MSGIN phase, and we are on the last byte of the + * message. We must exit with ACK asserted, so that the calling + * code may raise ATN before dropping ACK to reject the message. + * + * 3. ACK and ATN are clear & the target may proceed as normal. + */ if (!(p == PHASE_MSGIN && c == 1)) { if (p == PHASE_MSGOUT && c > 1) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); @@ -1361,11 +1352,6 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, *phase = tmp & PHASE_MASK; else *phase = PHASE_UNKNOWN; - - if (!c || (*phase == p)) - return 0; - else - return -1; } /** @@ -1485,6 +1471,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char **data) { struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected); int c = *count; unsigned char p = *phase; unsigned char *d = *data; @@ -1496,7 +1483,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, return -1; } - NCR5380_to_ncmd(hostdata->connected)->phase = p; + ncmd->phase = p; if (p & SR_IO) { if (hostdata->read_overruns) @@ -1574,79 +1561,80 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, /* The result is zero iff pseudo DMA send/receive was completed. */ hostdata->dma_len = c; -/* - * A note regarding the DMA errata workarounds for early NMOS silicon. - * - * For DMA sends, we want to wait until the last byte has been - * transferred out over the bus before we turn off DMA mode. Alas, there - * seems to be no terribly good way of doing this on a 5380 under all - * conditions. For non-scatter-gather operations, we can wait until REQ - * and ACK both go false, or until a phase mismatch occurs. Gather-sends - * are nastier, since the device will be expecting more data than we - * are prepared to send it, and REQ will remain asserted. 
On a 53C8[01] we - * could test Last Byte Sent to assure transfer (I imagine this is precisely - * why this signal was added to the newer chips) but on the older 538[01] - * this signal does not exist. The workaround for this lack is a watchdog; - * we bail out of the wait-loop after a modest amount of wait-time if - * the usual exit conditions are not met. Not a terribly clean or - * correct solution :-% - * - * DMA receive is equally tricky due to a nasty characteristic of the NCR5380. - * If the chip is in DMA receive mode, it will respond to a target's - * REQ by latching the SCSI data into the INPUT DATA register and asserting - * ACK, even if it has _already_ been notified by the DMA controller that - * the current DMA transfer has completed! If the NCR5380 is then taken - * out of DMA mode, this already-acknowledged byte is lost. This is - * not a problem for "one DMA transfer per READ command", because - * the situation will never arise... either all of the data is DMA'ed - * properly, or the target switches to MESSAGE IN phase to signal a - * disconnection (either operation bringing the DMA to a clean halt). - * However, in order to handle scatter-receive, we must work around the - * problem. The chosen fix is to DMA fewer bytes, then check for the - * condition before taking the NCR5380 out of DMA mode. One or two extra - * bytes are transferred via PIO as necessary to fill out the original - * request. - */ - - if (hostdata->flags & FLAG_DMA_FIXUP) { - if (p & SR_IO) { - /* - * The workaround was to transfer fewer bytes than we - * intended to with the pseudo-DMA read function, wait for - * the chip to latch the last byte, read it, and then disable - * pseudo-DMA mode. - * - * After REQ is asserted, the NCR5380 asserts DRQ and ACK. - * REQ is deasserted when ACK is asserted, and not reasserted - * until ACK goes false. Since the NCR5380 won't lower ACK - * until DACK is asserted, which won't happen unless we twiddle - * the DMA port or we take the NCR5380 out of DMA mode, we - * can guarantee that we won't handshake another extra - * byte. - */ + /* + * A note regarding the DMA errata workarounds for early NMOS silicon. + * + * For DMA sends, we want to wait until the last byte has been + * transferred out over the bus before we turn off DMA mode. Alas, there + * seems to be no terribly good way of doing this on a 5380 under all + * conditions. For non-scatter-gather operations, we can wait until REQ + * and ACK both go false, or until a phase mismatch occurs. Gather-sends + * are nastier, since the device will be expecting more data than we + * are prepared to send it, and REQ will remain asserted. On a 53C8[01] + * we could test Last Byte Sent to assure transfer (I imagine this is + * precisely why this signal was added to the newer chips) but on the + * older 538[01] this signal does not exist. The workaround for this + * lack is a watchdog; we bail out of the wait-loop after a modest + * amount of wait-time if the usual exit conditions are not met. + * Not a terribly clean or correct solution :-% + * + * DMA receive is equally tricky due to a nasty characteristic of the + * NCR5380. If the chip is in DMA receive mode, it will respond to a + * target's REQ by latching the SCSI data into the INPUT DATA register + * and asserting ACK, even if it has _already_ been notified by the + * DMA controller that the current DMA transfer has completed! If the + * NCR5380 is then taken out of DMA mode, this already-acknowledged + * byte is lost. 
+ * + * This is not a problem for "one DMA transfer per READ + * command", because the situation will never arise... either all of + * the data is DMA'ed properly, or the target switches to MESSAGE IN + * phase to signal a disconnection (either operation bringing the DMA + * to a clean halt). However, in order to handle scatter-receive, we + * must work around the problem. The chosen fix is to DMA fewer bytes, + * then check for the condition before taking the NCR5380 out of DMA + * mode. One or two extra bytes are transferred via PIO as necessary + * to fill out the original request. + */ - if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ, BASR_DRQ, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); - } - if (NCR5380_poll_politely(hostdata, STATUS_REG, - SR_REQ, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); - } - d[*count - 1] = NCR5380_read(INPUT_DATA_REG); - } else { - /* - * Wait for the last byte to be sent. If REQ is being asserted for - * the byte we're interested, we'll ACK it and it will go false. - */ - if (NCR5380_poll_politely2(hostdata, - BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, - BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); + if ((hostdata->flags & FLAG_DMA_FIXUP) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + /* + * The workaround was to transfer fewer bytes than we + * intended to with the pseudo-DMA receive function, wait for + * the chip to latch the last byte, read it, and then disable + * DMA mode. + * + * After REQ is asserted, the NCR5380 asserts DRQ and ACK. + * REQ is deasserted when ACK is asserted, and not reasserted + * until ACK goes false. Since the NCR5380 won't lower ACK + * until DACK is asserted, which won't happen unless we twiddle + * the DMA port or we take the NCR5380 out of DMA mode, we + * can guarantee that we won't handshake another extra + * byte. + * + * If sending, wait for the last byte to be sent. If REQ is + * being asserted for the byte we're interested, we'll ACK it + * and it will go false. + */ + if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_DRQ, BASR_DRQ, 0)) { + if ((p & SR_IO) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + if (!NCR5380_poll_politely(hostdata, STATUS_REG, + SR_REQ, 0, 0)) { + d[c] = NCR5380_read(INPUT_DATA_REG); + --ncmd->this_residual; + } else { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: !REQ timeout\n"); + } } + } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: DRQ timeout\n"); } } @@ -1666,9 +1654,6 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * Side effects : SCSI things happen, the disconnected queue will be * modified if a command disconnects, *instance->connected will * change. - * - * XXX Note : we need to watch for bus free or a reset condition here - * to recover from an unexpected bus free condition. 
*/ static void NCR5380_information_transfer(struct Scsi_Host *instance) @@ -1807,9 +1792,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) return; case PHASE_MSGIN: len = 1; + tmp = 0xff; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); - ncmd->message = tmp; + if (tmp == 0xff) + break; switch (tmp) { case ABORT: @@ -1996,6 +1983,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) break; case PHASE_STATIN: len = 1; + tmp = ncmd->status; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); ncmd->status = tmp; @@ -2005,9 +1993,20 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } else { + int err; + spin_unlock_irq(&hostdata->lock); - NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); + err = NCR5380_poll_politely(hostdata, STATUS_REG, + SR_REQ, SR_REQ, HZ); spin_lock_irq(&hostdata->lock); + + if (err < 0 && hostdata->connected && + !(NCR5380_read(STATUS_REG) & SR_BSY)) { + scmd_printk(KERN_ERR, hostdata->connected, + "BSY signal lost\n"); + do_reset(instance); + bus_reset_cleanup(instance); + } } } } diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 8dc2be4212dc..d402d4bffcb2 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -3,10 +3,10 @@ * NCR 5380 defines * * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix consulting and custom programming) - * drew@colorado.edu - * +1 (303) 666-5836 + * Visionary Computing + * (Unix consulting and custom programming) + * drew@colorado.edu + * +1 (303) 666-5836 * * For more information, please consult * @@ -78,7 +78,7 @@ #define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */ #define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */ #define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */ -#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */ +#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */ #define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */ #define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */ @@ -135,7 +135,7 @@ #define BASR_IRQ 0x10 /* ro mirror of IRQ pin */ #define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */ #define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */ -#define BASR_ATN 0x02 /* ro BUS status */ +#define BASR_ATN 0x02 /* ro BUS status */ #define BASR_ACK 0x01 /* ro BUS status */ /* Write any value to this register to start a DMA send */ @@ -170,7 +170,7 @@ #define CSR_BASE CSR_53C80_INTR /* Note : PHASE_* macros are based on the values of the STATUS register */ -#define PHASE_MASK (SR_MSG | SR_CD | SR_IO) +#define PHASE_MASK (SR_MSG | SR_CD | SR_IO) #define PHASE_DATAOUT 0 #define PHASE_DATAIN SR_IO @@ -231,7 +231,6 @@ struct NCR5380_cmd { int this_residual; struct scatterlist *buffer; int status; - int message; int phase; struct list_head list; }; @@ -286,8 +285,9 @@ static const char *NCR5380_info(struct Scsi_Host *instance); static void NCR5380_reselect(struct Scsi_Host *instance); static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); -static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data, - unsigned int can_sleep); +static void NCR5380_transfer_pio(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data, unsigned int 
can_sleep); static int NCR5380_poll_politely2(struct NCR5380_hostdata *, unsigned int, u8, u8, unsigned int, u8, u8, unsigned long); diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index b95147fb18b0..a8979f9e30ff 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -1206,7 +1206,7 @@ static void inia100_remove_one(struct pci_dev *pdev) scsi_host_put(shost); } -static struct pci_device_id inia100_pci_tbl[] = { +static const struct pci_device_id inia100_pci_tbl[] = { {PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index 378906f77909..bf054dd7682b 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -295,10 +295,16 @@ static void __exit amiga_a3000_scsi_remove(struct platform_device *pdev) release_mem_region(res->start, resource_size(res)); } -static struct platform_driver amiga_a3000_scsi_driver = { - .remove_new = __exit_p(amiga_a3000_scsi_remove), - .driver = { - .name = "amiga-a3000-scsi", +/* + * amiga_a3000_scsi_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. + */ +static struct platform_driver amiga_a3000_scsi_driver __refdata = { + .remove = __exit_p(amiga_a3000_scsi_remove), + .driver = { + .name = "amiga-a3000-scsi", }, }; diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index e435fc06a624..75b43047a155 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -108,10 +108,16 @@ static void __exit amiga_a4000t_scsi_remove(struct platform_device *pdev) release_mem_region(res->start, resource_size(res)); } -static struct platform_driver amiga_a4000t_scsi_driver = { - .remove_new = __exit_p(amiga_a4000t_scsi_remove), - .driver = { - .name = "amiga-a4000t-scsi", +/* + * amiga_a4000t_scsi_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
+ */ +static struct platform_driver amiga_a4000t_scsi_driver __refdata = { + .remove = __exit_p(amiga_a4000t_scsi_remove), + .driver = { + .name = "amiga-a4000t-scsi", }, }; diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index b22857c6f3f4..0be719f38377 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -27,7 +27,7 @@ #include <linux/uaccess.h> #include <linux/module.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -1267,7 +1267,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 return ret; command = ContainerRawIo; fibsize = sizeof(struct aac_raw_io) + - ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw)); + (le32_to_cpu(readcmd->sg.count) * sizeof(struct sgentryraw)); } BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1302,7 +1302,7 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u if (ret < 0) return ret; fibsize = sizeof(struct aac_read64) + - ((le32_to_cpu(readcmd->sg.count) - 1) * + (le32_to_cpu(readcmd->sg.count) * sizeof (struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1337,7 +1337,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 if (ret < 0) return ret; fibsize = sizeof(struct aac_read) + - ((le32_to_cpu(readcmd->sg.count) - 1) * + (le32_to_cpu(readcmd->sg.count) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1401,7 +1401,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u return ret; command = ContainerRawIo; fibsize = sizeof(struct aac_raw_io) + - ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw)); + (le32_to_cpu(writecmd->sg.count) * sizeof(struct sgentryraw)); } BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1436,7 +1436,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, if (ret < 0) return ret; fibsize = sizeof(struct aac_write64) + - ((le32_to_cpu(writecmd->sg.count) - 1) * + (le32_to_cpu(writecmd->sg.count) * sizeof (struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1473,7 +1473,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 if (ret < 0) return ret; fibsize = sizeof(struct aac_write) + - ((le32_to_cpu(writecmd->sg.count) - 1) * + (le32_to_cpu(writecmd->sg.count) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1592,9 +1592,9 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd) /* * Build Scatter/Gather list */ - fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + + fibsize = sizeof(struct aac_srb) + ((le32_to_cpu(srbcmd->sg.count) & 0xff) * - sizeof (struct sgentry64)); + sizeof(struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1624,7 +1624,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd) * Build Scatter/Gather list */ fibsize = sizeof (struct aac_srb) + - (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * + ((le32_to_cpu(srbcmd->sg.count) & 0xff) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1693,8 +1693,7 @@ static int aac_send_safw_bmic_cmd(struct aac_dev *dev, fibptr->hw_fib_va->header.XferState &= 
~cpu_to_le32(FastResponseCapable); - fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + - sizeof(struct sgentry64); + fibsize = sizeof(struct aac_srb) + sizeof(struct sgentry64); /* allocate DMA buffer for response */ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len, @@ -1833,7 +1832,7 @@ static int aac_get_safw_ciss_luns(struct aac_dev *dev) struct aac_ciss_phys_luns_resp *phys_luns; datasize = sizeof(struct aac_ciss_phys_luns_resp) + - (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun); + AAC_MAX_TARGETS * sizeof(struct _ciss_lun); phys_luns = kmalloc(datasize, GFP_KERNEL); if (phys_luns == NULL) goto out; @@ -2267,7 +2266,7 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->a_ops.adapter_bounds = aac_bounds_32; dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgentry)) / + sizeof(struct aac_write)) / sizeof(struct sgentry); if (dev->dac_support) { dev->a_ops.adapter_read = aac_read_block64; @@ -2278,8 +2277,7 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write64) + - sizeof(struct sgentry64)) / + sizeof(struct aac_write64)) / sizeof(struct sgentry64); } else { dev->a_ops.adapter_read = aac_read_block; @@ -3223,8 +3221,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) break; } fallthrough; - case RESERVE: - case RELEASE: + case RESERVE_6: + case RELEASE_6: case REZERO_UNIT: case REASSIGN_BLOCKS: case SEEK_10: diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 7d5a155073c6..8c384c25dca1 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -322,7 +322,7 @@ struct aac_ciss_phys_luns_resp { u8 level3[2]; u8 level2[2]; u8 node_ident[16]; /* phys. node identifier */ - } lun[1]; /* List of phys. devices */ + } lun[]; /* List of phys. 
devices */ }; /* @@ -507,32 +507,27 @@ struct sge_ieee1212 { struct sgmap { __le32 count; - struct sgentry sg[1]; + struct sgentry sg[]; }; struct user_sgmap { u32 count; - struct user_sgentry sg[1]; + struct user_sgentry sg[]; }; struct sgmap64 { __le32 count; - struct sgentry64 sg[1]; + struct sgentry64 sg[]; }; struct user_sgmap64 { u32 count; - struct user_sgentry64 sg[1]; + struct user_sgentry64 sg[]; }; struct sgmapraw { __le32 count; - struct sgentryraw sg[1]; -}; - -struct user_sgmapraw { - u32 count; - struct user_sgentryraw sg[1]; + struct sgentryraw sg[]; }; struct creation_info @@ -873,7 +868,7 @@ union aac_init __le16 element_count; __le16 comp_thresh; __le16 unused; - } rrq[1]; /* up to 64 RRQ addresses */ + } rrq[] __counted_by_le(rr_queue_count); /* up to 64 RRQ addresses */ } r8; }; @@ -2029,8 +2024,8 @@ struct aac_srb_reply }; struct aac_srb_unit { - struct aac_srb srb; struct aac_srb_reply srb_reply; + struct aac_srb srb; }; /* @@ -2741,7 +2736,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, int isAif, int isFastResponse, struct hw_fib *aif_fib); int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type); -int aac_check_health(struct aac_dev * dev); int aac_command_thread(void *data); int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index e7cc927ed952..68240d6f27ab 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -523,7 +523,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || + if ((fibsize < sizeof(struct user_aac_srb)) || (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; @@ -561,7 +561,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + + actual_fibsize = sizeof(struct aac_srb) + ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * (sizeof(struct sgentry64) - sizeof(struct sgentry)); diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index bd99c5492b7d..28cf18955a08 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -522,8 +522,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) spin_lock_init(&dev->iq_lock); dev->max_fib_size = sizeof(struct hw_fib); dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size - - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgentry)) + - sizeof(struct aac_fibhdr) - sizeof(struct aac_write)) / sizeof(struct sgentry); dev->comm_interface = AAC_COMM_PRODUCER; dev->raw_io_interface = dev->raw_io_64 = 0; @@ -642,6 +641,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) if (aac_comm_init(dev)<0){ kfree(dev->queues); + dev->queues = NULL; return NULL; } /* @@ -649,6 +649,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) */ if (aac_fib_setup(dev) < 0) { kfree(dev->queues); + dev->queues = NULL; return NULL; } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 25cee03d7f97..ffef61c4aa01 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1698,127 +1698,6 @@ int 
aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) return retval; } -int aac_check_health(struct aac_dev * aac) -{ - int BlinkLED; - unsigned long time_now, flagv = 0; - struct list_head * entry; - - /* Extending the scope of fib_lock slightly to protect aac->in_reset */ - if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) - return 0; - - if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { - spin_unlock_irqrestore(&aac->fib_lock, flagv); - return 0; /* OK */ - } - - aac->in_reset = 1; - - /* Fake up an AIF: - * aac_aifcmd.command = AifCmdEventNotify = 1 - * aac_aifcmd.seqnum = 0xFFFFFFFF - * aac_aifcmd.data[0] = AifEnExpEvent = 23 - * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 - * aac.aifcmd.data[2] = AifHighPriority = 3 - * aac.aifcmd.data[3] = BlinkLED - */ - - time_now = jiffies/HZ; - entry = aac->fib_list.next; - - /* - * For each Context that is on the - * fibctxList, make a copy of the - * fib, and then set the event to wake up the - * thread that is waiting for it. - */ - while (entry != &aac->fib_list) { - /* - * Extract the fibctx - */ - struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); - struct hw_fib * hw_fib; - struct fib * fib; - /* - * Check if the queue is getting - * backlogged - */ - if (fibctx->count > 20) { - /* - * It's *not* jiffies folks, - * but jiffies / HZ, so do not - * panic ... - */ - u32 time_last = fibctx->jiffies; - /* - * Has it been > 2 minutes - * since the last read off - * the queue? - */ - if ((time_now - time_last) > aif_timeout) { - entry = entry->next; - aac_close_fib_context(aac, fibctx); - continue; - } - } - /* - * Warning: no sleep allowed while - * holding spinlock - */ - hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC); - fib = kzalloc(sizeof(struct fib), GFP_ATOMIC); - if (fib && hw_fib) { - struct aac_aifcmd * aif; - - fib->hw_fib_va = hw_fib; - fib->dev = aac; - aac_fib_init(fib); - fib->type = FSAFS_NTC_FIB_CONTEXT; - fib->size = sizeof (struct fib); - fib->data = hw_fib->data; - aif = (struct aac_aifcmd *)hw_fib->data; - aif->command = cpu_to_le32(AifCmdEventNotify); - aif->seqnum = cpu_to_le32(0xFFFFFFFF); - ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); - ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); - ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); - ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); - - /* - * Put the FIB onto the - * fibctx's fibs - */ - list_add_tail(&fib->fiblink, &fibctx->fib_list); - fibctx->count++; - /* - * Set the event to wake up the - * thread that will waiting. 
- */ - complete(&fibctx->completion); - } else { - printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); - kfree(fib); - kfree(hw_fib); - } - entry = entry->next; - } - - spin_unlock_irqrestore(&aac->fib_lock, flagv); - - if (BlinkLED < 0) { - printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", - aac->name, BlinkLED); - goto out; - } - - printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); - -out: - aac->in_reset = 0; - return BlinkLED; -} - static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target) { return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers; @@ -2327,8 +2206,9 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); sg64->sg[0].count = cpu_to_le32(datasize); - ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb), - FsaNormal, 1, 1, NULL, NULL); + ret = aac_fib_send(ScsiPortCommand64, fibptr, + sizeof(struct aac_srb) + sizeof(struct sgentry), + FsaNormal, 1, 1, NULL, NULL); dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 68f4dbcfff49..4b12e6dd8f07 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -377,15 +377,17 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, } /** - * aac_slave_configure - compute queue depths + * aac_sdev_configure - compute queue depths * @sdev: SCSI device we are considering + * @lim: Request queue limits * * Selects queue depths for each target device based on the host adapter's * total capacity and the queue depth supported by the target device. * A queue depth of one automatically disables tagged queueing. 
*/ -static int aac_slave_configure(struct scsi_device *sdev) +static int aac_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; int chn, tid; @@ -1487,7 +1489,7 @@ static const struct scsi_host_template aac_driver_template = { .queuecommand = aac_queuecommand, .bios_param = aac_biosparm, .shost_groups = aac_host_groups, - .slave_configure = aac_slave_configure, + .sdev_configure = aac_sdev_configure, .change_queue_depth = aac_change_queue_depth, .sdev_groups = aac_dev_groups, .eh_abort_handler = aac_eh_abort, @@ -2027,7 +2029,7 @@ static void aac_pci_resume(struct pci_dev *pdev) dev_err(&pdev->dev, "aacraid: PCI error - resume\n"); } -static struct pci_error_handlers aac_pci_err_handler = { +static const struct pci_error_handlers aac_pci_err_handler = { .error_detected = aac_pci_error_detected, .mmio_enabled = aac_pci_mmio_enabled, .slot_reset = aac_pci_slot_reset, diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 11ef58204e96..28115ed637e8 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -410,7 +410,7 @@ static void aac_src_start_adapter(struct aac_dev *dev) lower_32_bits(dev->init_pa), upper_32_bits(dev->init_pa), sizeof(struct _r8) + - (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), + AAC_MAX_HRRQ * sizeof(struct _rrq), 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } else { init->r7.host_elapsed_seconds = diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index ab066bb27a57..3a2c336307c0 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -4496,7 +4496,7 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc) /* * Microcode operating variables for WDTR, SDTR, and command tag - * queuing will be set in slave_configure() based on what a + * queuing will be set in sdev_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set @@ -5013,7 +5013,7 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc) /* * Microcode operating variables for WDTR, SDTR, and command tag - * queuing will be set in slave_configure() based on what a + * queuing will be set in sdev_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set @@ -5508,7 +5508,7 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc) /* * Microcode operating variables for WDTR, SDTR, and command tag - * queuing will be set in slave_configure() based on what a + * queuing will be set in sdev_configure() based on what a * device reports it is capable of in Inquiry byte 7. 
* * If SCSI Bus Resets have been disabled, then directly set @@ -7219,7 +7219,7 @@ static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev) } static void -advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc) +advansys_narrow_sdev_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc) { ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id; ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng; @@ -7345,7 +7345,7 @@ static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc, } static void -advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc) +advansys_wide_sdev_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc) { AdvPortAddr iop_base = adv_dvc->iop_base; unsigned short tidmask = 1 << sdev->id; @@ -7391,16 +7391,17 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc) * Set the number of commands to queue per device for the * specified host adapter. */ -static int advansys_slave_configure(struct scsi_device *sdev) +static int advansys_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct asc_board *boardp = shost_priv(sdev->host); if (ASC_NARROW_BOARD(boardp)) - advansys_narrow_slave_configure(sdev, - &boardp->dvc_var.asc_dvc_var); + advansys_narrow_sdev_configure(sdev, + &boardp->dvc_var.asc_dvc_var); else - advansys_wide_slave_configure(sdev, - &boardp->dvc_var.adv_dvc_var); + advansys_wide_sdev_configure(sdev, + &boardp->dvc_var.adv_dvc_var); return 0; } @@ -10612,7 +10613,7 @@ static const struct scsi_host_template advansys_template = { .queuecommand = advansys_queuecommand, .eh_host_reset_handler = advansys_reset, .bios_param = advansys_biosparam, - .slave_configure = advansys_slave_configure, + .sdev_configure = advansys_sdev_configure, .cmd_size = sizeof(struct advansys_cmd), }; @@ -11408,7 +11409,7 @@ static struct eisa_driver advansys_eisa_driver = { }; /* PCI Devices supported by this driver */ -static struct pci_device_id advansys_pci_tbl[] = { +static const struct pci_device_id advansys_pci_tbl[] = { {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940, @@ -11545,6 +11546,7 @@ static void __exit advansys_exit(void) module_init(advansys_init); module_exit(advansys_exit); +MODULE_DESCRIPTION("AdvanSys SCSI Adapter driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("advansys/mcode.bin"); MODULE_FIRMWARE("advansys/3550.bin"); diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 055adb349b0e..e94c0a19c435 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -295,7 +295,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) #else #define IRQ_MIN 9 #if defined(__PPC) -#define IRQ_MAX (nr_irqs-1) +#define IRQ_MAX (irq_get_nr_irqs()-1) #else #define IRQ_MAX 12 #endif @@ -1071,7 +1071,7 @@ static int aha152x_abort(struct scsi_cmnd *SCpnt) static int aha152x_device_reset(struct scsi_cmnd * SCpnt) { struct Scsi_Host *shpnt = SCpnt->device->host; - DECLARE_COMPLETION(done); + DECLARE_COMPLETION_ONSTACK(done); int ret, issued, disconnected; unsigned char old_cmd_len = SCpnt->cmd_len; unsigned long flags; @@ -2940,12 +2940,6 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) return 0; } -static int aha152x_adjust_queue(struct scsi_device *device) -{ - blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); - return 0; -} - static const struct scsi_host_template aha152x_driver_template = { .module = THIS_MODULE, .name = AHA152X_REVID, @@ 
-2961,7 +2955,6 @@ static const struct scsi_host_template aha152x_driver_template = { .this_id = 7, .sg_tablesize = SG_ALL, .dma_boundary = PAGE_SIZE - 1, - .slave_alloc = aha152x_adjust_queue, .cmd_size = sizeof(struct aha152x_cmd_priv), }; diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 9503996c6325..389499d3e00a 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -1009,6 +1009,8 @@ static int aha1542_biosparam(struct scsi_device *sdev, return 0; } + +MODULE_DESCRIPTION("Adaptec AHA-1542 SCSI host adapter driver"); MODULE_LICENSE("GPL"); static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index 3d18945abaf7..be7ebbbb9ba8 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c @@ -681,4 +681,5 @@ static __exit void aha1740_exit (void) module_init (aha1740_init); module_exit (aha1740_exit); +MODULE_DESCRIPTION("Adaptec AHA1740 SCSI host adapter driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx index 4bc53eec4c83..863f0932ef59 100644 --- a/drivers/scsi/aic7xxx/Kconfig.aic79xx +++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx @@ -8,79 +8,80 @@ config SCSI_AIC79XX depends on PCI && HAS_IOPORT && SCSI select SCSI_SPI_ATTRS help - This driver supports all of Adaptec's Ultra 320 PCI-X - based SCSI controllers. + This driver supports all of Adaptec's Ultra 320 PCI-X + based SCSI controllers. config AIC79XX_CMDS_PER_DEVICE int "Maximum number of TCQ commands per device" depends on SCSI_AIC79XX default "32" help - Specify the number of commands you would like to allocate per SCSI - device when Tagged Command Queueing (TCQ) is enabled on that device. + Specify the number of commands you would like to allocate per SCSI + device when Tagged Command Queueing (TCQ) is enabled on that device. - This is an upper bound value for the number of tagged transactions - to be used for any device. The aic7xxx driver will automatically - vary this number based on device behavior. For devices with a - fixed maximum, the driver will eventually lock to this maximum - and display a console message indicating this value. + This is an upper bound value for the number of tagged transactions + to be used for any device. The aic7xxx driver will automatically + vary this number based on device behavior. For devices with a + fixed maximum, the driver will eventually lock to this maximum + and display a console message indicating this value. - Due to resource allocation issues in the Linux SCSI mid-layer, using - a high number of commands per device may result in memory allocation - failures when many devices are attached to the system. For this reason, - the default is set to 32. Higher values may result in higher performance - on some devices. The upper bound is 253. 0 disables tagged queueing. + Due to resource allocation issues in the Linux SCSI mid-layer, using + a high number of commands per device may result in memory allocation + failures when many devices are attached to the system. For this + reason, the default is set to 32. Higher values may result in higher + performance on some devices. The upper bound is 253. 0 disables + tagged queueing. - Per device tag depth can be controlled via the kernel command line - "tag_info" option. See Documentation/scsi/aic79xx.rst for details. + Per device tag depth can be controlled via the kernel command line + "tag_info" option. See Documentation/scsi/aic79xx.rst for details. 
config AIC79XX_RESET_DELAY_MS int "Initial bus reset delay in milli-seconds" depends on SCSI_AIC79XX default "5000" help - The number of milliseconds to delay after an initial bus reset. - The bus settle delay following all error recovery actions is - dictated by the SCSI layer and is not affected by this value. + The number of milliseconds to delay after an initial bus reset. + The bus settle delay following all error recovery actions is + dictated by the SCSI layer and is not affected by this value. - Default: 5000 (5 seconds) + Default: 5000 (5 seconds) config AIC79XX_BUILD_FIRMWARE bool "Build Adapter Firmware with Kernel Build" depends on SCSI_AIC79XX && !PREVENT_FIRMWARE_BUILD help - This option should only be enabled if you are modifying the firmware - source to the aic79xx driver and wish to have the generated firmware - include files updated during a normal kernel build. The assembler - for the firmware requires lex and yacc or their equivalents, as well - as the db v1 library. You may have to install additional packages - or modify the assembler Makefile or the files it includes if your - build environment is different than that of the author. + This option should only be enabled if you are modifying the firmware + source to the aic79xx driver and wish to have the generated firmware + include files updated during a normal kernel build. The assembler + for the firmware requires lex and yacc or their equivalents, as well + as the db v1 library. You may have to install additional packages + or modify the assembler Makefile or the files it includes if your + build environment is different than that of the author. config AIC79XX_DEBUG_ENABLE bool "Compile in Debugging Code" depends on SCSI_AIC79XX default y help - Compile in aic79xx debugging code that can be useful in diagnosing - driver errors. + Compile in aic79xx debugging code that can be useful in diagnosing + driver errors. config AIC79XX_DEBUG_MASK int "Debug code enable mask (16383 for all debugging)" depends on SCSI_AIC79XX default "0" help - Bit mask of debug options that is only valid if the - CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask - are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the - variable ahd_debug in that file to find them. + Bit mask of debug options that is only valid if the + CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask + are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the + variable ahd_debug in that file to find them. config AIC79XX_REG_PRETTY_PRINT bool "Decode registers during diagnostics" depends on SCSI_AIC79XX default y help - Compile in register value tables for the output of expanded register - contents in diagnostics. This make it much easier to understand debug - output without having to refer to a data book and/or the aic7xxx.reg - file. + Compile in register value tables for the output of expanded register + contents in diagnostics. This make it much easier to understand debug + output without having to refer to a data book and/or the aic7xxx.reg + file. 
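Interleaved with the hook rename, the series also repeatedly const-ifies the PCI device ID tables (3w-9xxx, 3w-sas, 3w-xxxx, a100u2w, advansys above): the tables are only read after registration, so they can live in rodata. A hedged sketch reusing an ID that appears in this diff; example_pci_tbl is a made-up name:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {	/* was non-const */
	{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
	{ }	/* all-zero terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);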
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx index f0425145a5f4..8f87f2d8ba9f 100644 --- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx +++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx @@ -8,84 +8,85 @@ config SCSI_AIC7XXX depends on (PCI || EISA) && HAS_IOPORT && SCSI select SCSI_SPI_ATTRS help - This driver supports all of Adaptec's Fast through Ultra 160 PCI - based SCSI controllers as well as the aic7770 based EISA and VLB - SCSI controllers (the 274x and 284x series). For AAA and ARO based - configurations, only SCSI functionality is provided. + This driver supports all of Adaptec's Fast through Ultra 160 PCI + based SCSI controllers as well as the aic7770 based EISA and VLB + SCSI controllers (the 274x and 284x series). For AAA and ARO based + configurations, only SCSI functionality is provided. - To compile this driver as a module, choose M here: the - module will be called aic7xxx. + To compile this driver as a module, choose M here: the + module will be called aic7xxx. config AIC7XXX_CMDS_PER_DEVICE int "Maximum number of TCQ commands per device" depends on SCSI_AIC7XXX default "32" help - Specify the number of commands you would like to allocate per SCSI - device when Tagged Command Queueing (TCQ) is enabled on that device. + Specify the number of commands you would like to allocate per SCSI + device when Tagged Command Queueing (TCQ) is enabled on that device. - This is an upper bound value for the number of tagged transactions - to be used for any device. The aic7xxx driver will automatically - vary this number based on device behavior. For devices with a - fixed maximum, the driver will eventually lock to this maximum - and display a console message indicating this value. + This is an upper bound value for the number of tagged transactions + to be used for any device. The aic7xxx driver will automatically + vary this number based on device behavior. For devices with a + fixed maximum, the driver will eventually lock to this maximum + and display a console message indicating this value. - Due to resource allocation issues in the Linux SCSI mid-layer, using - a high number of commands per device may result in memory allocation - failures when many devices are attached to the system. For this reason, - the default is set to 32. Higher values may result in higher performance - on some devices. The upper bound is 253. 0 disables tagged queueing. + Due to resource allocation issues in the Linux SCSI mid-layer, using + a high number of commands per device may result in memory allocation + failures when many devices are attached to the system. For this + reason, the default is set to 32. Higher values may result in higher + performance on some devices. The upper bound is 253. 0 disables tagged + queueing. - Per device tag depth can be controlled via the kernel command line - "tag_info" option. See Documentation/scsi/aic7xxx.rst for details. + Per device tag depth can be controlled via the kernel command line + "tag_info" option. See Documentation/scsi/aic7xxx.rst for details. config AIC7XXX_RESET_DELAY_MS int "Initial bus reset delay in milli-seconds" depends on SCSI_AIC7XXX default "5000" help - The number of milliseconds to delay after an initial bus reset. - The bus settle delay following all error recovery actions is - dictated by the SCSI layer and is not affected by this value. + The number of milliseconds to delay after an initial bus reset. 
+ The bus settle delay following all error recovery actions is + dictated by the SCSI layer and is not affected by this value. - Default: 5000 (5 seconds) + Default: 5000 (5 seconds) config AIC7XXX_BUILD_FIRMWARE bool "Build Adapter Firmware with Kernel Build" depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD help - This option should only be enabled if you are modifying the firmware - source to the aic7xxx driver and wish to have the generated firmware - include files updated during a normal kernel build. The assembler - for the firmware requires lex and yacc or their equivalents, as well - as the db v1 library. You may have to install additional packages - or modify the assembler Makefile or the files it includes if your - build environment is different than that of the author. + This option should only be enabled if you are modifying the firmware + source to the aic7xxx driver and wish to have the generated firmware + include files updated during a normal kernel build. The assembler + for the firmware requires lex and yacc or their equivalents, as well + as the db v1 library. You may have to install additional packages + or modify the assembler Makefile or the files it includes if your + build environment is different than that of the author. config AIC7XXX_DEBUG_ENABLE bool "Compile in Debugging Code" depends on SCSI_AIC7XXX default y help - Compile in aic7xxx debugging code that can be useful in diagnosing - driver errors. + Compile in aic7xxx debugging code that can be useful in diagnosing + driver errors. config AIC7XXX_DEBUG_MASK - int "Debug code enable mask (2047 for all debugging)" - depends on SCSI_AIC7XXX - default "0" - help - Bit mask of debug options that is only valid if the - CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask - are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the - variable ahc_debug in that file to find them. + int "Debug code enable mask (2047 for all debugging)" + depends on SCSI_AIC7XXX + default "0" + help + Bit mask of debug options that is only valid if the + CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask + are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the + variable ahc_debug in that file to find them. config AIC7XXX_REG_PRETTY_PRINT - bool "Decode registers during diagnostics" - depends on SCSI_AIC7XXX + bool "Decode registers during diagnostics" + depends on SCSI_AIC7XXX default y - help - Compile in register value tables for the output of expanded register - contents in diagnostics. This make it much easier to understand debug - output without having to refer to a data book and/or the aic7xxx.reg - file. + help + Compile in register value tables for the output of expanded register + contents in diagnostics. This makes it much easier to understand debug + output without having to refer to a data book and/or the aic7xxx.reg + file.
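Several of the hunks that follow convert the aic7xxx core and other drivers from del_timer()/del_timer_sync() to the renamed timer_delete()/timer_delete_sync(); semantics are unchanged. A minimal sketch under a hypothetical demo_softc:

/*
 * Sketch of the timer API rename: timer_delete_sync() waits for a
 * running handler to finish, timer_delete() reports whether the
 * timer was still pending. struct demo_softc is invented.
 */
#include <linux/printk.h>
#include <linux/timer.h>

struct demo_softc {
	struct timer_list stat_timer;
};

static void demo_shutdown(struct demo_softc *sc)
{
	/* Safe against a concurrently running handler. */
	timer_delete_sync(&sc->stat_timer);
}

static void demo_cancel(struct demo_softc *sc)
{
	if (!timer_delete(&sc->stat_timer))
		pr_debug("timer handler already ran or was not armed\n");
}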
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile index e0188ecd85b2..853c72a81ae0 100644 --- a/drivers/scsi/aic7xxx/Makefile +++ b/drivers/scsi/aic7xxx/Makefile @@ -55,9 +55,9 @@ aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \ ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y) $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm - $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic7xxx_reg.h \ + $(obj)/aicasm/aicasm -I $(src) -r $(obj)/aic7xxx_reg.h \ $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ - $(srctree)/$(src)/aic7xxx.seq + $(src)/aic7xxx.seq $(aic7xxx-gen-y): $(objtree)/$(obj)/aic7xxx_seq.h @true @@ -73,9 +73,9 @@ aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \ ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y) $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm - $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic79xx_reg.h \ + $(obj)/aicasm/aicasm -I $(src) -r $(obj)/aic79xx_reg.h \ $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ - $(srctree)/$(src)/aic79xx.seq + $(src)/aic79xx.seq $(aic79xx-gen-y): $(objtree)/$(obj)/aic79xx_seq.h @true @@ -83,5 +83,5 @@ else $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped endif -$(obj)/aicasm/aicasm: $(srctree)/$(src)/aicasm/*.[chyl] - $(MAKE) -C $(srctree)/$(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/ +$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl] + $(MAKE) -C $(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/ diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c index 176704b24e6a..f1ce02cd569e 100644 --- a/drivers/scsi/aic7xxx/aic7770.c +++ b/drivers/scsi/aic7xxx/aic7770.c @@ -99,21 +99,6 @@ struct aic7770_identity aic7770_ident_table[] = ahc_aic7770_EISA_setup } }; -const int ahc_num_aic7770_devs = ARRAY_SIZE(aic7770_ident_table); - -struct aic7770_identity * -aic7770_find_device(uint32_t id) -{ - struct aic7770_identity *entry; - int i; - - for (i = 0; i < ahc_num_aic7770_devs; i++) { - entry = &aic7770_ident_table[i]; - if (entry->full_id == (id & entry->id_mask)) - return (entry); - } - return (NULL); -} int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 3e3100dbfda3..f9372a81cd4e 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -6181,7 +6181,7 @@ ahd_shutdown(void *arg) /* * Stop periodic timer callbacks. 
*/ - del_timer_sync(&ahd->stat_timer); + timer_delete_sync(&ahd->stat_timer); /* This will reset most registers to 0, but not all */ ahd_reset(ahd, /*reinit*/FALSE); @@ -6975,7 +6975,7 @@ static const char *termstat_strings[] = { static void ahd_timer_reset(struct timer_list *timer, int usec) { - del_timer(timer); + timer_delete(timer); timer->expires = jiffies + (usec * HZ)/1000000; add_timer(timer); } diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 4202059815a0..17dfc3c72110 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -672,7 +672,7 @@ ahd_linux_target_destroy(struct scsi_target *starget) } static int -ahd_linux_slave_alloc(struct scsi_device *sdev) +ahd_linux_sdev_init(struct scsi_device *sdev) { struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata); @@ -701,7 +701,7 @@ ahd_linux_slave_alloc(struct scsi_device *sdev) } static int -ahd_linux_slave_configure(struct scsi_device *sdev) +ahd_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { if (bootverbose) sdev_printk(KERN_INFO, sdev, "Slave Configure\n"); @@ -906,8 +906,8 @@ struct scsi_host_template aic79xx_driver_template = { .this_id = -1, .max_sectors = 8192, .cmd_per_lun = 2, - .slave_alloc = ahd_linux_slave_alloc, - .slave_configure = ahd_linux_slave_configure, + .sdev_init = ahd_linux_sdev_init, + .sdev_configure = ahd_linux_sdev_configure, .target_alloc = ahd_linux_target_alloc, .target_destroy = ahd_linux_target_destroy, }; diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 9bc755a0a2d3..20857c213c72 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -1119,7 +1119,6 @@ struct aic7770_identity { ahc_device_setup_t *setup; }; extern struct aic7770_identity aic7770_ident_table[]; -extern const int ahc_num_aic7770_devs; #define AHC_EISA_SLOT_OFFSET 0xc00 #define AHC_EISA_IOSIZE 0x100 @@ -1135,7 +1134,6 @@ int ahc_pci_test_register_access(struct ahc_softc *); void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc); /*************************** EISA/VL Front End ********************************/ -struct aic7770_identity *aic7770_find_device(uint32_t); int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *, u_int port); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index b0c4f2345321..cebf8c5d0caf 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -632,7 +632,7 @@ ahc_linux_target_destroy(struct scsi_target *starget) } static int -ahc_linux_slave_alloc(struct scsi_device *sdev) +ahc_linux_sdev_init(struct scsi_device *sdev) { struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata); @@ -664,7 +664,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev) } static int -ahc_linux_slave_configure(struct scsi_device *sdev) +ahc_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { if (bootverbose) sdev_printk(KERN_INFO, sdev, "Slave Configure\n"); @@ -791,8 +791,8 @@ struct scsi_host_template aic7xxx_driver_template = { .this_id = -1, .max_sectors = 8192, .cmd_per_lun = 2, - .slave_alloc = ahc_linux_slave_alloc, - .slave_configure = ahc_linux_slave_configure, + .sdev_init = ahc_linux_sdev_init, + .sdev_configure = ahc_linux_sdev_configure, .target_alloc = ahc_linux_target_alloc, .target_destroy = ahc_linux_target_destroy, }; diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y index 
65182ad9cdf8..b1c9ce477cbd 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y @@ -102,6 +102,7 @@ static void add_conditional(symbol_t *symbol); static void add_version(const char *verstring); static int is_download_const(expression_t *immed); static int is_location_address(symbol_t *symbol); +int yylex(); void yyerror(const char *string); #define SRAM_SYMNAME "SRAM_BASE" diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y index 8c0479865f04..5c7350eb5b5c 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y @@ -61,6 +61,7 @@ static symbol_t *macro_symbol; static void add_macro_arg(const char *argtext, int position); +int mmlex(); void mmerror(const char *string); %} diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l index c78d4f68eea5..fc7e6c58148d 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l @@ -64,6 +64,9 @@ static char *string_buf_ptr; static int parren_count; static int quote_count; static char buf[255]; +void mm_switch_to_buffer(YY_BUFFER_STATE); +void mmparse(); +void mm_delete_buffer(YY_BUFFER_STATE); %} PATH ([/]*[-A-Za-z0-9_.])+ diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 9dda296c0152..e74393357025 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -731,7 +731,7 @@ static void asd_dl_tasklet_handler(unsigned long data) goto next_1; } else if (ascb->scb->header.opcode == EMPTY_SCB) { goto out; - } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { + } else if (!ascb->uldd_timer && !timer_delete(&ascb->timer)) { goto next_1; } spin_lock_irqsave(&seq->pend_q_lock, flags); diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 8a3340d8d7ad..adf3d9145606 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -14,6 +14,7 @@ #include <linux/firmware.h> #include <linux/slab.h> +#include <scsi/sas_ata.h> #include <scsi/scsi_host.h> #include "aic94xx.h" @@ -24,6 +25,7 @@ /* The format is "version.release.patchlevel" */ #define ASD_DRIVER_VERSION "1.0.3" +#define DRV_NAME "aic94xx" static int use_msi = 0; module_param_named(use_msi, use_msi, int, S_IRUGO); @@ -34,32 +36,16 @@ MODULE_PARM_DESC(use_msi, "\n" static struct scsi_transport_template *aic94xx_transport_template; static int asd_scan_finished(struct Scsi_Host *, unsigned long); static void asd_scan_start(struct Scsi_Host *); +static const struct attribute_group *asd_sdev_groups[]; static const struct scsi_host_template aic94xx_sht = { - .module = THIS_MODULE, - /* .name is initialized */ - .name = "aic94xx", - .queuecommand = sas_queuecommand, - .dma_need_drain = ata_scsi_dma_need_drain, - .target_alloc = sas_target_alloc, - .slave_configure = sas_slave_configure, + LIBSAS_SHT_BASE .scan_finished = asd_scan_finished, .scan_start = asd_scan_start, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, .can_queue = 1, - .this_id = -1, .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sas_ioctl, -#endif 
.track_queue_depth = 1, + .sdev_groups = asd_sdev_groups, }; static int asd_map_memio(struct asd_ha_struct *asd_ha) @@ -865,7 +851,7 @@ static void asd_free_queues(struct asd_ha_struct *asd_ha) * times out. Apparently we don't wait for the CONTROL PHY * to complete, so it doesn't matter if we kill the timer. */ - del_timer_sync(&ascb->timer); + timer_delete_sync(&ascb->timer); WARN_ON(ascb->scb->header.opcode != CONTROL_PHY); list_del_init(pos); @@ -951,6 +937,11 @@ static void asd_remove_driver_attrs(struct device_driver *driver) driver_remove_file(driver, &driver_attr_version); } +static const struct attribute_group *asd_sdev_groups[] = { + &sas_ata_sdev_attr_group, + NULL +}; + static struct sas_domain_function_template aic94xx_transport_functions = { .lldd_dev_found = asd_dev_found, .lldd_dev_gone = asd_dev_gone, diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 27d32b8c2987..d45dbf98f25e 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -31,7 +31,7 @@ static int asd_enqueue_internal(struct asd_ascb *ascb, res = asd_post_ascb_list(ascb->ha, ascb, 1); if (unlikely(res)) - del_timer(&ascb->timer); + timer_delete(&ascb->timer); return res; } @@ -58,7 +58,7 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, { struct tasklet_completion_status *tcs = ascb->uldd_task; ASD_DPRINTK("%s: here\n", __func__); - if (!del_timer(&ascb->timer)) { + if (!timer_delete(&ascb->timer)) { ASD_DPRINTK("%s: couldn't delete timer\n", __func__); return; } @@ -303,7 +303,7 @@ static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, { struct tasklet_completion_status *tcs; - if (!del_timer(&ascb->timer)) + if (!timer_delete(&ascb->timer)) return; tcs = ascb->uldd_task; diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c index fbb29dbb1e50..003e61831e33 100644 --- a/drivers/scsi/am53c974.c +++ b/drivers/scsi/am53c974.c @@ -513,7 +513,7 @@ static void pci_esp_remove_one(struct pci_dev *pdev) scsi_host_put(esp->host); } -static struct pci_device_id am53c974_pci_tbl[] = { +static const struct pci_device_id am53c974_pci_tbl[] = { { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { } diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index baeb5e795690..8e3d4799ce93 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -60,7 +60,7 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin, + const struct bin_attribute *bin, char *buf, loff_t off, size_t count) { @@ -107,7 +107,7 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin, + const struct bin_attribute *bin, char *buf, loff_t off, size_t count) { @@ -155,7 +155,7 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin, + const struct bin_attribute *bin, char *buf, loff_t off, size_t count) { @@ -194,7 +194,7 @@ static const struct bin_attribute arcmsr_sysfs_message_read_attr = { .mode = S_IRUSR , }, .size = ARCMSR_API_DATA_BUFLEN, - .read = arcmsr_sysfs_iop_message_read, + .read_new = arcmsr_sysfs_iop_message_read, }; static const struct bin_attribute arcmsr_sysfs_message_write_attr = { @@ -203,7 +203,7 @@ static const struct bin_attribute 
arcmsr_sysfs_message_write_attr = { .mode = S_IWUSR, }, .size = ARCMSR_API_DATA_BUFLEN, - .write = arcmsr_sysfs_iop_message_write, + .write_new = arcmsr_sysfs_iop_message_write, }; static const struct bin_attribute arcmsr_sysfs_message_clear_attr = { @@ -212,7 +212,7 @@ static const struct bin_attribute arcmsr_sysfs_message_clear_attr = { .mode = S_IWUSR, }, .size = 1, - .write = arcmsr_sysfs_iop_message_clear, + .write_new = arcmsr_sysfs_iop_message_clear, }; int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index ad227e6cb10e..b450b1fc6bbb 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -143,7 +143,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb); static void arcmsr_set_iop_datetime(struct timer_list *); -static int arcmsr_slave_config(struct scsi_device *sdev); +static int arcmsr_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) { if (queue_depth > ARCMSR_MAX_CMD_PERLUN) @@ -160,7 +161,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = { .eh_abort_handler = arcmsr_abort, .eh_bus_reset_handler = arcmsr_bus_reset, .bios_param = arcmsr_bios_param, - .slave_configure = arcmsr_slave_config, + .sdev_configure = arcmsr_sdev_configure, .change_queue_depth = arcmsr_adjust_disk_queue_depth, .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD, .this_id = ARCMSR_SCSI_INITIATOR_ID, @@ -171,7 +172,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = { .no_write_same = 1, }; -static struct pci_device_id arcmsr_device_id_table[] = { +static const struct pci_device_id arcmsr_device_id_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120), @@ -1007,7 +1008,7 @@ msi_int0: goto msi_int1; } } - nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY); + nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX); if (nvec < 1) return FAILED; msi_int1: @@ -1044,7 +1045,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb) static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb) { timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0); - pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000); + pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60); add_timer(&pacb->refresh_timer); } @@ -1160,8 +1161,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; out_free_sysfs: if (set_date_time) - del_timer_sync(&acb->refresh_timer); - del_timer_sync(&acb->eternal_timer); + timer_delete_sync(&acb->refresh_timer); + timer_delete_sync(&acb->eternal_timer); flush_work(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); @@ -1203,9 +1204,9 @@ static int __maybe_unused arcmsr_suspend(struct device *dev) arcmsr_disable_outbound_ints(acb); arcmsr_free_irq(pdev, acb); - del_timer_sync(&acb->eternal_timer); + timer_delete_sync(&acb->eternal_timer); if (set_date_time) - del_timer_sync(&acb->refresh_timer); + timer_delete_sync(&acb->refresh_timer); flush_work(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); 
arcmsr_flush_adapter_cache(acb); @@ -1684,9 +1685,9 @@ static void arcmsr_free_pcidev(struct AdapterControlBlock *acb) arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); flush_work(&acb->arcmsr_do_message_isr_bh); - del_timer_sync(&acb->eternal_timer); + timer_delete_sync(&acb->eternal_timer); if (set_date_time) - del_timer_sync(&acb->refresh_timer); + timer_delete_sync(&acb->refresh_timer); pdev = acb->pdev; arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); @@ -1717,9 +1718,9 @@ static void arcmsr_remove(struct pci_dev *pdev) arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); flush_work(&acb->arcmsr_do_message_isr_bh); - del_timer_sync(&acb->eternal_timer); + timer_delete_sync(&acb->eternal_timer); if (set_date_time) - del_timer_sync(&acb->refresh_timer); + timer_delete_sync(&acb->refresh_timer); arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); @@ -1764,9 +1765,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev) (struct AdapterControlBlock *)host->hostdata; if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) return; - del_timer_sync(&acb->eternal_timer); + timer_delete_sync(&acb->eternal_timer); if (set_date_time) - del_timer_sync(&acb->refresh_timer); + timer_delete_sync(&acb->refresh_timer); arcmsr_disable_outbound_ints(acb); arcmsr_free_irq(pdev, acb); flush_work(&acb->arcmsr_do_message_isr_bh); @@ -3344,7 +3345,8 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd) static DEF_SCSI_QCMD(arcmsr_queue_command) -static int arcmsr_slave_config(struct scsi_device *sdev) +static int arcmsr_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { unsigned int dev_timeout; diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 0b046e4b395c..ef21b85cf014 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -591,7 +591,7 @@ datadir_t acornscsi_datadirection(int command) case CHANGE_DEFINITION: case COMPARE: case COPY: case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT: case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER: - case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: + case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE_6: case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: case WRITE_6: case WRITE_10: case WRITE_VERIFY: case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME: @@ -2450,7 +2450,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt) return 0; } -DEF_SCSI_QCMD(acornscsi_queuecmd) +static DEF_SCSI_QCMD(acornscsi_queuecmd) enum res_abort { res_not_running, res_success, res_success_clear, res_snooze }; @@ -2552,7 +2552,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt) * Params : SCpnt - command to abort * Returns : one of SCSI_ABORT_ macros */ -int acornscsi_abort(struct scsi_cmnd *SCpnt) +static int acornscsi_abort(struct scsi_cmnd *SCpnt) { AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata; int result; @@ -2634,7 +2634,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt) * Params : SCpnt - command causing reset * Returns : one of SCSI_RESET_ macros */ -int acornscsi_host_reset(struct scsi_cmnd *SCpnt) +static int acornscsi_host_reset(struct scsi_cmnd *SCpnt) { AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; struct scsi_cmnd *SCptr; @@ -2679,8 +2679,7 @@ int acornscsi_host_reset(struct scsi_cmnd *SCpnt) * Params : host - host to give information on * Returns : a constant string */ -const -char *acornscsi_info(struct Scsi_Host *host) +static const char *acornscsi_info(struct 
Scsi_Host *host) { static char string[100], *p; diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index c5d8f4313b31..e460068f6834 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -296,7 +296,7 @@ cumanascsi_2_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ -const char *cumanascsi_2_info(struct Scsi_Host *host) +static const char *cumanascsi_2_info(struct Scsi_Host *host) { struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; static char string[150]; diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index b3ec7635bc72..99be9da8757f 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -381,7 +381,7 @@ eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ -const char *eesoxscsi_info(struct Scsi_Host *host) +static const char *eesoxscsi_info(struct Scsi_Host *host) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; static char string[150]; diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 4ce0b2d73614..e0b55d869a35 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2331,7 +2331,7 @@ static void fas216_eh_timer(struct timer_list *t) fas216_log(info, LOG_ERROR, "error handling timed out\n"); - del_timer(&info->eh_timer); + timer_delete(&info->eh_timer); if (info->rst_bus_status == 0) info->rst_bus_status = -1; @@ -2532,7 +2532,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) */ wait_event(info->eh_wait, info->rst_dev_status); - del_timer_sync(&info->eh_timer); + timer_delete_sync(&info->eh_timer); spin_lock_irqsave(&info->host_lock, flags); info->rstSCpnt = NULL; @@ -2622,7 +2622,7 @@ int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt) * Wait one second for the interrupt. */ wait_event(info->eh_wait, info->rst_bus_status); - del_timer_sync(&info->eh_timer); + timer_delete_sync(&info->eh_timer); fas216_log(info, LOG_ERROR, "bus reset complete: %s\n", info->rst_bus_status == 1 ? "success" : "failed"); diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index 3b5991427886..823c65ff6c12 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c @@ -184,7 +184,7 @@ powertecscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ -const char *powertecscsi_info(struct Scsi_Host *host) +static const char *powertecscsi_info(struct Scsi_Host *host) { struct powertec_info *info = (struct powertec_info *)host->hostdata; static char string[150]; diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index d99e70914817..85055677666c 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -878,8 +878,14 @@ static void __exit atari_scsi_remove(struct platform_device *pdev) atari_stram_free(atari_dma_buffer); } -static struct platform_driver atari_scsi_driver = { - .remove_new = __exit_p(atari_scsi_remove), +/* + * atari_scsi_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. 
So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. + */ +static struct platform_driver atari_scsi_driver __refdata = { + .remove = __exit_p(atari_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, @@ -888,4 +894,5 @@ static struct platform_driver atari_scsi_driver = { module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe); MODULE_ALIAS("platform:" DRV_MODULE_NAME); +MODULE_DESCRIPTION("Atari TT/Falcon NCR5380 SCSI driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 2a748af269c2..401242912855 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -1724,6 +1724,8 @@ static void atp870u_remove (struct pci_dev *pdev) atp870u_free_tables(pshost); scsi_host_put(pshost); } + +MODULE_DESCRIPTION("ACARD SCSI host adapter driver"); MODULE_LICENSE("GPL"); static const struct scsi_host_template atp870u_template = { @@ -1741,7 +1743,7 @@ static const struct scsi_host_template atp870u_template = { .max_sectors = ATP870U_MAX_SECTORS, }; -static struct pci_device_id atp870u_id_table[] = { +static const struct pci_device_id atp870u_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) }, diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 06acb5ff609e..7d1b767d87fb 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5448,7 +5448,7 @@ static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, "BM_%d : EEH error detected\n"); /* first stop UE detection when PCI error detected */ - del_timer_sync(&phba->hw_check); + timer_delete_sync(&phba->hw_check); cancel_delayed_work_sync(&phba->recover_port); /* sessions are no longer valid, so first fail the sessions */ @@ -5528,7 +5528,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, struct beiscsi_hba *phba = NULL; struct be_eq_obj *pbe_eq; unsigned int s_handle; - char wq_name[20]; int ret, i; ret = beiscsi_enable_pci(pcidev); @@ -5634,9 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; - snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq", - phba->shost->host_no); - phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name); + phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1, + phba->shost->host_no); if (!phba->wq) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_dev_probe-" @@ -5748,7 +5746,7 @@ static void beiscsi_remove(struct pci_dev *pcidev) } /* first stop UE detection before unloading */ - del_timer_sync(&phba->hw_check); + timer_delete_sync(&phba->hw_check); cancel_delayed_work_sync(&phba->recover_port); cancel_work_sync(&phba->sess_work); @@ -5778,7 +5776,7 @@ static void beiscsi_remove(struct pci_dev *pcidev) } -static struct pci_error_handlers beiscsi_eeh_handlers = { +static const struct pci_error_handlers beiscsi_eeh_handlers = { .error_detected = beiscsi_eeh_err_detected, .slot_reset = beiscsi_eeh_reset, .resume = beiscsi_eeh_resume, diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 4cb9249e583c..a6b8c4ddea19 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -138,14 +138,6 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), } while (0) -/* - * PCI devices supported by the current BFA - */ -struct bfa_pciid_s { - u16 device_id; - u16 vendor_id; -}; - extern char 
bfa_version[]; struct bfa_iocfc_regs_s { @@ -408,9 +400,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, (((&(_bfa)->modules.dconf_mod)->min_cfg) \ ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status)) -void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); -void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa); diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 3438d0b8ba06..a99a101b95ef 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1934,24 +1934,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) } /* - * Return the list of PCI vendor/device id lists supported by this - * BFA instance. - */ -void -bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) -{ - static struct bfa_pciid_s __pciids[] = { - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, - }; - - *npciids = ARRAY_SIZE(__pciids); - *pciids = __pciids; -} - -/* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that * have been configured by the user. @@ -1987,20 +1969,3 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) cfg->drvcfg.delay_comp = BFA_FALSE; } - -void -bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) -{ - bfa_cfg_get_default(cfg); - cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; - cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; - cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; - cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; - cfg->fwcfg.num_rports = BFA_RPORT_MIN; - cfg->fwcfg.num_fwtio_reqs = 0; - - cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; - cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; - cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; - cfg->drvcfg.min_cfg = BFA_TRUE; -} diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h index 5e36620425ac..760606062c66 100644 --- a/drivers/scsi/bfa/bfa_defs_fcs.h +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -125,28 +125,6 @@ enum bfa_lport_offline_reason { }; /* - * FCS lport info. 
- */ -struct bfa_lport_info_s { - u8 port_type; /* bfa_lport_type_t : physical or - * virtual */ - u8 port_state; /* one of bfa_lport_state values */ - u8 offline_reason; /* one of bfa_lport_offline_reason_t - * values */ - wwn_t port_wwn; - wwn_t node_wwn; - - /* - * following 4 feilds are valid for Physical Ports only - */ - u32 max_vports_supp; /* Max supported vports */ - u32 num_vports_inuse; /* Num of in use vports */ - u32 max_rports_supp; /* Max supported rports */ - u32 num_rports_inuse; /* Num of doscovered rports */ - -}; - -/* * FCS port statistics */ struct bfa_lport_stats_s { diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index 52303e8c716d..c44fd096ee68 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -220,44 +220,6 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, } u16 -fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, - u8 set_npiv, u8 set_auth, u16 local_bb_credits) -{ - u32 d_id = bfa_hton3b(FC_FABRIC_PORT); - __be32 *vvl_info; - - memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); - - flogi->els_cmd.els_code = FC_ELS_FLOGI; - fc_els_req_build(fchs, d_id, s_id, ox_id); - - flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); - flogi->port_name = port_name; - flogi->node_name = node_name; - - /* - * Set the NPIV Capability Bit ( word 1, bit 31) of Common - * Service Parameters. - */ - flogi->csp.ciro = set_npiv; - - /* set AUTH capability */ - flogi->csp.security = set_auth; - - flogi->csp.bbcred = cpu_to_be16(local_bb_credits); - - /* Set brcd token in VVL */ - vvl_info = (u32 *)&flogi->vvl[0]; - - /* set the flag to indicate the presence of VVL */ - flogi->csp.npiv_supp = 1; /* @todo. 
field name is not correct */ - vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD); - - return sizeof(struct fc_logi_s); -} - -u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 local_bb_credits, u8 bb_scn) @@ -280,24 +242,6 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, } u16 -fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) -{ - u32 d_id = bfa_hton3b(FC_FABRIC_PORT); - - memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); - - flogi->els_cmd.els_code = FC_ELS_FDISC; - fc_els_req_build(fchs, d_id, s_id, ox_id); - - flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); - flogi->port_name = port_name; - flogi->node_name = node_name; - - return sizeof(struct fc_logi_s); -} - -u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 bb_cr) @@ -316,40 +260,6 @@ fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, } enum fc_parse_status -fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) -{ - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - struct fc_logi_s *plogi; - struct fc_ls_rjt_s *ls_rjt; - - switch (els_cmd->els_code) { - case FC_ELS_LS_RJT: - ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); - if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) - return FC_PARSE_BUSY; - else - return FC_PARSE_FAILURE; - case FC_ELS_ACC: - plogi = (struct fc_logi_s *) (fchs + 1); - if (len < sizeof(struct fc_logi_s)) - return FC_PARSE_FAILURE; - - if (!wwn_is_equal(plogi->port_name, port_name)) - return FC_PARSE_FAILURE; - - if (!plogi->class3.class_valid) - return FC_PARSE_FAILURE; - - if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; - default: - return FC_PARSE_FAILURE; - } -} - -enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs) { struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); @@ -421,21 +331,6 @@ fc_prli_rsp_parse(struct fc_prli_s *prli, int len) return FC_PARSE_OK; } -enum fc_parse_status -fc_prli_parse(struct fc_prli_s *prli) -{ - if (prli->parampage.type != FC_TYPE_FCP) - return FC_PARSE_FAILURE; - - if (!prli->parampage.imagepair) - return FC_PARSE_FAILURE; - - if (!prli->parampage.servparams.initiator) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name) @@ -506,84 +401,6 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, return FC_PARSE_OK; } -enum fc_parse_status -fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name, - wwn_t port_name) -{ - struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; - - if (adisc->els_cmd.els_code != FC_ELS_ACC) - return FC_PARSE_FAILURE; - - if ((adisc->nport_id == (host_dap)) - && wwn_is_equal(adisc->orig_port_name, port_name) - && wwn_is_equal(adisc->orig_node_name, node_name)) - return FC_PARSE_OK; - - return FC_PARSE_FAILURE; -} - -enum fc_parse_status -fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) -{ - struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); - - if (pdisc->class3.class_valid != 1) - return FC_PARSE_FAILURE; - - if ((be16_to_cpu(pdisc->class3.rxsz) < - (FC_MIN_PDUSZ - sizeof(struct fchs_s))) - || (pdisc->class3.rxsz == 0)) - return FC_PARSE_FAILURE; - - if 
(!wwn_is_equal(pdisc->port_name, port_name)) - return FC_PARSE_FAILURE; - - if (!wwn_is_equal(pdisc->node_name, node_name)) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -u16 -fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) -{ - memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); - fchs->cat_info = FC_CAT_ABTS; - fchs->d_id = (d_id); - fchs->s_id = (s_id); - fchs->ox_id = cpu_to_be16(ox_id); - - return sizeof(struct fchs_s); -} - -enum fc_parse_status -fc_abts_rsp_parse(struct fchs_s *fchs, int len) -{ - if ((fchs->cat_info == FC_CAT_BA_ACC) - || (fchs->cat_info == FC_CAT_BA_RJT)) - return FC_PARSE_OK; - - return FC_PARSE_FAILURE; -} - -u16 -fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id, - u16 ox_id, u16 rrq_oxid) -{ - fc_els_req_build(fchs, d_id, s_id, ox_id); - - /* - * build rrq payload - */ - memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); - rrq->s_id = (s_id); - rrq->ox_id = cpu_to_be16(rrq_oxid); - rrq->rx_id = FC_RXID_ANY; - - return sizeof(struct fc_rrq_s); -} - u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id) @@ -659,30 +476,6 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) } u16 -fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, - u32 d_id, u32 s_id, __be16 ox_id, int num_pages) -{ - int page; - - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - memset(tprlo_acc, 0, (num_pages * 16) + 4); - tprlo_acc->command = FC_ELS_ACC; - - tprlo_acc->page_len = 0x10; - tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); - - for (page = 0; page < num_pages; page++) { - tprlo_acc->tprlo_acc_params[page].opa_valid = 0; - tprlo_acc->tprlo_acc_params[page].rpa_valid = 0; - tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; - tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; - tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; - } - return be16_to_cpu(tprlo_acc->payload_len); -} - -u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages) { @@ -707,20 +500,6 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, } u16 -fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, - u32 s_id, u16 ox_id, u32 data_format) -{ - fc_els_req_build(fchs, d_id, s_id, ox_id); - - memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); - - rnid->els_cmd.els_code = FC_ELS_RNID; - rnid->node_id_data_format = data_format; - - return sizeof(struct fc_rnid_cmd_s); -} - -u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, __be16 ox_id, u32 data_format, struct fc_rnid_common_id_data_s *common_id_data, @@ -749,18 +528,6 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, } u16 -fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, - u32 s_id, u16 ox_id) -{ - fc_els_req_build(fchs, d_id, s_id, ox_id); - - memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); - - rpsc->els_cmd.els_code = FC_ELS_RPSC; - return sizeof(struct fc_rpsc_cmd_s); -} - -u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, u32 s_id, u32 *pid_list, u16 npids) { @@ -801,115 +568,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, return sizeof(struct fc_rpsc_acc_s); } -u16 -fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, - wwn_t port_name, wwn_t node_name, u16 pdu_size) -{ - struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 
- - memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); - - pdisc->els_cmd.els_code = FC_ELS_PDISC; - fc_els_req_build(fchs, d_id, s_id, ox_id); - - pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size); - pdisc->port_name = port_name; - pdisc->node_name = node_name; - - return sizeof(struct fc_logi_s); -} - -u16 -fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) -{ - struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); - - if (len < sizeof(struct fc_logi_s)) - return FC_PARSE_LEN_INVAL; - - if (pdisc->els_cmd.els_code != FC_ELS_ACC) - return FC_PARSE_ACC_INVAL; - - if (!wwn_is_equal(pdisc->port_name, port_name)) - return FC_PARSE_PWWN_NOT_EQUAL; - - if (!pdisc->class3.class_valid) - return FC_PARSE_NWWN_NOT_EQUAL; - - if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) - return FC_PARSE_RXSZ_INVAL; - - return FC_PARSE_OK; -} - -u16 -fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, - int num_pages) -{ - struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); - int page; - - fc_els_req_build(fchs, d_id, s_id, ox_id); - memset(prlo, 0, (num_pages * 16) + 4); - prlo->command = FC_ELS_PRLO; - prlo->page_len = 0x10; - prlo->payload_len = cpu_to_be16((num_pages * 16) + 4); - - for (page = 0; page < num_pages; page++) { - prlo->prlo_params[page].type = FC_TYPE_FCP; - prlo->prlo_params[page].opa_valid = 0; - prlo->prlo_params[page].rpa_valid = 0; - prlo->prlo_params[page].orig_process_assc = 0; - prlo->prlo_params[page].resp_process_assc = 0; - } - - return be16_to_cpu(prlo->payload_len); -} - -u16 -fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, - int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) -{ - struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); - int page; - - fc_els_req_build(fchs, d_id, s_id, ox_id); - memset(tprlo, 0, (num_pages * 16) + 4); - tprlo->command = FC_ELS_TPRLO; - tprlo->page_len = 0x10; - tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4); - - for (page = 0; page < num_pages; page++) { - tprlo->tprlo_params[page].type = FC_TYPE_FCP; - tprlo->tprlo_params[page].opa_valid = 0; - tprlo->tprlo_params[page].rpa_valid = 0; - tprlo->tprlo_params[page].orig_process_assc = 0; - tprlo->tprlo_params[page].resp_process_assc = 0; - if (tprlo_type == FC_GLOBAL_LOGO) { - tprlo->tprlo_params[page].global_process_logout = 1; - } else if (tprlo_type == FC_TPR_LOGO) { - tprlo->tprlo_params[page].tpo_nport_valid = 1; - tprlo->tprlo_params[page].tpo_nport_id = (tpr_id); - } - } - - return be16_to_cpu(tprlo->payload_len); -} - -u16 -fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id, - u32 reason_code, u32 reason_expl) -{ - struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); - - fc_bls_rsp_build(fchs, d_id, s_id, ox_id); - - fchs->cat_info = FC_CAT_BA_RJT; - ba_rjt->reason_code = reason_code; - ba_rjt->reason_expl = reason_expl; - return sizeof(struct fc_ba_rjt_s); -} - static void fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) { @@ -974,35 +632,6 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, } u16 -fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - u32 port_id) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); - u32 d_id = bfa_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); - - memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); - gnnid->dap = port_id; - return 
sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); -} - -u16 -fc_ct_rsp_parse(struct ct_hdr_s *cthdr) -{ - if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { - if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) - return FC_PARSE_BUSY; - else - return FC_PARSE_FAILURE; - } - - return FC_PARSE_OK; -} - -u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr, u32 d_id, u32 s_id, u16 ox_id, u8 reason_code, u8 reason_code_expl) @@ -1035,26 +664,6 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, } u16 -fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, - u32 s_id, u16 ox_id) -{ - u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); - u16 payldlen; - - fc_els_req_build(fchs, d_id, s_id, ox_id); - rscn->command = FC_ELS_RSCN; - rscn->pagelen = sizeof(rscn->event[0]); - - payldlen = sizeof(u32) + rscn->pagelen; - rscn->payldlen = cpu_to_be16(payldlen); - - rscn->event[0].format = FC_RSCN_FORMAT_PORTID; - rscn->event[0].portid = s_id; - - return struct_size(rscn, event, 1); -} - -u16 fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, enum bfa_lport_role roles) { @@ -1079,26 +688,6 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, } u16 -fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - u8 *fc4_bitmap, u32 bitmap_size) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); - u32 d_id = bfa_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); - - memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); - - rftid->dap = s_id; - memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, - (bitmap_size < 32 ? bitmap_size : 32)); - - return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 fc4_type, u8 fc4_ftrs) { @@ -1182,24 +771,6 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) } u16 -fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, - wwn_t port_name) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); - u32 d_id = bfa_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); - - memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); - rpnid->port_id = port_id; - rpnid->port_name = port_name; - - return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, wwn_t node_name) { @@ -1217,59 +788,6 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s); } -u16 -fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, - u32 cos) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rcsid_req_s *rcsid = - (struct fcgs_rcsid_req_s *) (cthdr + 1); - u32 d_id = bfa_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); - - memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); - rcsid->port_id = port_id; - rcsid->cos = cos; - - return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, - u8 port_type) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rptid_req_s *rptid = (struct 
fcgs_rptid_req_s *)(cthdr + 1); - u32 d_id = bfa_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); - - memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); - rptid->port_id = port_id; - rptid->port_type = port_type; - - return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); - u32 d_id = bfa_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); - - memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); - ganxt->port_id = port_id; - - return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); -} - /* * Builds fc hdr and ct hdr for FDMI requests. */ diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h index 49e0ee4a7334..51da37b2ae6b 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.h +++ b/drivers/scsi/bfa/bfa_fcbuild.h @@ -127,15 +127,6 @@ struct fc_templates_s { void fcbuild_init(void); -u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, - u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size, u8 set_npiv, u8 set_auth, - u16 local_bb_credits); - -u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size); - u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, @@ -148,14 +139,6 @@ u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs); -u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id, - u16 ox_id); - -enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len); - -u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id, - u32 s_id, u16 ox_id, u16 rrq_oxid); - u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id, u16 ox_id, u8 *name); u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id, @@ -164,10 +147,6 @@ u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id, u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id, u16 ox_id, enum bfa_lport_role role); -u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 ox_id, u8 *fc4_bitmap, - u32 bitmap_size); - u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 fc4_type, u8 fc4_ftrs); @@ -193,9 +172,6 @@ u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name); -enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, - u32 host_dap, wwn_t node_name, wwn_t port_name); - enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, wwn_t node_name); @@ -216,10 +192,6 @@ u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id, enum bfa_lport_role role); -u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, - u32 d_id, u32 s_id, u16 ox_id, - u32 data_format); - u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, __be16 ox_id, u32 data_format, @@ -228,29 +200,15 @@ u16 fc_rnid_acc_build(struct fchs_s *fchs, u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c, u32 d_id, u32 s_id, u32 *pid_list, u16 npids); -u16 fc_rpsc_build(struct fchs_s 
*fchs, struct fc_rpsc_cmd_s *rpsc, - u32 d_id, u32 s_id, u16 ox_id); u16 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, __be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed); u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id, u8 fc4_type); -u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id, wwn_t port_name); - u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, wwn_t node_name); -u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id, u32 cos); - -u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id, u8 port_type); - -u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id); - u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name); @@ -267,46 +225,16 @@ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask); void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id); -enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, - wwn_t port_name); - -enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli); - -enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, - wwn_t port_name); - u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, u32 s_id, __be16 ox_id, u16 rx_id); int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code); -u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, - u32 d_id, u32 s_id, __be16 ox_id, int num_pages); - u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages); -u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size); - -u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name); - -u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, int num_pages); - u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id); -u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - __be16 ox_id, u32 reason_code, u32 reason_expl); - -u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - u32 port_id); - -u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr); - -u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, - u16 ox_id); #endif diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 28ae4dc14dc9..2518e36d70d3 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -3413,15 +3413,6 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) } /* - * Notification on completions from related ioim. - */ -void -bfa_tskim_iodone(struct bfa_tskim_s *tskim) -{ - bfa_wc_down(&tskim->wc); -} - -/* * Handle IOC h/w failure notification from itnim. 
*/ void diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 4499f84c2d81..64e4485b226e 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -339,7 +339,6 @@ void bfa_ioim_tov(struct bfa_ioim_s *ioim); void bfa_tskim_attach(struct bfa_fcpim_s *fcpim); void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -void bfa_tskim_iodone(struct bfa_tskim_s *tskim); void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw); diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 5023c0ab4277..e52ce9b01f49 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -1431,7 +1431,7 @@ bfa_cb_lps_flogo_comp(void *bfad, void *uarg) * param[in] vf_id - VF_ID * * return - * If lookup succeeds, retuns fcs vf object, otherwise returns NULL + * If lookup succeeds, returns fcs vf object, otherwise returns NULL */ bfa_fcs_vf_t * bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 9788354b90da..19903539a08d 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -373,15 +373,11 @@ bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port); struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rport[], int *nrports); -wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, - int index, int nrports, bfa_boolean_t bwwn); struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn); void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); -void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info); void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, struct bfa_lport_attr_s *port_attr); void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, @@ -416,8 +412,6 @@ struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid( struct bfa_fcs_lport_s *port, u32 pid); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn( struct bfa_fcs_lport_s *port, wwn_t pwwn); -struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t nwwn); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier( struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid); void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, @@ -486,7 +480,6 @@ bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, u16 vf_id, struct bfa_lport_cfg_s *port_cfg, struct bfad_vport_s *vport_drv); -bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); @@ -494,7 +487,6 @@ void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, struct bfa_vport_attr_s *vport_attr); struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn); -void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); @@ -623,8 +615,6 @@ void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, 
struct bfa_rport_attr_s *attr); struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn); -struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t rnwwn); void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); void bfa_fcs_rport_set_max_logins(u32 max_logins); void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, @@ -633,8 +623,6 @@ void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 pid); -void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, - struct fc_logi_s *plogi_rsp); void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_logi_s *plogi); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 966bf6cc6dd9..9a85f417018f 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -938,25 +938,6 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) } /* - * NWWN based Lookup for a R-Port in the Port R-Port Queue - */ -struct bfa_fcs_rport_s * -bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) -{ - struct bfa_fcs_rport_s *rport; - struct list_head *qe; - - list_for_each(qe, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *) qe; - if (wwn_is_equal(rport->nwwn, nwwn)) - return rport; - } - - bfa_trc(port->fcs, nwwn); - return NULL; -} - -/* * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * @@ -5645,54 +5626,6 @@ bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) return &fcs->fabric.bport; } -wwn_t -bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index, - int nrports, bfa_boolean_t bwwn) -{ - struct list_head *qh, *qe; - struct bfa_fcs_rport_s *rport = NULL; - int i; - struct bfa_fcs_s *fcs; - - if (port == NULL || nrports == 0) - return (wwn_t) 0; - - fcs = port->fcs; - bfa_trc(fcs, (u32) nrports); - - i = 0; - qh = &port->rport_q; - qe = bfa_q_first(qh); - - while ((qe != qh) && (i < nrports)) { - rport = (struct bfa_fcs_rport_s *) qe; - if (bfa_ntoh3b(rport->pid) > 0xFFF000) { - qe = bfa_q_next(qe); - bfa_trc(fcs, (u32) rport->pwwn); - bfa_trc(fcs, rport->pid); - bfa_trc(fcs, i); - continue; - } - - if (bwwn) { - if (!memcmp(&wwn, &rport->pwwn, 8)) - break; - } else { - if (i == index) - break; - } - - i++; - qe = bfa_q_next(qe); - } - - bfa_trc(fcs, i); - if (rport) - return rport->pwwn; - else - return (wwn_t) 0; -} - void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rports[], int *nrports) @@ -5823,54 +5756,6 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) return NULL; } -/* - * API corresponding to NPIV_VPORT_GETINFO. 
- */ -void -bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info) -{ - - bfa_trc(port->fcs, port->fabric->fabric_name); - - if (port->vport == NULL) { - /* - * This is a Physical port - */ - port_info->port_type = BFA_LPORT_TYPE_PHYSICAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - - port_info->max_vports_supp = - bfa_lps_get_max_vport(port->fcs->bfa); - port_info->num_vports_inuse = - port->fabric->num_vports; - port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; - port_info->num_rports_inuse = port->num_rports; - } else { - /* - * This is a virtual port - */ - port_info->port_type = BFA_LPORT_TYPE_VIRTUAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - } -} - void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, struct bfa_lport_stats_s *port_stats) @@ -6568,15 +6453,6 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) } /* - * Cleanup notification from fabric SM on link timer expiry. - */ -void -bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) -{ - vport->vport_stats.fab_cleanup++; -} - -/* * Stop notification from fabric SM. To be invoked from within FCS. */ void @@ -6699,24 +6575,6 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, } /* - * Use this function to findout if this is a pbc vport or not. - * - * @param[in] vport - pointer to bfa_fcs_vport_t. - * - * @returns None - */ -bfa_boolean_t -bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport) -{ - - if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE)) - return BFA_TRUE; - else - return BFA_FALSE; - -} - -/* * Use this function initialize the vport. * * @param[in] vport - pointer to bfa_fcs_vport_t. diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index ce52a9c88ae6..d4bde9bbe75b 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -2646,27 +2646,6 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); return rport; } -/* - * Called by bport in private loop topology to indicate that a - * rport has been discovered and plogi has been completed. - * - * @param[in] port - base port or vport - * @param[in] rpid - remote port ID - */ -void -bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, - struct fc_logi_s *plogi) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); - if (!rport) - return; - - bfa_fcs_rport_update(rport, plogi); - - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); -} /* * Called by bport/vport to handle PLOGI received from a new remote port. @@ -3089,21 +3068,6 @@ bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) return rport; } -struct bfa_fcs_rport_s * -bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn); - if (rport == NULL) { - /* - * TBD Error handling - */ - } - - return rport; -} - /* * Remote port features (RPF) implementation. 
*/ diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index ea2f107f564c..aa68d61a2d0d 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -2254,17 +2254,6 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) return status; } -/* - * Enable/disable IOC failure auto recovery. - */ -void -bfa_ioc_auto_recover(bfa_boolean_t auto_recover) -{ - bfa_auto_recover = auto_recover; -} - - - bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc) { @@ -2272,16 +2261,6 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc) } bfa_boolean_t -bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) -{ - u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); - - return ((r32 != BFI_IOC_UNINIT) && - (r32 != BFI_IOC_INITING) && - (r32 != BFI_IOC_MEMTEST)); -} - -bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { __be32 *msgp = mbmsg; diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 3ec10503caff..d70332e9ad6d 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -919,7 +919,6 @@ void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc); void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); -void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); void bfa_ioc_detach(struct bfa_ioc_s *ioc); void bfa_ioc_suspend(struct bfa_ioc_s *ioc); void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, @@ -934,7 +933,6 @@ bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); -bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 578e7678b056..ed29ebda30da 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -113,7 +113,6 @@ void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *, struct bfa_pcidev_s *); -void bfa_uf_iocdisable(struct bfa_s *); void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 9f33aa303b18..df33afaaa673 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -913,14 +913,6 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) return reqbuf; } -u32 -bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - - return mod->req_pld_sz; -} - /* * Get the internal response buffer pointer * @@ -1023,21 +1015,6 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_fcxp_queue(fcxp, send_req); } -/* - * Abort a BFA FCXP - * - * @param[in] fcxp BFA fcxp pointer - * - * @return void - */ -bfa_status_t -bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) -{ - bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); - WARN_ON(1); - return BFA_STATUS_OK; -} - void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, @@ -3857,15 +3834,6 @@ bfa_fcport_clr_hardalpa(struct bfa_s *bfa) 
return BFA_STATUS_OK; } -bfa_boolean_t -bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - *alpa = fcport->cfg.hardalpa; - return fcport->cfg.cfg_hardalpa; -} - u8 bfa_fcport_get_myalpa(struct bfa_s *bfa) { @@ -3923,17 +3891,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) /* * Get port attributes. */ - -wwn_t -bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (node) - return fcport->nwwn; - else - return fcport->pwwn; -} - void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) { @@ -4106,18 +4063,6 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa) } /* - * Enable/Disable FAA feature in port config - */ -void -bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, state); - fcport->cfg.faa_state = state; -} - -/* * Get default minimum ratelim speed */ enum bfa_port_speed @@ -5529,23 +5474,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) } void -bfa_uf_iocdisable(struct bfa_s *bfa) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - struct bfa_uf_s *uf; - struct list_head *qe, *qen; - - /* Enqueue unused uf resources to free_q */ - list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q); - - list_for_each_safe(qe, qen, &ufm->uf_posted_q) { - uf = (struct bfa_uf_s *) qe; - list_del(&uf->qe); - bfa_uf_put(ufm, uf); - } -} - -void bfa_uf_start(struct bfa_s *bfa) { bfa_uf_post_all(BFA_UF_MOD(bfa)); diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 26eeee82bedc..99a960a8a2db 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -587,14 +587,12 @@ bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); -bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr); -wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); void bfa_fcport_event_register(struct bfa_s *bfa, void (*event_cbfn) (void *cbarg, enum bfa_port_linkstate event), void *event_cbarg); @@ -619,7 +617,6 @@ bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); void bfa_fcport_dportenable(struct bfa_s *bfa); void bfa_fcport_dportdisable(struct bfa_s *bfa); bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); -void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn); bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, @@ -687,8 +684,6 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_cb_fcxp_send_t cbfn, void *cbarg, u32 rsp_maxlen, u8 rsp_timeout); -bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); -u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 62cb7a864fd5..598f2fc93ef2 100644 --- 
a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -327,7 +327,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); - del_timer_sync(&bfad->hal_tmo); + timer_delete_sync(&bfad->hal_tmo); break; default: @@ -376,7 +376,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); - del_timer_sync(&bfad->hal_tmo); + timer_delete_sync(&bfad->hal_tmo); bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; bfad_uncfg_pport(bfad); @@ -843,26 +843,6 @@ bfad_drv_init(struct bfad_s *bfad) } void -bfad_drv_uninit(struct bfad_s *bfad) -{ - unsigned long flags; - - spin_lock_irqsave(&bfad->bfad_lock, flags); - init_completion(&bfad->comp); - bfa_iocfc_stop(&bfad->bfa); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - wait_for_completion(&bfad->comp); - - del_timer_sync(&bfad->hal_tmo); - bfa_isr_disable(&bfad->bfa); - bfa_detach(&bfad->bfa); - bfad_remove_intr(bfad); - bfad_hal_mem_release(bfad); - - bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; -} - -void bfad_drv_start(struct bfad_s *bfad) { unsigned long flags; @@ -1441,7 +1421,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) /* Suspend/fail all bfa operations */ bfa_ioc_suspend(&bfad->bfa.ioc); spin_unlock_irqrestore(&bfad->bfad_lock, flags); - del_timer_sync(&bfad->hal_tmo); + timer_delete_sync(&bfad->hal_tmo); ret = PCI_ERS_RESULT_CAN_RECOVER; break; case pci_channel_io_frozen: /* fatal error */ @@ -1455,7 +1435,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) wait_for_completion(&bfad->comp); bfad_remove_intr(bfad); - del_timer_sync(&bfad->hal_tmo); + timer_delete_sync(&bfad->hal_tmo); pci_disable_device(pdev); ret = PCI_ERS_RESULT_NEED_RESET; break; @@ -1586,7 +1566,7 @@ bfad_pci_mmio_enabled(struct pci_dev *pdev) wait_for_completion(&bfad->comp); bfad_remove_intr(bfad); - del_timer_sync(&bfad->hal_tmo); + timer_delete_sync(&bfad->hal_tmo); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; @@ -1662,7 +1642,7 @@ MODULE_DEVICE_TABLE(pci, bfad_id_table); /* * PCI error recovery handlers. 
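The del_timer_sync() calls converted above are part of the tree-wide timer API rename: del_timer_sync() is now timer_delete_sync() and del_timer() is timer_delete(), with unchanged semantics. A minimal sketch of the pattern, using a hypothetical driver state structure (my_dev and hal_tmo are illustrative names, not from this series):

	#include <linux/timer.h>

	struct my_dev {
		struct timer_list hal_tmo;	/* hypothetical per-device timer */
	};

	static void my_dev_stop(struct my_dev *dev)
	{
		/*
		 * timer_delete_sync() deactivates the timer and also waits for
		 * a concurrently running handler to finish, so the caller must
		 * not hold any lock the handler can take. timer_delete() only
		 * deactivates and may return while the handler is still
		 * running on another CPU.
		 */
		timer_delete_sync(&dev->hal_tmo);
	}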
*/ -static struct pci_error_handlers bfad_err_handler = { +static const struct pci_error_handlers bfad_err_handler = { .error_detected = bfad_pci_error_detected, .slot_reset = bfad_pci_slot_reset, .mmio_enabled = bfad_pci_mmio_enabled, @@ -1693,9 +1673,8 @@ bfad_init(void) error = bfad_im_module_init(); if (error) { - error = -ENOMEM; printk(KERN_WARNING "bfad_im_module_init failure\n"); - goto ext; + return -ENOMEM; } if (strcmp(FCPI_NAME, " fcpim") == 0) diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index e96e4b6df265..54bc1539e1e9 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -854,13 +854,6 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, } static ssize_t -bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION); -} - -static ssize_t bfad_im_optionrom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -902,13 +895,6 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, } static ssize_t -bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME); -} - -static ssize_t bfad_im_num_of_discovered_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -944,15 +930,15 @@ static DEVICE_ATTR(symbolic_name, S_IRUGO, bfad_im_symbolic_name_show, NULL); static DEVICE_ATTR(hardware_version, S_IRUGO, bfad_im_hw_version_show, NULL); -static DEVICE_ATTR(driver_version, S_IRUGO, - bfad_im_drv_version_show, NULL); +static DEVICE_STRING_ATTR_RO(driver_version, S_IRUGO, + BFAD_DRIVER_VERSION); static DEVICE_ATTR(option_rom_version, S_IRUGO, bfad_im_optionrom_version_show, NULL); static DEVICE_ATTR(firmware_version, S_IRUGO, bfad_im_fw_version_show, NULL); static DEVICE_ATTR(number_of_ports, S_IRUGO, bfad_im_num_of_ports_show, NULL); -static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL); +static DEVICE_STRING_ATTR_RO(driver_name, S_IRUGO, BFAD_DRIVER_NAME); static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO, bfad_im_num_of_discovered_ports_show, NULL); @@ -963,11 +949,11 @@ static struct attribute *bfad_im_host_attrs[] = { &dev_attr_node_name.attr, &dev_attr_symbolic_name.attr, &dev_attr_hardware_version.attr, - &dev_attr_driver_version.attr, + &dev_attr_driver_version.attr.attr, &dev_attr_option_rom_version.attr, &dev_attr_firmware_version.attr, &dev_attr_number_of_ports.attr, - &dev_attr_driver_name.attr, + &dev_attr_driver_name.attr.attr, &dev_attr_number_of_discovered_ports.attr, NULL, }; @@ -988,11 +974,11 @@ static struct attribute *bfad_im_vport_attrs[] = { &dev_attr_node_name.attr, &dev_attr_symbolic_name.attr, &dev_attr_hardware_version.attr, - &dev_attr_driver_version.attr, + &dev_attr_driver_version.attr.attr, &dev_attr_option_rom_version.attr, &dev_attr_firmware_version.attr, &dev_attr_number_of_ports.attr, - &dev_attr_driver_name.attr, + &dev_attr_driver_name.attr.attr, &dev_attr_number_of_discovered_ports.attr, NULL, }; diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 52db147d9979..f6dd077d47c9 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -250,7 +250,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return 
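The bfad_attr.c hunks above replace hand-written show functions that only emitted a fixed string with DEVICE_STRING_ATTR_RO(). That macro expands to a struct dev_ext_attribute wrapping a device_attribute, which is why the attribute arrays change from &dev_attr_X.attr to &dev_attr_X.attr.attr. A sketch of the equivalent pattern, with a hypothetical attribute and string:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/*
	 * Replaces a show() callback that only did
	 * sysfs_emit(buf, "%s\n", "mydrv"); the generic
	 * device_show_string() helper emits the string stored in the
	 * dev_ext_attribute.
	 */
	static DEVICE_STRING_ATTR_RO(driver_name, 0444, "mydrv");

	static struct attribute *my_host_attrs[] = {
		/* dev_ext_attribute -> device_attribute -> attribute */
		&dev_attr_driver_name.attr.attr,
		NULL,
	};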
PTR_ERR(kern_buf); @@ -317,7 +317,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index da42e3261237..ce2ec5108947 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -312,7 +312,6 @@ void bfad_bfa_tmo(struct timer_list *t); void bfad_init_timer(struct bfad_s *bfad); int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); -void bfad_drv_uninit(struct bfad_s *bfad); int bfad_worker(void *ptr); void bfad_debugfs_init(struct bfad_port_s *port); void bfad_debugfs_exit(struct bfad_port_s *port); diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index a9d3d8562d3c..a719a18f0fbc 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -25,7 +25,7 @@ struct scsi_transport_template *bfad_im_scsi_transport_template; struct scsi_transport_template *bfad_im_scsi_vport_transport_template; static void bfad_im_itnim_work_handler(struct work_struct *work); static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); -static int bfad_im_slave_alloc(struct scsi_device *sdev); +static int bfad_im_sdev_init(struct scsi_device *sdev); static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim); @@ -404,10 +404,10 @@ bfad_im_reset_target_handler(struct scsi_cmnd *cmnd) } /* - * Scsi_Host template entry slave_destroy. + * Scsi_Host template entry sdev_destroy. */ static void -bfad_im_slave_destroy(struct scsi_device *sdev) +bfad_im_sdev_destroy(struct scsi_device *sdev) { sdev->hostdata = NULL; return; @@ -766,9 +766,8 @@ bfad_thread_workq(struct bfad_s *bfad) struct bfad_im_s *im = bfad->im; bfa_trc(bfad, 0); - snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d", - bfad->inst_no); - im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); + im->drv_workq = alloc_ordered_workqueue("bfad_wq_%d", WQ_MEM_RECLAIM, + bfad->inst_no); if (!im->drv_workq) return BFA_STATUS_FAILED; @@ -784,7 +783,7 @@ bfad_thread_workq(struct bfad_s *bfad) * Return non-zero if fails. 
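Switching the debugfs write handlers from memdup_user() to memdup_user_nul() is a correctness fix, not just a rename: the copied buffer is handed to string parsing, and memdup_user() does not NUL-terminate, so the parser could read past the allocation. A sketch of the fixed shape, with a hypothetical handler name:

	#include <linux/err.h>
	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static ssize_t my_debugfs_write(struct file *file,
					const char __user *buf,
					size_t nbytes, loff_t *ppos)
	{
		char *kern_buf;

		/* Copies nbytes from userspace and appends a '\0'. */
		kern_buf = memdup_user_nul(buf, nbytes);
		if (IS_ERR(kern_buf))
			return PTR_ERR(kern_buf);

		/* ... parse kern_buf safely here ... */

		kfree(kern_buf);
		return nbytes;
	}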
*/ static int -bfad_im_slave_configure(struct scsi_device *sdev) +bfad_im_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { scsi_change_queue_depth(sdev, bfa_lun_queue_depth); return 0; @@ -801,9 +800,9 @@ struct scsi_host_template bfad_im_scsi_host_template = { .eh_device_reset_handler = bfad_im_reset_lun_handler, .eh_target_reset_handler = bfad_im_reset_target_handler, - .slave_alloc = bfad_im_slave_alloc, - .slave_configure = bfad_im_slave_configure, - .slave_destroy = bfad_im_slave_destroy, + .sdev_init = bfad_im_sdev_init, + .sdev_configure = bfad_im_sdev_configure, + .sdev_destroy = bfad_im_sdev_destroy, .this_id = -1, .sg_tablesize = BFAD_IO_MAX_SGE, @@ -824,9 +823,9 @@ struct scsi_host_template bfad_im_vport_template = { .eh_device_reset_handler = bfad_im_reset_lun_handler, .eh_target_reset_handler = bfad_im_reset_target_handler, - .slave_alloc = bfad_im_slave_alloc, - .slave_configure = bfad_im_slave_configure, - .slave_destroy = bfad_im_slave_destroy, + .sdev_init = bfad_im_sdev_init, + .sdev_configure = bfad_im_sdev_configure, + .sdev_destroy = bfad_im_sdev_destroy, .this_id = -1, .sg_tablesize = BFAD_IO_MAX_SGE, @@ -916,7 +915,7 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id) } /* - * Function is invoked from the SCSI Host Template slave_alloc() entry point. + * Function is invoked from the SCSI Host Template sdev_init() entry point. * Has the logic to query the LUN Mask database to check if this LUN needs to * be made visible to the SCSI mid-layer or not. * @@ -947,10 +946,10 @@ bfad_im_check_if_make_lun_visible(struct scsi_device *sdev, } /* - * Scsi_Host template entry slave_alloc + * Scsi_Host template entry sdev_init */ static int -bfad_im_slave_alloc(struct scsi_device *sdev) +bfad_im_sdev_init(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct bfad_itnim_data_s *itnim_data; diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index 4353feedf76a..0884af04bd1f 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -134,7 +134,6 @@ struct bfad_fcp_binding { struct bfad_im_s { struct bfad_s *bfad; struct workqueue_struct *drv_workq; - char drv_workq_name[KOBJ_NAME_LEN]; struct work_struct aen_im_notify_work; }; diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 41e6b4dac056..e1e0e967bcc3 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -1148,7 +1148,7 @@ struct bfi_diag_dport_scn_testcomp_s { u16 numbuffer; /* from switch */ u8 subtest_status[DPORT_TEST_MAX]; /* 4 bytes */ u32 latency; /* from switch */ - u32 distance; /* from swtich unit in meters */ + u32 distance; /* from switch unit in meters */ /* Buffers required to saturate the link */ u16 frm_sz; /* from switch for buf_reqd */ u8 rsvd[2]; diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 7e74f77da14f..6d47a4d8eed6 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -358,18 +358,12 @@ struct bnx2fc_rport { dma_addr_t lcq_dma; u32 lcq_mem_size; - void *ofld_req[4]; - dma_addr_t ofld_req_dma[4]; - void *enbl_req; - dma_addr_t enbl_req_dma; - spinlock_t tgt_lock; spinlock_t cq_lock; atomic_t num_active_ios; u32 flush_in_prog; unsigned long timestamp; unsigned long retry_delay_timestamp; - struct list_head free_task_list; struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; struct list_head active_cmd_queue; struct list_head els_queue; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c 
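The bfad templates above show the SCSI midlayer rename that runs through the whole series: .slave_alloc, .slave_configure and .slave_destroy become .sdev_init, .sdev_configure and .sdev_destroy, and the configure hook gains a struct queue_limits argument so drivers adjust limits through *lim rather than poking sdev->request_queue directly. A condensed sketch of the new shape (names hypothetical, depth value arbitrary):

	#include <linux/blkdev.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>

	static int my_sdev_init(struct scsi_device *sdev)
	{
		/* was .slave_alloc; per-device setup goes here */
		return 0;
	}

	static int my_sdev_configure(struct scsi_device *sdev,
				     struct queue_limits *lim)
	{
		/* was .slave_configure; queue limit tweaks go through *lim */
		scsi_change_queue_depth(sdev, 32);
		return 0;
	}

	static const struct scsi_host_template my_sht = {
		.name		= "my_driver",
		.sdev_init	= my_sdev_init,
		.sdev_configure	= my_sdev_configure,
	};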
index 1078c20c5ef6..de6574cccf58 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -1599,7 +1599,7 @@ static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) struct bnx2fc_hba *hba = interface->hba; /* Stop the transmit retry timer */ - del_timer_sync(&port->timer); + timer_delete_sync(&port->timer); /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport); @@ -1938,7 +1938,7 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) if (signal_pending(current)) flush_signals(current); - del_timer_sync(&hba->destroy_timer); + timer_delete_sync(&hba->destroy_timer); } bnx2fc_unbind_adapter_devices(hba); } @@ -2363,8 +2363,8 @@ static int _bnx2fc_create(struct net_device *netdev, interface->vlan_id = vlan_id; interface->tm_timeout = BNX2FC_TM_TIMEOUT; - interface->timer_work_queue = - create_singlethread_workqueue("bnx2fc_timer_wq"); + interface->timer_work_queue = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, "bnx2fc_timer_wq"); if (!interface->timer_work_queue) { printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); rc = -EINVAL; @@ -2610,14 +2610,11 @@ static int bnx2fc_cpu_online(unsigned int cpu) p = &per_cpu(bnx2fc_percpu, cpu); - thread = kthread_create_on_node(bnx2fc_percpu_io_thread, - (void *)p, cpu_to_node(cpu), - "bnx2fc_thread/%d", cpu); + thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread, + (void *)p, cpu, "bnx2fc_thread/%d"); if (IS_ERR(thread)) return PTR_ERR(thread); - /* bind thread to the cpu */ - kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); return 0; @@ -2652,7 +2649,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu) return 0; } -static int bnx2fc_slave_configure(struct scsi_device *sdev) +static int bnx2fc_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { if (!bnx2fc_queue_depth) return 0; @@ -2951,7 +2949,7 @@ static struct scsi_host_template bnx2fc_shost_template = { .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ .eh_host_reset_handler = fc_eh_host_reset, - .slave_alloc = fc_slave_alloc, + .sdev_init = fc_sdev_init, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, @@ -2959,7 +2957,7 @@ static struct scsi_host_template bnx2fc_shost_template = { .dma_boundary = 0x7fff, .max_sectors = 0x3fbf, .track_queue_depth = 1, - .slave_configure = bnx2fc_slave_configure, + .sdev_configure = bnx2fc_sdev_configure, .shost_groups = bnx2fc_host_groups, .cmd_size = sizeof(struct bnx2fc_priv), }; diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index d91659811eb3..b8227cfef64f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -74,7 +74,7 @@ static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt) &tgt->flags))); if (signal_pending(current)) flush_signals(current); - del_timer_sync(&tgt->ofld_timer); + timer_delete_sync(&tgt->ofld_timer); } static void bnx2fc_offload_session(struct fcoe_port *port, @@ -128,10 +128,8 @@ retry_ofld: BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, " "retry ofld..%d\n", i++); msleep_interruptible(1000); - if (i > 3) { - i = 0; + if (i > 3) goto ofld_err; - } goto retry_ofld; } goto ofld_err; @@ -285,7 +283,7 @@ static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt) &tgt->flags))); if (signal_pending(current)) flush_signals(current); - del_timer_sync(&tgt->upld_timer); + timer_delete_sync(&tgt->upld_timer); } static void bnx2fc_upload_session(struct 
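Two more conversions recur in the bnx2fc and bnx2i hunks (and in bfad_im.c above). kthread_create_on_cpu() collapses the old kthread_create_on_node() plus kthread_bind() pair: it creates the thread on the CPU's NUMA node and binds it in one call, feeding the CPU number to the "%d" in the name format, and the thread starts stopped, so the caller still wakes it. Likewise, create_singlethread_workqueue() gives way to alloc_ordered_workqueue(), whose printf-style name removes the need for a separately stored name buffer such as the dropped drv_workq_name[]. A sketch under those assumptions (my_* names are illustrative):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/workqueue.h>

	static int my_io_thread(void *data)
	{
		/* idiomatic kthread loop; a real thread drains per-CPU work */
		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop()) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	static int my_cpu_online(unsigned int cpu)
	{
		struct task_struct *t;

		/* created on cpu's node and bound to cpu; starts stopped */
		t = kthread_create_on_cpu(my_io_thread, NULL, cpu,
					  "my_thread/%d");
		if (IS_ERR(t))
			return PTR_ERR(t);
		wake_up_process(t);
		return 0;
	}

	static struct workqueue_struct *my_create_wq(int inst)
	{
		/* ordered == single-threaded execution semantics */
		return alloc_ordered_workqueue("my_wq_%d", WQ_MEM_RECLAIM,
					       inst);
	}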
fcoe_port *port, diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index df7d04afce05..7030efee5c46 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -815,11 +815,6 @@ extern struct bnx2i_hba *get_adapter_list_head(void); struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, u16 iscsi_cid); -int bnx2i_alloc_ep_pool(void); -void bnx2i_release_ep_pool(void); -struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); -struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); - struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic); struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic); @@ -869,12 +864,6 @@ extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep); -/* Debug related function prototypes */ -extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); - extern int bnx2i_percpu_io_thread(void *arg); extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 872ad37e2a6e..cecc3a026762 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -415,14 +415,11 @@ static int bnx2i_cpu_online(unsigned int cpu) p = &per_cpu(bnx2i_percpu, cpu); - thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, - cpu_to_node(cpu), - "bnx2i_thread/%d", cpu); + thread = kthread_create_on_cpu(bnx2i_percpu_io_thread, (void *)p, + cpu, "bnx2i_thread/%d"); if (IS_ERR(thread)) return PTR_ERR(thread); - /* bind thread to the cpu */ - kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); return 0; diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 9971f32a663c..6c80e5b514fd 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1626,7 +1626,7 @@ static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) if (signal_pending(current)) flush_signals(current); - del_timer_sync(&bnx2i_conn->ep->ofld_timer); + timer_delete_sync(&bnx2i_conn->ep->ofld_timer); iscsi_conn_start(cls_conn); return 0; @@ -1749,7 +1749,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, if (signal_pending(current)) flush_signals(current); - del_timer_sync(&ep->ofld_timer); + timer_delete_sync(&ep->ofld_timer); bnx2i_ep_destroy_list_del(hba, ep); @@ -1861,7 +1861,7 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, if (signal_pending(current)) flush_signals(current); - del_timer_sync(&bnx2i_ep->ofld_timer); + timer_delete_sync(&bnx2i_ep->ofld_timer); bnx2i_ep_ofld_list_del(hba, bnx2i_ep); @@ -2100,7 +2100,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) if (signal_pending(current)) flush_signals(current); - del_timer_sync(&bnx2i_ep->ofld_timer); + timer_delete_sync(&bnx2i_ep->ofld_timer); destroy_conn: bnx2i_ep_active_list_del(hba, bnx2i_ep); diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c index f893e9779e9d..baf5f4e47937 100644 --- a/drivers/scsi/bvme6000_scsi.c +++ b/drivers/scsi/bvme6000_scsi.c @@ -106,7 +106,7 @@ static struct platform_driver bvme6000_scsi_driver = { .name = "bvme6000-scsi", }, .probe = bvme6000_probe, - 
.remove_new = bvme6000_device_remove, + .remove = bvme6000_device_remove, }; static int __init bvme6000_scsi_init(void) diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index e43c5413ce29..beded091dff1 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -3701,7 +3701,7 @@ csio_mberr_worker(void *data) struct csio_mb *mbp_next; int rv; - del_timer_sync(&mbm->timer); + timer_delete_sync(&mbm->timer); spin_lock_irq(&hw->lock); if (list_empty(&mbm->cbfn_q)) { @@ -4210,7 +4210,7 @@ csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) static void csio_mgmtm_exit(struct csio_mgmtm *mgmtm) { - del_timer_sync(&mgmtm->mgmt_timer); + timer_delete_sync(&mgmtm->mgmt_timer); } diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index d649b7a2a879..79c8dafdd49e 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -1162,7 +1162,7 @@ err_resume_exit: dev_err(&pdev->dev, "resume of device failed: %d\n", rv); } -static struct pci_error_handlers csio_err_handler = { +static const struct pci_error_handlers csio_err_handler = { .error_detected = csio_pci_error_detected, .slot_reset = csio_pci_slot_reset, .resume = csio_pci_resume, @@ -1185,9 +1185,6 @@ static struct pci_error_handlers csio_err_handler = { static struct pci_driver csio_pci_driver = { .name = KBUILD_MODNAME, - .driver = { - .owner = THIS_MODULE, - }, .id_table = csio_pci_tbl, .probe = csio_probe_one, .remove = csio_remove_one, diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index 5b3ffefae476..6cc1d53165a0 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -38,7 +38,7 @@ #include <linux/utsname.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/fc/fc_els.h> #include <scsi/fc/fc_fs.h> #include <scsi/fc/fc_gs.h> diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index 94810b19e747..c7b4c464f6b8 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c @@ -1619,7 +1619,7 @@ csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q) mbp = mbm->mcurrent; /* Stop mailbox completion timer */ - del_timer_sync(&mbm->timer); + timer_delete_sync(&mbm->timer); /* Add completion to tail of cbfn queue */ list_add_tail(&mbp->list, cbfn_q); @@ -1682,7 +1682,7 @@ csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw, void csio_mbm_exit(struct csio_mbm *mbm) { - del_timer_sync(&mbm->timer); + timer_delete_sync(&mbm->timer); CSIO_DB_ASSERT(mbm->mcurrent == NULL); CSIO_DB_ASSERT(list_empty(&mbm->req_q)); diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 05e1a63e00c3..34bde6650fae 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -41,7 +41,7 @@ #include <linux/compiler.h> #include <linux/export.h> #include <linux/module.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/page.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -800,7 +800,7 @@ csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt) rn = req->rnode; /* * FW says remote device is lost, but rnode - * doesnt reflect it. + * doesn't reflect it. 
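The csio_init.c hunks repeat two cleanups seen earlier in bfad.c: the struct pci_error_handlers table is only ever read, so it can be const and live in rodata, and the explicit .driver.owner assignment in struct pci_driver is dropped because pci_register_driver() is a macro around __pci_register_driver() that already passes THIS_MODULE. A sketch with hypothetical callbacks (bodies elided):

	#include <linux/pci.h>

	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state);
	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev);
	static void my_resume(struct pci_dev *pdev);

	static const struct pci_error_handlers my_err_handler = {
		.error_detected	= my_error_detected,
		.slot_reset	= my_slot_reset,
		.resume		= my_resume,
	};

	static struct pci_driver my_pci_driver = {
		.name		= KBUILD_MODNAME,
		/* no .driver.owner: pci_register_driver() supplies it */
		.err_handler	= &my_err_handler,
	};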
*/ if (csio_scsi_itnexus_loss_error(req->wr_status) && csio_is_rnode_ready(rn)) { @@ -2224,7 +2224,7 @@ fail: } static int -csio_slave_alloc(struct scsi_device *sdev) +csio_sdev_init(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); @@ -2237,14 +2237,14 @@ csio_slave_alloc(struct scsi_device *sdev) } static int -csio_slave_configure(struct scsi_device *sdev) +csio_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { scsi_change_queue_depth(sdev, csio_lun_qdepth); return 0; } static void -csio_slave_destroy(struct scsi_device *sdev) +csio_sdev_destroy(struct scsi_device *sdev) { sdev->hostdata = NULL; } @@ -2276,9 +2276,9 @@ struct scsi_host_template csio_fcoe_shost_template = { .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = csio_eh_abort_handler, .eh_device_reset_handler = csio_eh_lun_reset_handler, - .slave_alloc = csio_slave_alloc, - .slave_configure = csio_slave_configure, - .slave_destroy = csio_slave_destroy, + .sdev_init = csio_sdev_init, + .sdev_configure = csio_sdev_configure, + .sdev_destroy = csio_sdev_destroy, .scan_finished = csio_scan_finished, .this_id = -1, .sg_tablesize = CSIO_SCSI_MAX_SGE, @@ -2295,9 +2295,9 @@ struct scsi_host_template csio_fcoe_shost_vport_template = { .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = csio_eh_abort_handler, .eh_device_reset_handler = csio_eh_lun_reset_handler, - .slave_alloc = csio_slave_alloc, - .slave_configure = csio_slave_configure, - .slave_destroy = csio_slave_destroy, + .sdev_init = csio_sdev_init, + .sdev_configure = csio_sdev_configure, + .sdev_destroy = csio_sdev_destroy, .scan_finished = csio_scan_finished, .this_id = -1, .sg_tablesize = CSIO_SCSI_MAX_SGE, diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index ec6530240707..461d38e2fb19 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -495,7 +495,7 @@ static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) spin_lock_bh(&csk->lock); if (csk->retry_timer.function) { - del_timer(&csk->retry_timer); + timer_delete(&csk->retry_timer); csk->retry_timer.function = NULL; } diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index c07d2e3b4bcf..aaba294ecb58 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -930,7 +930,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) csk, csk->state, csk->flags, csk->tid); if (csk->retry_timer.function) { - del_timer(&csk->retry_timer); + timer_delete(&csk->retry_timer); csk->retry_timer.function = NULL; } diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index d92cf1dccc2f..0909b03e2497 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -485,7 +485,6 @@ struct cxgbi_device { unsigned char nmtus; unsigned char nports; struct pci_dev *pdev; - struct dentry *debugfs_root; struct iscsi_transport *itp; struct module *owner; @@ -499,7 +498,6 @@ struct cxgbi_device { unsigned int rxq_idx_cntr; struct cxgbi_ports_map pmap; - void (*dev_ddp_cleanup)(struct cxgbi_device *); struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *); int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *, struct cxgbi_task_tag_info *); @@ -512,7 +510,6 @@ struct cxgbi_device { unsigned int, int); void (*csk_release_offload_resources)(struct cxgbi_sock *); - int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); u32 
(*csk_send_rx_credits)(struct cxgbi_sock *, u32); int (*csk_push_tx_frames)(struct cxgbi_sock *, int); void (*csk_send_abort_req)(struct cxgbi_sock *); diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig deleted file mode 100644 index 5533bdcb0458..000000000000 --- a/drivers/scsi/cxlflash/Kconfig +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# IBM CXL-attached Flash Accelerator SCSI Driver -# - -config CXLFLASH - tristate "Support for IBM CAPI Flash" - depends on PCI && SCSI && (CXL || OCXL) && EEH - select IRQ_POLL - default m - help - Allows CAPI Accelerated IO to Flash - If unsure, say N. diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile deleted file mode 100644 index fd2f0dd9daf9..000000000000 --- a/drivers/scsi/cxlflash/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_CXLFLASH) += cxlflash.o -cxlflash-y += main.o superpipe.o lunmgt.o vlun.o -cxlflash-$(CONFIG_CXL) += cxl_hw.o -cxlflash-$(CONFIG_OCXL) += ocxl_hw.o diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h deleted file mode 100644 index 181e0445ed42..000000000000 --- a/drivers/scsi/cxlflash/backend.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * CXL Flash Device Driver - * - * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2018 IBM Corporation - */ - -#ifndef _CXLFLASH_BACKEND_H -#define _CXLFLASH_BACKEND_H - -extern const struct cxlflash_backend_ops cxlflash_cxl_ops; -extern const struct cxlflash_backend_ops cxlflash_ocxl_ops; - -struct cxlflash_backend_ops { - struct module *module; - void __iomem * (*psa_map)(void *ctx_cookie); - void (*psa_unmap)(void __iomem *addr); - int (*process_element)(void *ctx_cookie); - int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler, - void *cookie, char *name); - void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie); - u64 (*get_irq_objhndl)(void *ctx_cookie, int irq); - int (*start_context)(void *ctx_cookie); - int (*stop_context)(void *ctx_cookie); - int (*afu_reset)(void *ctx_cookie); - void (*set_master)(void *ctx_cookie); - void * (*get_context)(struct pci_dev *dev, void *afu_cookie); - void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie); - int (*release_context)(void *ctx_cookie); - void (*perst_reloads_same_image)(void *afu_cookie, bool image); - ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf, - size_t count); - int (*allocate_afu_irqs)(void *ctx_cookie, int num); - void (*free_afu_irqs)(void *ctx_cookie); - void * (*create_afu)(struct pci_dev *dev); - void (*destroy_afu)(void *afu_cookie); - struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops, - int *fd); - void * (*fops_get_context)(struct file *file); - int (*start_work)(void *ctx_cookie, u64 irqs); - int (*fd_mmap)(struct file *file, struct vm_area_struct *vm); - int (*fd_release)(struct inode *inode, struct file *file); -}; - -#endif /* _CXLFLASH_BACKEND_H */ diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h deleted file mode 100644 index de6229e27b48..000000000000 --- a/drivers/scsi/cxlflash/common.h +++ /dev/null @@ -1,340 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. 
Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#ifndef _CXLFLASH_COMMON_H -#define _CXLFLASH_COMMON_H - -#include <linux/async.h> -#include <linux/cdev.h> -#include <linux/irq_poll.h> -#include <linux/list.h> -#include <linux/rwsem.h> -#include <linux/types.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_device.h> - -#include "backend.h" - -extern const struct file_operations cxlflash_cxl_fops; - -#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */ -#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */ -#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */ - -#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK)) -#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1)) - -#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */ -#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */ -#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */ - -#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */ -#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */ -#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants - * max_sectors - * in units of - * 512 byte - * sectors - */ - -#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry)) - -/* AFU command retry limit */ -#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */ - -/* Command management definitions */ -#define CXLFLASH_MAX_CMDS 256 -#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS - -/* RRQ for master issued cmds */ -#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS - -/* SQ for master issued cmds */ -#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS - -/* Hardware queue definitions */ -#define CXLFLASH_DEF_HWQS 1 -#define CXLFLASH_MAX_HWQS 8 -#define PRIMARY_HWQ 0 - - -static inline void check_sizes(void) -{ - BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK); - BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS); -} - -/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */ -#define CMD_BUFSIZE SIZE_4K - -enum cxlflash_lr_state { - LINK_RESET_INVALID, - LINK_RESET_REQUIRED, - LINK_RESET_COMPLETE -}; - -enum cxlflash_init_state { - INIT_STATE_NONE, - INIT_STATE_PCI, - INIT_STATE_AFU, - INIT_STATE_SCSI, - INIT_STATE_CDEV -}; - -enum cxlflash_state { - STATE_PROBING, /* Initial state during probe */ - STATE_PROBED, /* Temporary state, probe completed but EEH occurred */ - STATE_NORMAL, /* Normal running state, everything good */ - STATE_RESET, /* Reset state, trying to reset/recover */ - STATE_FAILTERM /* Failed/terminating state, error out users/threads */ -}; - -enum cxlflash_hwq_mode { - HWQ_MODE_RR, /* Roundrobin (default) */ - HWQ_MODE_TAG, /* Distribute based on block MQ tag */ - HWQ_MODE_CPU, /* CPU affinity */ - MAX_HWQ_MODE -}; - -/* - * Each context has its own set of resource handles that is visible - * only from that context. 
- */ - -struct cxlflash_cfg { - struct afu *afu; - - const struct cxlflash_backend_ops *ops; - struct pci_dev *dev; - struct pci_device_id *dev_id; - struct Scsi_Host *host; - int num_fc_ports; - struct cdev cdev; - struct device *chardev; - - ulong cxlflash_regs_pci; - - struct work_struct work_q; - enum cxlflash_init_state init_state; - enum cxlflash_lr_state lr_state; - int lr_port; - atomic_t scan_host_needed; - - void *afu_cookie; - - atomic_t recovery_threads; - struct mutex ctx_recovery_mutex; - struct mutex ctx_tbl_list_mutex; - struct rw_semaphore ioctl_rwsem; - struct ctx_info *ctx_tbl[MAX_CONTEXT]; - struct list_head ctx_err_recovery; /* contexts w/ recovery pending */ - struct file_operations cxl_fops; - - /* Parameters that are LUN table related */ - int last_lun_index[MAX_FC_PORTS]; - int promote_lun_index; - struct list_head lluns; /* list of llun_info structs */ - - wait_queue_head_t tmf_waitq; - spinlock_t tmf_slock; - bool tmf_active; - bool ws_unmap; /* Write-same unmap supported */ - wait_queue_head_t reset_waitq; - enum cxlflash_state state; - async_cookie_t async_reset_cookie; -}; - -struct afu_cmd { - struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */ - struct sisl_ioasa sa; /* IOASA must follow IOARCB */ - struct afu *parent; - struct scsi_cmnd *scp; - struct completion cevent; - struct list_head queue; - u32 hwq_index; - - u8 cmd_tmf:1, - cmd_aborted:1; - - struct list_head list; /* Pending commands link */ - - /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned. - * However for performance reasons the IOARCB/IOASA should be - * cache line aligned. - */ -} __aligned(cache_line_size()); - -static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc) -{ - return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd)); -} - -static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc) -{ - struct afu_cmd *afuc = sc_to_afuc(sc); - - INIT_LIST_HEAD(&afuc->queue); - return afuc; -} - -static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) -{ - struct afu_cmd *afuc = sc_to_afuc(sc); - - memset(afuc, 0, sizeof(*afuc)); - return sc_to_afuci(sc); -} - -struct hwq { - /* Stuff requiring alignment go first. */ - struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */ - u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ - - /* Beware of alignment till here. 
Preferably introduce new - * fields after this point - */ - struct afu *afu; - void *ctx_cookie; - struct sisl_host_map __iomem *host_map; /* MC host map */ - struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */ - ctx_hndl_t ctx_hndl; /* master's context handle */ - u32 index; /* Index of this hwq */ - int num_irqs; /* Number of interrupts requested for context */ - struct list_head pending_cmds; /* Commands pending completion */ - - atomic_t hsq_credits; - spinlock_t hsq_slock; /* Hardware send queue lock */ - struct sisl_ioarcb *hsq_start; - struct sisl_ioarcb *hsq_end; - struct sisl_ioarcb *hsq_curr; - spinlock_t hrrq_slock; - u64 *hrrq_start; - u64 *hrrq_end; - u64 *hrrq_curr; - bool toggle; - bool hrrq_online; - - s64 room; - - struct irq_poll irqpoll; -} __aligned(cache_line_size()); - -struct afu { - struct hwq hwqs[CXLFLASH_MAX_HWQS]; - int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd); - int (*context_reset)(struct hwq *hwq); - - /* AFU HW */ - struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */ - - atomic_t cmds_active; /* Number of currently active AFU commands */ - struct mutex sync_active; /* Mutex to serialize AFU commands */ - u64 hb; - u32 internal_lun; /* User-desired LUN mode for this AFU */ - - u32 num_hwqs; /* Number of hardware queues */ - u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */ - enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */ - u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */ - - char version[16]; - u64 interface_version; - - u32 irqpoll_weight; - struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */ -}; - -static inline struct hwq *get_hwq(struct afu *afu, u32 index) -{ - WARN_ON(index >= CXLFLASH_MAX_HWQS); - - return &afu->hwqs[index]; -} - -static inline bool afu_is_irqpoll_enabled(struct afu *afu) -{ - return !!afu->irqpoll_weight; -} - -static inline bool afu_has_cap(struct afu *afu, u64 cap) -{ - u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT; - - return afu_cap & cap; -} - -static inline bool afu_is_ocxl_lisn(struct afu *afu) -{ - return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN); -} - -static inline bool afu_is_afu_debug(struct afu *afu) -{ - return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG); -} - -static inline bool afu_is_lun_provision(struct afu *afu) -{ - return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION); -} - -static inline bool afu_is_sq_cmd_mode(struct afu *afu) -{ - return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE); -} - -static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu) -{ - return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE); -} - -static inline u64 lun_to_lunid(u64 lun) -{ - __be64 lun_id; - - int_to_scsilun(lun, (struct scsi_lun *)&lun_id); - return be64_to_cpu(lun_id); -} - -static inline struct fc_port_bank __iomem *get_fc_port_bank( - struct cxlflash_cfg *cfg, int i) -{ - struct afu *afu = cfg->afu; - - return &afu->afu_map->global.bank[CHAN2PORTBANK(i)]; -} - -static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i) -{ - struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i); - - return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0]; -} - -static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i) -{ - struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i); - - return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0]; -} - -int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode); -void cxlflash_list_init(void); -void 
cxlflash_term_global_luns(void); -void cxlflash_free_errpage(void); -int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, - void __user *arg); -void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg); -int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg); -void cxlflash_term_local_luns(struct cxlflash_cfg *cfg); -void cxlflash_restore_luntable(struct cxlflash_cfg *cfg); - -#endif /* ifndef _CXLFLASH_COMMON_H */ diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c deleted file mode 100644 index b814130f3f5c..000000000000 --- a/drivers/scsi/cxlflash/cxl_hw.c +++ /dev/null @@ -1,177 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CXL Flash Device Driver - * - * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2018 IBM Corporation - */ - -#include <misc/cxl.h> - -#include "backend.h" - -/* - * The following routines map the cxlflash backend operations to existing CXL - * kernel API function and are largely simple shims that provide an abstraction - * for converting generic context and AFU cookies into cxl_context or cxl_afu - * pointers. - */ - -static void __iomem *cxlflash_psa_map(void *ctx_cookie) -{ - return cxl_psa_map(ctx_cookie); -} - -static void cxlflash_psa_unmap(void __iomem *addr) -{ - cxl_psa_unmap(addr); -} - -static int cxlflash_process_element(void *ctx_cookie) -{ - return cxl_process_element(ctx_cookie); -} - -static int cxlflash_map_afu_irq(void *ctx_cookie, int num, - irq_handler_t handler, void *cookie, char *name) -{ - return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name); -} - -static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie) -{ - cxl_unmap_afu_irq(ctx_cookie, num, cookie); -} - -static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq) -{ - /* Dummy fop for cxl */ - return 0; -} - -static int cxlflash_start_context(void *ctx_cookie) -{ - return cxl_start_context(ctx_cookie, 0, NULL); -} - -static int cxlflash_stop_context(void *ctx_cookie) -{ - return cxl_stop_context(ctx_cookie); -} - -static int cxlflash_afu_reset(void *ctx_cookie) -{ - return cxl_afu_reset(ctx_cookie); -} - -static void cxlflash_set_master(void *ctx_cookie) -{ - cxl_set_master(ctx_cookie); -} - -static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie) -{ - return cxl_get_context(dev); -} - -static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie) -{ - return cxl_dev_context_init(dev); -} - -static int cxlflash_release_context(void *ctx_cookie) -{ - return cxl_release_context(ctx_cookie); -} - -static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image) -{ - cxl_perst_reloads_same_image(afu_cookie, image); -} - -static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev, - void *buf, size_t count) -{ - return cxl_read_adapter_vpd(dev, buf, count); -} - -static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num) -{ - return cxl_allocate_afu_irqs(ctx_cookie, num); -} - -static void cxlflash_free_afu_irqs(void *ctx_cookie) -{ - cxl_free_afu_irqs(ctx_cookie); -} - -static void *cxlflash_create_afu(struct pci_dev *dev) -{ - return cxl_pci_to_afu(dev); -} - -static void cxlflash_destroy_afu(void *afu) -{ - /* Dummy fop for cxl */ -} - -static struct file *cxlflash_get_fd(void *ctx_cookie, - struct file_operations *fops, int *fd) -{ - return cxl_get_fd(ctx_cookie, fops, fd); -} - -static void *cxlflash_fops_get_context(struct file *file) -{ 
- return cxl_fops_get_context(file); -} - -static int cxlflash_start_work(void *ctx_cookie, u64 irqs) -{ - struct cxl_ioctl_start_work work = { 0 }; - - work.num_interrupts = irqs; - work.flags = CXL_START_WORK_NUM_IRQS; - - return cxl_start_work(ctx_cookie, &work); -} - -static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm) -{ - return cxl_fd_mmap(file, vm); -} - -static int cxlflash_fd_release(struct inode *inode, struct file *file) -{ - return cxl_fd_release(inode, file); -} - -const struct cxlflash_backend_ops cxlflash_cxl_ops = { - .module = THIS_MODULE, - .psa_map = cxlflash_psa_map, - .psa_unmap = cxlflash_psa_unmap, - .process_element = cxlflash_process_element, - .map_afu_irq = cxlflash_map_afu_irq, - .unmap_afu_irq = cxlflash_unmap_afu_irq, - .get_irq_objhndl = cxlflash_get_irq_objhndl, - .start_context = cxlflash_start_context, - .stop_context = cxlflash_stop_context, - .afu_reset = cxlflash_afu_reset, - .set_master = cxlflash_set_master, - .get_context = cxlflash_get_context, - .dev_context_init = cxlflash_dev_context_init, - .release_context = cxlflash_release_context, - .perst_reloads_same_image = cxlflash_perst_reloads_same_image, - .read_adapter_vpd = cxlflash_read_adapter_vpd, - .allocate_afu_irqs = cxlflash_allocate_afu_irqs, - .free_afu_irqs = cxlflash_free_afu_irqs, - .create_afu = cxlflash_create_afu, - .destroy_afu = cxlflash_destroy_afu, - .get_fd = cxlflash_get_fd, - .fops_get_context = cxlflash_fops_get_context, - .start_work = cxlflash_start_work, - .fd_mmap = cxlflash_fd_mmap, - .fd_release = cxlflash_fd_release, -}; diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c deleted file mode 100644 index e0e15b44a2e6..000000000000 --- a/drivers/scsi/cxlflash/lunmgt.c +++ /dev/null @@ -1,278 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#include <asm/unaligned.h> - -#include <linux/interrupt.h> -#include <linux/pci.h> - -#include <scsi/scsi_host.h> -#include <uapi/scsi/cxlflash_ioctl.h> - -#include "sislite.h" -#include "common.h" -#include "vlun.h" -#include "superpipe.h" - -/** - * create_local() - allocate and initialize a local LUN information structure - * @sdev: SCSI device associated with LUN. - * @wwid: World Wide Node Name for LUN. - * - * Return: Allocated local llun_info structure on success, NULL on failure - */ -static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = NULL; - - lli = kzalloc(sizeof(*lli), GFP_KERNEL); - if (unlikely(!lli)) { - dev_err(dev, "%s: could not allocate lli\n", __func__); - goto out; - } - - lli->sdev = sdev; - lli->host_no = sdev->host->host_no; - lli->in_table = false; - - memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN); -out: - return lli; -} - -/** - * create_global() - allocate and initialize a global LUN information structure - * @sdev: SCSI device associated with LUN. - * @wwid: World Wide Node Name for LUN. 
- * - * Return: Allocated global glun_info structure on success, NULL on failure - */ -static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct glun_info *gli = NULL; - - gli = kzalloc(sizeof(*gli), GFP_KERNEL); - if (unlikely(!gli)) { - dev_err(dev, "%s: could not allocate gli\n", __func__); - goto out; - } - - mutex_init(&gli->mutex); - memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN); -out: - return gli; -} - -/** - * lookup_local() - find a local LUN information structure by WWID - * @cfg: Internal structure associated with the host. - * @wwid: WWID associated with LUN. - * - * Return: Found local lun_info structure on success, NULL on failure - */ -static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid) -{ - struct llun_info *lli, *temp; - - list_for_each_entry_safe(lli, temp, &cfg->lluns, list) - if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) - return lli; - - return NULL; -} - -/** - * lookup_global() - find a global LUN information structure by WWID - * @wwid: WWID associated with LUN. - * - * Return: Found global lun_info structure on success, NULL on failure - */ -static struct glun_info *lookup_global(u8 *wwid) -{ - struct glun_info *gli, *temp; - - list_for_each_entry_safe(gli, temp, &global.gluns, list) - if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) - return gli; - - return NULL; -} - -/** - * find_and_create_lun() - find or create a local LUN information structure - * @sdev: SCSI device associated with LUN. - * @wwid: WWID associated with LUN. - * - * The LUN is kept both in a local list (per adapter) and in a global list - * (across all adapters). Certain attributes of the LUN are local to the - * adapter (such as index, port selection mask, etc.). - * - * The block allocation map is shared across all adapters (i.e. associated - * with the global list). Since different attributes are associated with - * the per adapter and global entries, allocate two separate structures for each - * LUN (one local, one global). - * - * Keep a pointer back from the local to the global entry. - * - * This routine assumes the caller holds the global mutex. - * - * Return: Found/Allocated local lun_info structure on success, NULL on failure - */ -static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = NULL; - struct glun_info *gli = NULL; - - if (unlikely(!wwid)) - goto out; - - lli = lookup_local(cfg, wwid); - if (lli) - goto out; - - lli = create_local(sdev, wwid); - if (unlikely(!lli)) - goto out; - - gli = lookup_global(wwid); - if (gli) { - lli->parent = gli; - list_add(&lli->list, &cfg->lluns); - goto out; - } - - gli = create_global(sdev, wwid); - if (unlikely(!gli)) { - kfree(lli); - lli = NULL; - goto out; - } - - lli->parent = gli; - list_add(&lli->list, &cfg->lluns); - - list_add(&gli->list, &global.gluns); - -out: - dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli); - return lli; -} - -/** - * cxlflash_term_local_luns() - Delete all entries from local LUN list, free. - * @cfg: Internal structure associated with the host.
- */ -void cxlflash_term_local_luns(struct cxlflash_cfg *cfg) -{ - struct llun_info *lli, *temp; - - mutex_lock(&global.mutex); - list_for_each_entry_safe(lli, temp, &cfg->lluns, list) { - list_del(&lli->list); - kfree(lli); - } - mutex_unlock(&global.mutex); -} - -/** - * cxlflash_list_init() - initializes the global LUN list - */ -void cxlflash_list_init(void) -{ - INIT_LIST_HEAD(&global.gluns); - mutex_init(&global.mutex); - global.err_page = NULL; -} - -/** - * cxlflash_term_global_luns() - frees resources associated with global LUN list - */ -void cxlflash_term_global_luns(void) -{ - struct glun_info *gli, *temp; - - mutex_lock(&global.mutex); - list_for_each_entry_safe(gli, temp, &global.gluns, list) { - list_del(&gli->list); - cxlflash_ba_terminate(&gli->blka.ba_lun); - kfree(gli); - } - mutex_unlock(&global.mutex); -} - -/** - * cxlflash_manage_lun() - handles LUN management activities - * @sdev: SCSI device associated with LUN. - * @manage: Manage ioctl data structure. - * - * This routine is used to notify the driver about a LUN's WWID and associate - * SCSI devices (sdev) with a global LUN instance. Additionally it serves to - * change a LUN's operating mode: legacy or superpipe. - * - * Return: 0 on success, -errno on failure - */ -int cxlflash_manage_lun(struct scsi_device *sdev, - struct dk_cxlflash_manage_lun *manage) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = NULL; - int rc = 0; - u64 flags = manage->hdr.flags; - u32 chan = sdev->channel; - - mutex_lock(&global.mutex); - lli = find_and_create_lun(sdev, manage->wwid); - dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n", - __func__, get_unaligned_be64(&manage->wwid[0]), - get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli); - if (unlikely(!lli)) { - rc = -ENOMEM; - goto out; - } - - if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) { - /* - * Update port selection mask based upon channel, store off LUN - * in unpacked, AFU-friendly format, and hang LUN reference in - * the sdev. - */ - lli->port_sel |= CHAN2PORTMASK(chan); - lli->lun_id[chan] = lun_to_lunid(sdev->lun); - sdev->hostdata = lli; - } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) { - if (lli->parent->mode != MODE_NONE) - rc = -EBUSY; - else { - /* - * Clean up local LUN for this port and reset table - * tracking when no more references exist. - */ - sdev->hostdata = NULL; - lli->port_sel &= ~CHAN2PORTMASK(chan); - if (lli->port_sel == 0U) - lli->in_table = false; - } - } - - dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n", - __func__, lli->port_sel, chan, lli->lun_id[chan]); - -out: - mutex_unlock(&global.mutex); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c deleted file mode 100644 index e8382cc5cf23..000000000000 --- a/drivers/scsi/cxlflash/main.c +++ /dev/null @@ -1,3970 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. 
Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#include <linux/delay.h> -#include <linux/list.h> -#include <linux/module.h> -#include <linux/pci.h> - -#include <asm/unaligned.h> - -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_host.h> -#include <uapi/scsi/cxlflash_ioctl.h> - -#include "main.h" -#include "sislite.h" -#include "common.h" - -MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME); -MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>"); -MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>"); -MODULE_LICENSE("GPL"); - -static char *cxlflash_devnode(const struct device *dev, umode_t *mode); -static const struct class cxlflash_class = { - .name = "cxlflash", - .devnode = cxlflash_devnode, -}; - -static u32 cxlflash_major; -static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS); - -/** - * process_cmd_err() - command error handler - * @cmd: AFU command that experienced the error. - * @scp: SCSI command associated with the AFU command in error. - * - * Translates error bits from AFU command to SCSI command results. - */ -static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) -{ - struct afu *afu = cmd->parent; - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - struct sisl_ioasa *ioasa; - u32 resid; - - ioasa = &(cmd->sa); - - if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) { - resid = ioasa->resid; - scsi_set_resid(scp, resid); - dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n", - __func__, cmd, scp, resid); - } - - if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) { - dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n", - __func__, cmd, scp); - scp->result = (DID_ERROR << 16); - } - - dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x " - "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__, - ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc, - ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra); - - if (ioasa->rc.scsi_rc) { - /* We have a SCSI status */ - if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) { - memcpy(scp->sense_buffer, ioasa->sense_data, - SISL_SENSE_DATA_LEN); - scp->result = ioasa->rc.scsi_rc; - } else - scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16); - } - - /* - * We encountered an error. Set scp->result based on nature - * of error. - */ - if (ioasa->rc.fc_rc) { - /* We have an FC status */ - switch (ioasa->rc.fc_rc) { - case SISL_FC_RC_LINKDOWN: - scp->result = (DID_REQUEUE << 16); - break; - case SISL_FC_RC_RESID: - /* This indicates an FCP resid underrun */ - if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) { - /* If the SISL_RC_FLAGS_OVERRUN flag was set, - * then we will handle this error elsewhere. - * If not then we must handle it here. - * This is probably an AFU bug.
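The status translation in process_cmd_err() above hinges on the layout of the SCSI midlayer result word: the host byte (the DID_* codes) occupies bits 16-23 and the SCSI status byte bits 0-7, which is why every assignment takes the form (DID_xxx << 16). A minimal userspace sketch of that packing follows; the DID_* values mirror the kernel's definitions, and make_result() is an invented helper for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

/* Host-byte codes; values mirror the kernel's DID_* definitions. */
#define DID_OK         0x00
#define DID_NO_CONNECT 0x05
#define DID_ERROR      0x07
#define DID_REQUEUE    0x0d

/* Pack a host byte (bits 16-23) and SCSI status byte (bits 0-7) into
 * the result word shape that the driver writes into scp->result. */
static uint32_t make_result(uint8_t host_byte, uint8_t scsi_status)
{
	return ((uint32_t)host_byte << 16) | scsi_status;
}

int main(void)
{
	/* A link-down completion is retried: DID_REQUEUE, no SCSI status */
	printf("result=%08x\n", (unsigned)make_result(DID_REQUEUE, 0));
	return 0;
}
```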
- */ - scp->result = (DID_ERROR << 16); - } - break; - case SISL_FC_RC_RESIDERR: - /* Resid mismatch between adapter and device */ - case SISL_FC_RC_TGTABORT: - case SISL_FC_RC_ABORTOK: - case SISL_FC_RC_ABORTFAIL: - case SISL_FC_RC_NOLOGI: - case SISL_FC_RC_ABORTPEND: - case SISL_FC_RC_WRABORTPEND: - case SISL_FC_RC_NOEXP: - case SISL_FC_RC_INUSE: - scp->result = (DID_ERROR << 16); - break; - } - } - - if (ioasa->rc.afu_rc) { - /* We have an AFU error */ - switch (ioasa->rc.afu_rc) { - case SISL_AFU_RC_NO_CHANNELS: - scp->result = (DID_NO_CONNECT << 16); - break; - case SISL_AFU_RC_DATA_DMA_ERR: - switch (ioasa->afu_extra) { - case SISL_AFU_DMA_ERR_PAGE_IN: - /* Retry */ - scp->result = (DID_IMM_RETRY << 16); - break; - case SISL_AFU_DMA_ERR_INVALID_EA: - default: - scp->result = (DID_ERROR << 16); - } - break; - case SISL_AFU_RC_OUT_OF_DATA_BUFS: - /* Retry */ - scp->result = (DID_ERROR << 16); - break; - default: - scp->result = (DID_ERROR << 16); - } - } -} - -/** - * cmd_complete() - command completion handler - * @cmd: AFU command that has completed. - * - * For SCSI commands this routine prepares and submits commands that have - * either completed or timed out to the SCSI stack. For internal commands - * (TMF or AFU), this routine simply notifies the originator that the - * command has completed. - */ -static void cmd_complete(struct afu_cmd *cmd) -{ - struct scsi_cmnd *scp; - ulong lock_flags; - struct afu *afu = cmd->parent; - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - - spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - list_del(&cmd->list); - spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); - - if (cmd->scp) { - scp = cmd->scp; - if (unlikely(cmd->sa.ioasc)) - process_cmd_err(cmd, scp); - else - scp->result = (DID_OK << 16); - - dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n", - __func__, scp, scp->result, cmd->sa.ioasc); - scsi_done(scp); - } else if (cmd->cmd_tmf) { - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - cfg->tmf_active = false; - wake_up_all_locked(&cfg->tmf_waitq); - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - } else - complete(&cmd->cevent); -} - -/** - * flush_pending_cmds() - flush all pending commands on this hardware queue - * @hwq: Hardware queue to flush. - * - * The hardware send queue lock associated with this hardware queue must be - * held when calling this routine. - */ -static void flush_pending_cmds(struct hwq *hwq) -{ - struct cxlflash_cfg *cfg = hwq->afu->parent; - struct afu_cmd *cmd, *tmp; - struct scsi_cmnd *scp; - ulong lock_flags; - - list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) { - /* Bypass command when on a doneq, cmd_complete() will handle */ - if (!list_empty(&cmd->queue)) - continue; - - list_del(&cmd->list); - - if (cmd->scp) { - scp = cmd->scp; - scp->result = (DID_IMM_RETRY << 16); - scsi_done(scp); - } else { - cmd->cmd_aborted = true; - - if (cmd->cmd_tmf) { - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - cfg->tmf_active = false; - wake_up_all_locked(&cfg->tmf_waitq); - spin_unlock_irqrestore(&cfg->tmf_slock, - lock_flags); - } else - complete(&cmd->cevent); - } - } -} - -/** - * context_reset() - reset context via specified register - * @hwq: Hardware queue owning the context to be reset. - * @reset_reg: MMIO register to perform reset. - * - * When the reset is successful, the SISLite specification guarantees that - * the AFU has aborted all currently pending I/O. 
Accordingly, these commands - * must be flushed. - * - * Return: 0 on success, -errno on failure - */ -static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg) -{ - struct cxlflash_cfg *cfg = hwq->afu->parent; - struct device *dev = &cfg->dev->dev; - int rc = -ETIMEDOUT; - int nretry = 0; - u64 val = 0x1; - ulong lock_flags; - - dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq); - - spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - - writeq_be(val, reset_reg); - do { - val = readq_be(reset_reg); - if ((val & 0x1) == 0x0) { - rc = 0; - break; - } - - /* Double delay each time */ - udelay(1 << nretry); - } while (nretry++ < MC_ROOM_RETRY_CNT); - - if (!rc) - flush_pending_cmds(hwq); - - spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); - - dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n", - __func__, rc, val, nretry); - return rc; -} - -/** - * context_reset_ioarrin() - reset context via IOARRIN register - * @hwq: Hardware queue owning the context to be reset. - * - * Return: 0 on success, -errno on failure - */ -static int context_reset_ioarrin(struct hwq *hwq) -{ - return context_reset(hwq, &hwq->host_map->ioarrin); -} - -/** - * context_reset_sq() - reset context via SQ_CONTEXT_RESET register - * @hwq: Hardware queue owning the context to be reset. - * - * Return: 0 on success, -errno on failure - */ -static int context_reset_sq(struct hwq *hwq) -{ - return context_reset(hwq, &hwq->host_map->sq_ctx_reset); -} - -/** - * send_cmd_ioarrin() - sends an AFU command via IOARRIN register - * @afu: AFU associated with the host. - * @cmd: AFU command to send. - * - * Return: - * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure - */ -static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - int rc = 0; - s64 room; - ulong lock_flags; - - /* - * To avoid the performance penalty of MMIO, spread the update of - * 'room' over multiple commands. - */ - spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - if (--hwq->room < 0) { - room = readq_be(&hwq->host_map->cmd_room); - if (room <= 0) { - dev_dbg_ratelimited(dev, "%s: no cmd_room to send " - "0x%02X, room=0x%016llX\n", - __func__, cmd->rcb.cdb[0], room); - hwq->room = 0; - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - hwq->room = room - 1; - } - - list_add(&cmd->list, &hwq->pending_cmds); - writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin); -out: - spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); - dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", - __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc); - return rc; -} - -/** - * send_cmd_sq() - sends an AFU command via SQ ring - * @afu: AFU associated with the host. - * @cmd: AFU command to send. 
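The send_cmd_ioarrin() routine above amortizes the cost of reading the command-room register by caching a local credit count and re-reading the register only when the cache is exhausted. Below is a minimal userspace sketch of the same credit logic, assuming invented stand-ins: read_cmd_room() models the expensive MMIO read and hw_room models device-side capacity.

```c
#include <stdio.h>

static long hw_room = 8;        /* pretend device-side command room */

static long read_cmd_room(void) /* stands in for the MMIO readq */
{
	return hw_room;
}

static long cached_room;

static int try_send(void)
{
	/* Consume a cached credit; refresh from "hardware" only when out */
	if (--cached_room < 0) {
		long room = read_cmd_room();
		if (room <= 0) {
			cached_room = 0;
			return -1;      /* caller should back off and retry */
		}
		cached_room = room - 1;
	}
	hw_room--;                      /* the sent command consumes room */
	return 0;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("send %d -> %d\n", i, try_send());
	return 0;
}
```

The payoff is one register read per batch of sends instead of one per command, at the price of occasionally seeing a slightly stale room value.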
- * - * Return: - * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure - */ -static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - int rc = 0; - int newval; - ulong lock_flags; - - newval = atomic_dec_if_positive(&hwq->hsq_credits); - if (newval <= 0) { - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - - cmd->rcb.ioasa = &cmd->sa; - - spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - - *hwq->hsq_curr = cmd->rcb; - if (hwq->hsq_curr < hwq->hsq_end) - hwq->hsq_curr++; - else - hwq->hsq_curr = hwq->hsq_start; - - list_add(&cmd->list, &hwq->pending_cmds); - writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail); - - spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); -out: - dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p " - "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len, - cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr, - readq_be(&hwq->host_map->sq_head), - readq_be(&hwq->host_map->sq_tail)); - return rc; -} - -/** - * wait_resp() - polls for a response or timeout to a sent AFU command - * @afu: AFU associated with the host. - * @cmd: AFU command that was sent. - * - * Return: 0 on success, -errno on failure - */ -static int wait_resp(struct afu *afu, struct afu_cmd *cmd) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - int rc = 0; - ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000); - - timeout = wait_for_completion_timeout(&cmd->cevent, timeout); - if (!timeout) - rc = -ETIMEDOUT; - - if (cmd->cmd_aborted) - rc = -EAGAIN; - - if (unlikely(cmd->sa.ioasc != 0)) { - dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n", - __func__, cmd->rcb.cdb[0], cmd->sa.ioasc); - rc = -EIO; - } - - return rc; -} - -/** - * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command - * @host: SCSI host associated with device. - * @scp: SCSI command to send. - * @afu: AFU associated with the host. - * - * Hashes a command based upon the hardware queue mode. - * - * Return: Trusted index of target hardware queue - */ -static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp, - struct afu *afu) -{ - u32 tag; - u32 hwq = 0; - - if (afu->num_hwqs == 1) - return 0; - - switch (afu->hwq_mode) { - case HWQ_MODE_RR: - hwq = afu->hwq_rr_count++ % afu->num_hwqs; - break; - case HWQ_MODE_TAG: - tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp)); - hwq = blk_mq_unique_tag_to_hwq(tag); - break; - case HWQ_MODE_CPU: - hwq = smp_processor_id() % afu->num_hwqs; - break; - default: - WARN_ON_ONCE(1); - } - - return hwq; -} - -/** - * send_tmf() - sends a Task Management Function (TMF) - * @cfg: Internal structure associated with the host. - * @sdev: SCSI device destined for TMF. - * @tmfcmd: TMF command to send.
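The three hashing modes in cmd_to_target_hwq() above (round-robin, block-layer tag, and submitting CPU) are easy to confuse; the sketch below isolates just the selection math. It is a userspace model under stated assumptions: blk_mq_unique_tag()/smp_processor_id() are replaced by plain parameters, and pick_hwq() is an invented name.

```c
#include <stdint.h>
#include <stdio.h>

enum hwq_mode { HWQ_MODE_RR, HWQ_MODE_TAG, HWQ_MODE_CPU };

static uint32_t pick_hwq(enum hwq_mode mode, uint32_t num_hwqs,
			 uint32_t *rr_count, uint32_t tag_hwq, uint32_t cpu)
{
	if (num_hwqs == 1)
		return 0;	/* single queue: nothing to choose */

	switch (mode) {
	case HWQ_MODE_RR:
		return (*rr_count)++ % num_hwqs;   /* rotate per command */
	case HWQ_MODE_TAG:
		return tag_hwq;   /* queue encoded in the block-layer tag */
	case HWQ_MODE_CPU:
		return cpu % num_hwqs;   /* follow the submitting CPU */
	}
	return 0;
}

int main(void)
{
	uint32_t rr = 0;

	for (int i = 0; i < 4; i++)
		printf("rr pick: %u\n", pick_hwq(HWQ_MODE_RR, 3, &rr, 0, 0));
	return 0;
}
```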
- * - * Return: - * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure - */ -static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev, - u64 tmfcmd) -{ - struct afu *afu = cfg->afu; - struct afu_cmd *cmd = NULL; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); - bool needs_deletion = false; - char *buf = NULL; - ulong lock_flags; - int rc = 0; - ulong to; - - buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); - if (unlikely(!buf)) { - dev_err(dev, "%s: no memory for command\n", __func__); - rc = -ENOMEM; - goto out; - } - - cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); - INIT_LIST_HEAD(&cmd->queue); - - /* When Task Management Function is active do not send another */ - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - if (cfg->tmf_active) - wait_event_interruptible_lock_irq(cfg->tmf_waitq, - !cfg->tmf_active, - cfg->tmf_slock); - cfg->tmf_active = true; - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - - cmd->parent = afu; - cmd->cmd_tmf = true; - cmd->hwq_index = hwq->index; - - cmd->rcb.ctx_id = hwq->ctx_hndl; - cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; - cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel); - cmd->rcb.lun_id = lun_to_lunid(sdev->lun); - cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | - SISL_REQ_FLAGS_SUP_UNDERRUN | - SISL_REQ_FLAGS_TMF_CMD); - memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd)); - - rc = afu->send_cmd(afu, cmd); - if (unlikely(rc)) { - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - cfg->tmf_active = false; - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - goto out; - } - - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - to = msecs_to_jiffies(5000); - to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq, - !cfg->tmf_active, - cfg->tmf_slock, - to); - if (!to) { - dev_err(dev, "%s: TMF timed out\n", __func__); - rc = -ETIMEDOUT; - needs_deletion = true; - } else if (cmd->cmd_aborted) { - dev_err(dev, "%s: TMF aborted\n", __func__); - rc = -EAGAIN; - } else if (cmd->sa.ioasc) { - dev_err(dev, "%s: TMF failed ioasc=%08x\n", - __func__, cmd->sa.ioasc); - rc = -EIO; - } - cfg->tmf_active = false; - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - - if (needs_deletion) { - spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - list_del(&cmd->list); - spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); - } -out: - kfree(buf); - return rc; -} - -/** - * cxlflash_driver_info() - information handler for this host driver - * @host: SCSI host associated with device. - * - * Return: A string describing the device. - */ -static const char *cxlflash_driver_info(struct Scsi_Host *host) -{ - return CXLFLASH_ADAPTER_NAME; -} - -/** - * cxlflash_queuecommand() - sends a mid-layer request - * @host: SCSI host associated with device. - * @scp: SCSI command to send. 
- * - * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure - */ -static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) -{ - struct cxlflash_cfg *cfg = shost_priv(host); - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct afu_cmd *cmd = sc_to_afuci(scp); - struct scatterlist *sg = scsi_sglist(scp); - int hwq_index = cmd_to_target_hwq(host, scp, afu); - struct hwq *hwq = get_hwq(afu, hwq_index); - u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN; - ulong lock_flags; - int rc = 0; - - dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu " - "cdb=(%08x-%08x-%08x-%08x)\n", - __func__, scp, host->host_no, scp->device->channel, - scp->device->id, scp->device->lun, - get_unaligned_be32(&((u32 *)scp->cmnd)[0]), - get_unaligned_be32(&((u32 *)scp->cmnd)[1]), - get_unaligned_be32(&((u32 *)scp->cmnd)[2]), - get_unaligned_be32(&((u32 *)scp->cmnd)[3])); - - /* - * If a Task Management Function is active, wait for it to complete - * before continuing with regular commands. - */ - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - if (cfg->tmf_active) { - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - - switch (cfg->state) { - case STATE_PROBING: - case STATE_PROBED: - case STATE_RESET: - dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__); - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - case STATE_FAILTERM: - dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__); - scp->result = (DID_NO_CONNECT << 16); - scsi_done(scp); - rc = 0; - goto out; - default: - atomic_inc(&afu->cmds_active); - break; - } - - if (likely(sg)) { - cmd->rcb.data_len = sg->length; - cmd->rcb.data_ea = (uintptr_t)sg_virt(sg); - } - - cmd->scp = scp; - cmd->parent = afu; - cmd->hwq_index = hwq_index; - - cmd->sa.ioasc = 0; - cmd->rcb.ctx_id = hwq->ctx_hndl; - cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; - cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); - cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); - - if (scp->sc_data_direction == DMA_TO_DEVICE) - req_flags |= SISL_REQ_FLAGS_HOST_WRITE; - - cmd->rcb.req_flags = req_flags; - memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb)); - - rc = afu->send_cmd(afu, cmd); - atomic_dec(&afu->cmds_active); -out: - return rc; -} - -/** - * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe - * @cfg: Internal structure associated with the host. - */ -static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg) -{ - struct pci_dev *pdev = cfg->dev; - - if (pci_channel_offline(pdev)) - wait_event_timeout(cfg->reset_waitq, - !pci_channel_offline(pdev), - CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT); -} - -/** - * free_mem() - free memory associated with the AFU - * @cfg: Internal structure associated with the host. - */ -static void free_mem(struct cxlflash_cfg *cfg) -{ - struct afu *afu = cfg->afu; - - if (cfg->afu) { - free_pages((ulong)afu, get_order(sizeof(struct afu))); - cfg->afu = NULL; - } -} - -/** - * cxlflash_reset_sync() - synchronizing point for asynchronous resets - * @cfg: Internal structure associated with the host. 
- */ -static void cxlflash_reset_sync(struct cxlflash_cfg *cfg) -{ - if (cfg->async_reset_cookie == 0) - return; - - /* Wait until all async calls prior to this cookie have completed */ - async_synchronize_cookie(cfg->async_reset_cookie + 1); - cfg->async_reset_cookie = 0; -} - -/** - * stop_afu() - stops the AFU command timers and unmaps the MMIO space - * @cfg: Internal structure associated with the host. - * - * Safe to call with AFU in a partially allocated/initialized state. - * - * Cancels scheduled worker threads, waits for any active internal AFU - * commands to timeout, disables IRQ polling and then unmaps the MMIO space. - */ -static void stop_afu(struct cxlflash_cfg *cfg) -{ - struct afu *afu = cfg->afu; - struct hwq *hwq; - int i; - - cancel_work_sync(&cfg->work_q); - if (!current_is_async()) - cxlflash_reset_sync(cfg); - - if (likely(afu)) { - while (atomic_read(&afu->cmds_active)) - ssleep(1); - - if (afu_is_irqpoll_enabled(afu)) { - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - - irq_poll_disable(&hwq->irqpoll); - } - } - - if (likely(afu->afu_map)) { - cfg->ops->psa_unmap(afu->afu_map); - afu->afu_map = NULL; - } - } -} - -/** - * term_intr() - disables all AFU interrupts - * @cfg: Internal structure associated with the host. - * @level: Depth of allocation, where to begin waterfall tear down. - * @index: Index of the hardware queue. - * - * Safe to call with AFU/MC in partially allocated/initialized state. - */ -static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level, - u32 index) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq; - - if (!afu) { - dev_err(dev, "%s: returning with NULL afu\n", __func__); - return; - } - - hwq = get_hwq(afu, index); - - if (!hwq->ctx_cookie) { - dev_err(dev, "%s: returning with NULL MC\n", __func__); - return; - } - - switch (level) { - case UNMAP_THREE: - /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ - if (index == PRIMARY_HWQ) - cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq); - fallthrough; - case UNMAP_TWO: - cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq); - fallthrough; - case UNMAP_ONE: - cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq); - fallthrough; - case FREE_IRQ: - cfg->ops->free_afu_irqs(hwq->ctx_cookie); - fallthrough; - case UNDO_NOOP: - /* No action required */ - break; - } -} - -/** - * term_mc() - terminates the master context - * @cfg: Internal structure associated with the host. - * @index: Index of the hardware queue. - * - * Safe to call with AFU/MC in partially allocated/initialized state. - */ -static void term_mc(struct cxlflash_cfg *cfg, u32 index) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq; - ulong lock_flags; - - if (!afu) { - dev_err(dev, "%s: returning with NULL afu\n", __func__); - return; - } - - hwq = get_hwq(afu, index); - - if (!hwq->ctx_cookie) { - dev_err(dev, "%s: returning with NULL MC\n", __func__); - return; - } - - WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie)); - if (index != PRIMARY_HWQ) - WARN_ON(cfg->ops->release_context(hwq->ctx_cookie)); - hwq->ctx_cookie = NULL; - - spin_lock_irqsave(&hwq->hrrq_slock, lock_flags); - hwq->hrrq_online = false; - spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags); - - spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - flush_pending_cmds(hwq); - spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); -} - -/** - * term_afu() - terminates the AFU - * @cfg: Internal structure associated with the host. 
- * - * Safe to call with AFU/MC in partially allocated/initialized state. - */ -static void term_afu(struct cxlflash_cfg *cfg) -{ - struct device *dev = &cfg->dev->dev; - int k; - - /* - * Tear down is carefully orchestrated to ensure - * no interrupts can come in when the problem state - * area is unmapped. - * - * 1) Disable all AFU interrupts for each master - * 2) Unmap the problem state area - * 3) Stop each master context - */ - for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) - term_intr(cfg, UNMAP_THREE, k); - - stop_afu(cfg); - - for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) - term_mc(cfg, k); - - dev_dbg(dev, "%s: returning\n", __func__); -} - -/** - * notify_shutdown() - notifies device of pending shutdown - * @cfg: Internal structure associated with the host. - * @wait: Whether to wait for shutdown processing to complete. - * - * This function will notify the AFU that the adapter is being shutdown - * and will wait for shutdown processing to complete if wait is true. - * This notification should flush pending I/Os to the device and halt - * further I/Os until the next AFU reset is issued and device restarted. - */ -static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct dev_dependent_vals *ddv; - __be64 __iomem *fc_port_regs; - u64 reg, status; - int i, retry_cnt = 0; - - ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data; - if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN)) - return; - - if (!afu || !afu->afu_map) { - dev_dbg(dev, "%s: Problem state area not mapped\n", __func__); - return; - } - - /* Notify AFU */ - for (i = 0; i < cfg->num_fc_ports; i++) { - fc_port_regs = get_fc_port_regs(cfg, i); - - reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); - reg |= SISL_FC_SHUTDOWN_NORMAL; - writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); - } - - if (!wait) - return; - - /* Wait up to 1.5 seconds for shutdown processing to complete */ - for (i = 0; i < cfg->num_fc_ports; i++) { - fc_port_regs = get_fc_port_regs(cfg, i); - retry_cnt = 0; - - while (true) { - status = readq_be(&fc_port_regs[FC_STATUS / 8]); - if (status & SISL_STATUS_SHUTDOWN_COMPLETE) - break; - if (++retry_cnt >= MC_RETRY_CNT) { - dev_dbg(dev, "%s: port %d shutdown processing " - "not yet completed\n", __func__, i); - break; - } - msleep(100 * retry_cnt); - } - } -} - -/** - * cxlflash_get_minor() - gets the first available minor number - * - * Return: Unique minor number that can be used to create the character device. - */ -static int cxlflash_get_minor(void) -{ - int minor; - long bit; - - bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS); - if (bit >= CXLFLASH_MAX_ADAPTERS) - return -1; - - minor = bit & MINORMASK; - set_bit(minor, cxlflash_minor); - return minor; -} - -/** - * cxlflash_put_minor() - releases the minor number - * @minor: Minor number that is no longer needed. - */ -static void cxlflash_put_minor(int minor) -{ - clear_bit(minor, cxlflash_minor); -} - -/** - * cxlflash_release_chrdev() - release the character device for the host - * @cfg: Internal structure associated with the host. - */ -static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg) -{ - device_unregister(cfg->chardev); - cfg->chardev = NULL; - cdev_del(&cfg->cdev); - cxlflash_put_minor(MINOR(cfg->cdev.dev)); -} - -/** - * cxlflash_remove() - PCI entry point to tear down host - * @pdev: PCI device associated with the host. - * - * Safe to use as a cleanup in partially allocated/initialized state. 
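The teardown helpers here, term_intr() above and cxlflash_remove() that follows, share a "waterfall" shape: a switch on how far initialization progressed, where each case deliberately falls through to undo the earlier stages as well. A minimal userspace model of that pattern is sketched below; the stage names are invented for illustration.

```c
#include <stdio.h>

enum init_state { INIT_NONE, INIT_A, INIT_B, INIT_C };

/* Undo from the stage reached back down to nothing; each case falls
 * through so a deeper init_state unwinds every earlier stage too. */
static void teardown(enum init_state how_far)
{
	switch (how_far) {
	case INIT_C:
		puts("undo C");
		/* fall through */
	case INIT_B:
		puts("undo B");
		/* fall through */
	case INIT_A:
		puts("undo A");
		/* fall through */
	case INIT_NONE:
		break;
	}
}

int main(void)
{
	teardown(INIT_B);	/* prints "undo B" then "undo A" */
	return 0;
}
```

The attraction of the pattern is that every error path in the setup code only needs to record how far it got; the single waterfall guarantees the undo steps run in reverse order of construction.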
Note that - * the reset_waitq is flushed as part of the stop/termination of user contexts. - */ -static void cxlflash_remove(struct pci_dev *pdev) -{ - struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; - ulong lock_flags; - - if (!pci_is_enabled(pdev)) { - dev_dbg(dev, "%s: Device is disabled\n", __func__); - return; - } - - /* Yield to running recovery threads before continuing with remove */ - wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && - cfg->state != STATE_PROBING); - spin_lock_irqsave(&cfg->tmf_slock, lock_flags); - if (cfg->tmf_active) - wait_event_interruptible_lock_irq(cfg->tmf_waitq, - !cfg->tmf_active, - cfg->tmf_slock); - spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); - - /* Notify AFU and wait for shutdown processing to complete */ - notify_shutdown(cfg, true); - - cfg->state = STATE_FAILTERM; - cxlflash_stop_term_user_contexts(cfg); - - switch (cfg->init_state) { - case INIT_STATE_CDEV: - cxlflash_release_chrdev(cfg); - fallthrough; - case INIT_STATE_SCSI: - cxlflash_term_local_luns(cfg); - scsi_remove_host(cfg->host); - fallthrough; - case INIT_STATE_AFU: - term_afu(cfg); - fallthrough; - case INIT_STATE_PCI: - cfg->ops->destroy_afu(cfg->afu_cookie); - pci_disable_device(pdev); - fallthrough; - case INIT_STATE_NONE: - free_mem(cfg); - scsi_host_put(cfg->host); - break; - } - - dev_dbg(dev, "%s: returning\n", __func__); -} - -/** - * alloc_mem() - allocates the AFU and its command pool - * @cfg: Internal structure associated with the host. - * - * A partially allocated state remains on failure. - * - * Return: - * 0 on success - * -ENOMEM on failure to allocate memory - */ -static int alloc_mem(struct cxlflash_cfg *cfg) -{ - int rc = 0; - struct device *dev = &cfg->dev->dev; - - /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */ - cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(sizeof(struct afu))); - if (unlikely(!cfg->afu)) { - dev_err(dev, "%s: cannot get %d free pages\n", - __func__, get_order(sizeof(struct afu))); - rc = -ENOMEM; - goto out; - } - cfg->afu->parent = cfg; - cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS; - cfg->afu->afu_map = NULL; -out: - return rc; -} - -/** - * init_pci() - initializes the host as a PCI device - * @cfg: Internal structure associated with the host. - * - * Return: 0 on success, -errno on failure - */ -static int init_pci(struct cxlflash_cfg *cfg) -{ - struct pci_dev *pdev = cfg->dev; - struct device *dev = &cfg->dev->dev; - int rc = 0; - - rc = pci_enable_device(pdev); - if (rc || pci_channel_offline(pdev)) { - if (pci_channel_offline(pdev)) { - cxlflash_wait_for_pci_err_recovery(cfg); - rc = pci_enable_device(pdev); - } - - if (rc) { - dev_err(dev, "%s: Cannot enable adapter\n", __func__); - cxlflash_wait_for_pci_err_recovery(cfg); - goto out; - } - } - -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * init_scsi() - adds the host to the SCSI stack and kicks off host scan - * @cfg: Internal structure associated with the host. 
- * - * Return: 0 on success, -errno on failure - */ -static int init_scsi(struct cxlflash_cfg *cfg) -{ - struct pci_dev *pdev = cfg->dev; - struct device *dev = &cfg->dev->dev; - int rc = 0; - - rc = scsi_add_host(cfg->host, &pdev->dev); - if (rc) { - dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc); - goto out; - } - - scsi_scan_host(cfg->host); - -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * set_port_online() - transitions the specified host FC port to online state - * @fc_regs: Top of MMIO region defined for specified port. - * - * The provided MMIO region must be mapped prior to call. Online state means - * that the FC link layer has synced, completed the handshaking process, and - * is ready for login to start. - */ -static void set_port_online(__be64 __iomem *fc_regs) -{ - u64 cmdcfg; - - cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); - cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */ - cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */ - writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); -} - -/** - * set_port_offline() - transitions the specified host FC port to offline state - * @fc_regs: Top of MMIO region defined for specified port. - * - * The provided MMIO region must be mapped prior to call. - */ -static void set_port_offline(__be64 __iomem *fc_regs) -{ - u64 cmdcfg; - - cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); - cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */ - cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */ - writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); -} - -/** - * wait_port_online() - waits for the specified host FC port to come online - * @fc_regs: Top of MMIO region defined for specified port. - * @delay_us: Number of microseconds to delay between reading port status. - * @nretry: Number of cycles to retry reading port status. - * - * The provided MMIO region must be mapped prior to call. This will time out - * when the cable is not plugged in. - * - * Return: - * TRUE (1) when the specified port is online - * FALSE (0) when the specified port fails to come online after timeout - */ -static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) -{ - u64 status; - - WARN_ON(delay_us < 1000); - - do { - msleep(delay_us / 1000); - status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); - if (status == U64_MAX) - nretry /= 2; - } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE && - nretry--); - - return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE); -} - -/** - * wait_port_offline() - waits for the specified host FC port to go offline - * @fc_regs: Top of MMIO region defined for specified port. - * @delay_us: Number of microseconds to delay between reading port status. - * @nretry: Number of cycles to retry reading port status. - * - * The provided MMIO region must be mapped prior to call.
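The polled wait in wait_port_online() above has one subtlety worth calling out: an all-ones read (U64_MAX) usually means the device has dropped off the bus, so the remaining retry budget is halved rather than spent at full rate. A userspace model of the same loop, assuming an invented read_status() in place of the MMIO readq and a toy status layout:

```c
#include <stdbool.h>
#include <stdint.h>

#define STATUS_MASK   0xffull
#define STATUS_ONLINE 0x1ull

static uint64_t read_status(void)	/* stands in for readq_be() */
{
	static int polls;
	return ++polls < 3 ? 0x0 : STATUS_ONLINE; /* online on 3rd poll */
}

static bool wait_online(unsigned int nretry)
{
	uint64_t status;

	do {
		/* the real code sleeps (msleep) between reads here */
		status = read_status();
		if (status == UINT64_MAX)	/* device gone: burn budget */
			nretry /= 2;
	} while ((status & STATUS_MASK) != STATUS_ONLINE && nretry--);

	return (status & STATUS_MASK) == STATUS_ONLINE;
}

int main(void)
{
	return wait_online(10) ? 0 : 1;
}
```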
- * - * Return: - * TRUE (1) when the specified port is offline - * FALSE (0) when the specified port fails to go offline after timeout - */ -static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) -{ - u64 status; - - WARN_ON(delay_us < 1000); - - do { - msleep(delay_us / 1000); - status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); - if (status == U64_MAX) - nretry /= 2; - } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && - nretry--); - - return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE); -} - -/** - * afu_set_wwpn() - configures the WWPN for the specified host FC port - * @afu: AFU associated with the host that owns the specified FC port. - * @port: Port number being configured. - * @fc_regs: Top of MMIO region defined for specified port. - * @wwpn: The world-wide-port-number previously discovered for port. - * - * The provided MMIO region must be mapped prior to call. As part of the - * sequence to configure the WWPN, the port is toggled offline and then back - * online. This toggling action can cause this routine to delay up to a few - * seconds. When configured to use the internal LUN feature of the AFU, a - * failure to come online is overridden. - */ -static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, - u64 wwpn) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - - set_port_offline(fc_regs); - if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, - FC_PORT_STATUS_RETRY_CNT)) { - dev_dbg(dev, "%s: wait on port %d to go offline timed out\n", - __func__, port); - } - - writeq_be(wwpn, &fc_regs[FC_PNAME / 8]); - - set_port_online(fc_regs); - if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, - FC_PORT_STATUS_RETRY_CNT)) { - dev_dbg(dev, "%s: wait on port %d to go online timed out\n", - __func__, port); - } -} - -/** - * afu_link_reset() - resets the specified host FC port - * @afu: AFU associated with the host that owns the specified FC port. - * @port: Port number being configured. - * @fc_regs: Top of MMIO region defined for specified port. - * - * The provided MMIO region must be mapped prior to call. The sequence to - * reset the port involves toggling it offline and then back online. This - * action can cause this routine to delay up to a few seconds. An effort - * is made to maintain link with the device by switching the host to use - * the alternate port exclusively while the reset takes place. - * A failure to come online is overridden.
- */ -static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - u64 port_sel; - - /* first switch the AFU to the other links, if any */ - port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel); - port_sel &= ~(1ULL << port); - writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); - cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); - - set_port_offline(fc_regs); - if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, - FC_PORT_STATUS_RETRY_CNT)) - dev_err(dev, "%s: wait on port %d to go offline timed out\n", - __func__, port); - - set_port_online(fc_regs); - if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, - FC_PORT_STATUS_RETRY_CNT)) - dev_err(dev, "%s: wait on port %d to go online timed out\n", - __func__, port); - - /* switch back to include this port */ - port_sel |= (1ULL << port); - writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); - cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); - - dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel); -} - -/** - * afu_err_intr_init() - clears and initializes the AFU for error interrupts - * @afu: AFU associated with the host. - */ -static void afu_err_intr_init(struct afu *afu) -{ - struct cxlflash_cfg *cfg = afu->parent; - __be64 __iomem *fc_port_regs; - int i; - struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); - u64 reg; - - /* global async interrupts: AFU clears afu_ctrl on context exit - * if async interrupts were sent to that context. This prevents - * the AFU from sending further async interrupts when - * there is - * nobody to receive them. - */ - - /* mask all */ - writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask); - /* set LISN# to send and point to primary master context */ - reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); - - if (afu->internal_lun) - reg |= 1; /* Bit 63 indicates local lun */ - writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl); - /* clear all */ - writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); - /* unmask bits that are of interest */ - /* note: afu can send an interrupt after this step */ - writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask); - /* clear again in case a bit came on after previous clear but before */ - /* unmask */ - writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); - - /* Clear/Set internal lun bits */ - fc_port_regs = get_fc_port_regs(cfg, 0); - reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); - reg &= SISL_FC_INTERNAL_MASK; - if (afu->internal_lun) - reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); - writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); - - /* now clear FC errors */ - for (i = 0; i < cfg->num_fc_ports; i++) { - fc_port_regs = get_fc_port_regs(cfg, i); - - writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]); - writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); - } - - /* sync interrupts for master's IOARRIN write */ - /* note that unlike asyncs, there can be no pending sync interrupts */ - /* at this time (this is a fresh context and master has not written */ - /* IOARRIN yet), so there is nothing to clear.
*/ - - /* set LISN#, it is always sent to the context that wrote IOARRIN */ - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - - reg = readq_be(&hwq->host_map->ctx_ctrl); - WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0); - reg |= SISL_MSI_SYNC_ERROR; - writeq_be(reg, &hwq->host_map->ctx_ctrl); - writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); - } -} - -/** - * cxlflash_sync_err_irq() - interrupt handler for synchronous errors - * @irq: Interrupt number. - * @data: Private data provided at interrupt registration, the AFU. - * - * Return: Always return IRQ_HANDLED. - */ -static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) -{ - struct hwq *hwq = (struct hwq *)data; - struct cxlflash_cfg *cfg = hwq->afu->parent; - struct device *dev = &cfg->dev->dev; - u64 reg; - u64 reg_unmasked; - - reg = readq_be(&hwq->host_map->intr_status); - reg_unmasked = (reg & SISL_ISTATUS_UNMASK); - - if (reg_unmasked == 0UL) { - dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n", - __func__, reg); - goto cxlflash_sync_err_irq_exit; - } - - dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", - __func__, reg); - - writeq_be(reg_unmasked, &hwq->host_map->intr_clear); - -cxlflash_sync_err_irq_exit: - return IRQ_HANDLED; -} - -/** - * process_hrrq() - process the read-response queue - * @hwq: HWQ associated with the host. - * @doneq: Queue of commands harvested from the RRQ. - * @budget: Threshold of RRQ entries to process. - * - * This routine must be called holding the disabled RRQ spin lock. - * - * Return: The number of entries processed. - */ -static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget) -{ - struct afu *afu = hwq->afu; - struct afu_cmd *cmd; - struct sisl_ioasa *ioasa; - struct sisl_ioarcb *ioarcb; - bool toggle = hwq->toggle; - int num_hrrq = 0; - u64 entry, - *hrrq_start = hwq->hrrq_start, - *hrrq_end = hwq->hrrq_end, - *hrrq_curr = hwq->hrrq_curr; - - /* Process ready RRQ entries up to the specified budget (if any) */ - while (true) { - entry = *hrrq_curr; - - if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) - break; - - entry &= ~SISL_RESP_HANDLE_T_BIT; - - if (afu_is_sq_cmd_mode(afu)) { - ioasa = (struct sisl_ioasa *)entry; - cmd = container_of(ioasa, struct afu_cmd, sa); - } else { - ioarcb = (struct sisl_ioarcb *)entry; - cmd = container_of(ioarcb, struct afu_cmd, rcb); - } - - list_add_tail(&cmd->queue, doneq); - - /* Advance to next entry or wrap and flip the toggle bit */ - if (hrrq_curr < hrrq_end) - hrrq_curr++; - else { - hrrq_curr = hrrq_start; - toggle ^= SISL_RESP_HANDLE_T_BIT; - } - - atomic_inc(&hwq->hsq_credits); - num_hrrq++; - - if (budget > 0 && num_hrrq >= budget) - break; - } - - hwq->hrrq_curr = hrrq_curr; - hwq->toggle = toggle; - - return num_hrrq; -} - -/** - * process_cmd_doneq() - process a queue of harvested RRQ commands - * @doneq: Queue of completed commands. - * - * Note that upon return the queue can no longer be trusted. - */ -static void process_cmd_doneq(struct list_head *doneq) -{ - struct afu_cmd *cmd, *tmp; - - WARN_ON(list_empty(doneq)); - - list_for_each_entry_safe(cmd, tmp, doneq, queue) - cmd_complete(cmd); -} - -/** - * cxlflash_irqpoll() - process a queue of harvested RRQ commands - * @irqpoll: IRQ poll structure associated with queue to poll. - * @budget: Threshold of RRQ entries to process per poll. - * - * Return: The number of entries processed. 
- */ -static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget) -{ - struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll); - unsigned long hrrq_flags; - LIST_HEAD(doneq); - int num_entries = 0; - - spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); - - num_entries = process_hrrq(hwq, &doneq, budget); - if (num_entries < budget) - irq_poll_complete(irqpoll); - - spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); - - process_cmd_doneq(&doneq); - return num_entries; -} - -/** - * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) - * @irq: Interrupt number. - * @data: Private data provided at interrupt registration, the AFU. - * - * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found. - */ -static irqreturn_t cxlflash_rrq_irq(int irq, void *data) -{ - struct hwq *hwq = (struct hwq *)data; - struct afu *afu = hwq->afu; - unsigned long hrrq_flags; - LIST_HEAD(doneq); - int num_entries = 0; - - spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); - - /* Silently drop spurious interrupts when queue is not online */ - if (!hwq->hrrq_online) { - spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); - return IRQ_HANDLED; - } - - if (afu_is_irqpoll_enabled(afu)) { - irq_poll_sched(&hwq->irqpoll); - spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); - return IRQ_HANDLED; - } - - num_entries = process_hrrq(hwq, &doneq, -1); - spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); - - if (num_entries == 0) - return IRQ_NONE; - - process_cmd_doneq(&doneq); - return IRQ_HANDLED; -} - -/* - * Asynchronous interrupt information table - * - * NOTE: - * - Order matters here as this array is indexed by bit position. - * - * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro - * as complex and complains due to a lack of parentheses/braces. - */ -#define ASTATUS_FC(_a, _b, _c, _d) \ - { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) } - -#define BUILD_SISL_ASTATUS_FC_PORT(_a) \ - ASTATUS_FC(_a, LINK_UP, "link up", 0), \ - ASTATUS_FC(_a, LINK_DN, "link down", 0), \ - ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \ - ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \ - ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \ - ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \ - ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \ - ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET) - -static const struct asyc_intr_info ainfo[] = { - BUILD_SISL_ASTATUS_FC_PORT(1), - BUILD_SISL_ASTATUS_FC_PORT(0), - BUILD_SISL_ASTATUS_FC_PORT(3), - BUILD_SISL_ASTATUS_FC_PORT(2) -}; - -/** - * cxlflash_async_err_irq() - interrupt handler for asynchronous errors - * @irq: Interrupt number. - * @data: Private data provided at interrupt registration, the AFU. - * - * Return: Always return IRQ_HANDLED. 
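The RRQ walked by process_hrrq() above is a toggle-bit ring: the producer flips a generation bit in each entry every time it wraps, so the consumer can tell fresh entries from stale ones without a shared producer index. A minimal userspace model of that convention follows; the 4-entry ring, payload values, and function names are invented for illustration.

```c
#include <stdint.h>
#include <stdio.h>

#define T_BIT   0x1ull
#define N_ENTRY 4

static uint64_t ring[N_ENTRY];	/* zeroed: nothing matches toggle=1 yet */

/* Consume entries whose toggle bit matches the current generation; on
 * wrap, flip the expected toggle instead of tracking a producer index. */
static int consume(uint64_t *toggle, unsigned int *idx)
{
	int n = 0;

	while ((ring[*idx] & T_BIT) == *toggle) {
		printf("entry %u payload %llx\n", *idx,
		       (unsigned long long)(ring[*idx] & ~T_BIT));
		n++;
		if (++*idx == N_ENTRY) {
			*idx = 0;
			*toggle ^= T_BIT;
		}
	}
	return n;
}

int main(void)
{
	uint64_t toggle = T_BIT;	/* first lap is generation 1 */
	unsigned int idx = 0;

	ring[0] = 0xa0ull | T_BIT;	/* two posted completions */
	ring[1] = 0xb0ull | T_BIT;

	printf("consumed %d\n", consume(&toggle, &idx));
	return 0;
}
```

In the driver the entry payload is a pointer back to the command (low bit reserved for the toggle), which is why process_hrrq() strips SISL_RESP_HANDLE_T_BIT before use.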
- */ -static irqreturn_t cxlflash_async_err_irq(int irq, void *data) -{ - struct hwq *hwq = (struct hwq *)data; - struct afu *afu = hwq->afu; - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - const struct asyc_intr_info *info; - struct sisl_global_map __iomem *global = &afu->afu_map->global; - __be64 __iomem *fc_port_regs; - u64 reg_unmasked; - u64 reg; - u64 bit; - u8 port; - - reg = readq_be(&global->regs.aintr_status); - reg_unmasked = (reg & SISL_ASTATUS_UNMASK); - - if (unlikely(reg_unmasked == 0)) { - dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", - __func__, reg); - goto out; - } - - /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ - writeq_be(reg_unmasked, &global->regs.aintr_clear); - - /* Check each bit that is on */ - for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) { - if (unlikely(bit >= ARRAY_SIZE(ainfo))) { - WARN_ON_ONCE(1); - continue; - } - - info = &ainfo[bit]; - if (unlikely(info->status != 1ULL << bit)) { - WARN_ON_ONCE(1); - continue; - } - - port = info->port; - fc_port_regs = get_fc_port_regs(cfg, port); - - dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", - __func__, port, info->desc, - readq_be(&fc_port_regs[FC_STATUS / 8])); - - /* - * Do link reset first, some OTHER errors will set FC_ERROR - * again if cleared before or w/o a reset - */ - if (info->action & LINK_RESET) { - dev_err(dev, "%s: FC Port %d: resetting link\n", - __func__, port); - cfg->lr_state = LINK_RESET_REQUIRED; - cfg->lr_port = port; - schedule_work(&cfg->work_q); - } - - if (info->action & CLR_FC_ERROR) { - reg = readq_be(&fc_port_regs[FC_ERROR / 8]); - - /* - * Since all errors are unmasked, FC_ERROR and FC_ERRCAP - * should be the same and tracing one is sufficient. - */ - - dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", - __func__, port, reg); - - writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); - writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); - } - - if (info->action & SCAN_HOST) { - atomic_inc(&cfg->scan_host_needed); - schedule_work(&cfg->work_q); - } - } - -out: - return IRQ_HANDLED; -} - -/** - * read_vpd() - obtains the WWPNs from VPD - * @cfg: Internal structure associated with the host. - * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs - * - * Return: 0 on success, -errno on failure - */ -static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) -{ - struct device *dev = &cfg->dev->dev; - struct pci_dev *pdev = cfg->dev; - int i, k, rc = 0; - unsigned int kw_size; - ssize_t vpd_size; - char vpd_data[CXLFLASH_VPD_LEN]; - char tmp_buf[WWPN_BUF_LEN] = { 0 }; - const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *) - cfg->dev_id->driver_data; - const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED; - const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; - - /* Get the VPD data from the device */ - vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); - if (unlikely(vpd_size <= 0)) { - dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", - __func__, vpd_size); - rc = -ENODEV; - goto out; - } - - /* - * Find the offset of the WWPN tag within the read only - * VPD data and validate the found field (partials are - * no good to us). Convert the ASCII data to an integer - * value. Note that we must copy to a temporary buffer - * because the conversion service requires that the ASCII - * string be terminated. - * - * Allow for WWPN not being found for all devices, setting - * the returned WWPN to zero when not found. Notify with a
Notify with a - * log error for cards that should have had WWPN keywords - * in the VPD - cards requiring WWPN will not have their - * ports programmed and operate in an undefined state. - */ - for (k = 0; k < cfg->num_fc_ports; k++) { - i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, - wwpn_vpd_tags[k], &kw_size); - if (i == -ENOENT) { - if (wwpn_vpd_required) - dev_err(dev, "%s: Port %d WWPN not found\n", - __func__, k); - wwpn[k] = 0ULL; - continue; - } - - if (i < 0 || kw_size != WWPN_LEN) { - dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", - __func__, k); - rc = -ENODEV; - goto out; - } - - memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); - rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); - if (unlikely(rc)) { - dev_err(dev, "%s: WWPN conversion failed for port %d\n", - __func__, k); - rc = -ENODEV; - goto out; - } - - dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); - } - -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * init_pcr() - initialize the provisioning and control registers - * @cfg: Internal structure associated with the host. - * - * Also sets up fast access to the mapped registers and initializes AFU - * command fields that never change. - */ -static void init_pcr(struct cxlflash_cfg *cfg) -{ - struct afu *afu = cfg->afu; - struct sisl_ctrl_map __iomem *ctrl_map; - struct hwq *hwq; - void *cookie; - int i; - - for (i = 0; i < MAX_CONTEXT; i++) { - ctrl_map = &afu->afu_map->ctrls[i].ctrl; - /* Disrupt any clients that could be running */ - /* e.g. clients that survived a master restart */ - writeq_be(0, &ctrl_map->rht_start); - writeq_be(0, &ctrl_map->rht_cnt_id); - writeq_be(0, &ctrl_map->ctx_cap); - } - - /* Copy frequently used fields into hwq */ - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - cookie = hwq->ctx_cookie; - - hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie); - hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; - hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; - - /* Program the Endian Control for the master context */ - writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); - } -} - -/** - * init_global() - initialize AFU global registers - * @cfg: Internal structure associated with the host. 
- */ -static int init_global(struct cxlflash_cfg *cfg) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq; - struct sisl_host_map __iomem *hmap; - __be64 __iomem *fc_port_regs; - u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ - int i = 0, num_ports = 0; - int rc = 0; - int j; - void *ctx; - u64 reg; - - rc = read_vpd(cfg, &wwpn[0]); - if (rc) { - dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); - goto out; - } - - /* Set up RRQ and SQ in HWQ for master issued cmds */ - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - hmap = hwq->host_map; - - writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); - writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); - hwq->hrrq_online = true; - - if (afu_is_sq_cmd_mode(afu)) { - writeq_be((u64)hwq->hsq_start, &hmap->sq_start); - writeq_be((u64)hwq->hsq_end, &hmap->sq_end); - } - } - - /* AFU configuration */ - reg = readq_be(&afu->afu_map->global.regs.afu_config); - reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; - /* enable all auto retry options and control endianness */ - /* leave others at default: */ - /* CTX_CAP write protected, mbox_r does not clear on read and */ - /* checker on if dual afu */ - writeq_be(reg, &afu->afu_map->global.regs.afu_config); - - /* Global port select: select either port */ - if (afu->internal_lun) { - /* Only use port 0 */ - writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); - num_ports = 0; - } else { - writeq_be(PORT_MASK(cfg->num_fc_ports), - &afu->afu_map->global.regs.afu_port_sel); - num_ports = cfg->num_fc_ports; - } - - for (i = 0; i < num_ports; i++) { - fc_port_regs = get_fc_port_regs(cfg, i); - - /* Unmask all errors (but they are still masked at AFU) */ - writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); - /* Clear CRC error cnt & set a threshold */ - (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); - writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); - - /* Set WWPNs. If already programmed, wwpn[i] is 0 */ - if (wwpn[i] != 0) - afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); - /* Programming WWPN back to back causes additional - * offline/online transitions and a PLOGI - */ - msleep(100); - } - - if (afu_is_ocxl_lisn(afu)) { - /* Set up the LISN effective address for each master */ - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - ctx = hwq->ctx_cookie; - - for (j = 0; j < hwq->num_irqs; j++) { - reg = cfg->ops->get_irq_objhndl(ctx, j); - writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]); - } - - reg = hwq->ctx_hndl; - writeq_be(SISL_LISN_PASID(reg, reg), - &hwq->ctrl_map->lisn_pasid[0]); - writeq_be(SISL_LISN_PASID(0UL, reg), - &hwq->ctrl_map->lisn_pasid[1]); - } - } - - /* Set up master's own CTX_CAP to allow real mode, host translation */ - /* tables, afu cmds and read/write GSCSI cmds. */ - /* First, unlock ctx_cap write by reading mbox */ - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - - (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ - writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | - SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | - SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), - &hwq->ctrl_map->ctx_cap); - } - - /* - * Determine write-same unmap support for host by evaluating the unmap - * sector support bit of the context control register associated with - * the primary hardware queue. Note that while this status is reflected - * in a context register, the outcome can be assumed to be host-wide. 
- */ - hwq = get_hwq(afu, PRIMARY_HWQ); - reg = readq_be(&hwq->host_map->ctx_ctrl); - if (reg & SISL_CTX_CTRL_UNMAP_SECTOR) - cfg->ws_unmap = true; - - /* Initialize heartbeat */ - afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); -out: - return rc; -} - -/** - * start_afu() - initializes and starts the AFU - * @cfg: Internal structure associated with the host. - */ -static int start_afu(struct cxlflash_cfg *cfg) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq; - int rc = 0; - int i; - - init_pcr(cfg); - - /* Initialize each HWQ */ - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - - /* After an AFU reset, RRQ entries are stale, clear them */ - memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); - - /* Initialize RRQ pointers */ - hwq->hrrq_start = &hwq->rrq_entry[0]; - hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; - hwq->hrrq_curr = hwq->hrrq_start; - hwq->toggle = 1; - - /* Initialize spin locks */ - spin_lock_init(&hwq->hrrq_slock); - spin_lock_init(&hwq->hsq_slock); - - /* Initialize SQ */ - if (afu_is_sq_cmd_mode(afu)) { - memset(&hwq->sq, 0, sizeof(hwq->sq)); - hwq->hsq_start = &hwq->sq[0]; - hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; - hwq->hsq_curr = hwq->hsq_start; - - atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); - } - - /* Initialize IRQ poll */ - if (afu_is_irqpoll_enabled(afu)) - irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, - cxlflash_irqpoll); - - } - - rc = init_global(cfg); - - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * init_intr() - setup interrupt handlers for the master context - * @cfg: Internal structure associated with the host. - * @hwq: Hardware queue to initialize. - * - * Return: 0 on success, -errno on failure - */ -static enum undo_level init_intr(struct cxlflash_cfg *cfg, - struct hwq *hwq) -{ - struct device *dev = &cfg->dev->dev; - void *ctx = hwq->ctx_cookie; - int rc = 0; - enum undo_level level = UNDO_NOOP; - bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); - int num_irqs = hwq->num_irqs; - - rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs); - if (unlikely(rc)) { - dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", - __func__, rc); - level = UNDO_NOOP; - goto out; - } - - rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, - "SISL_MSI_SYNC_ERROR"); - if (unlikely(rc <= 0)) { - dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); - level = FREE_IRQ; - goto out; - } - - rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, - "SISL_MSI_RRQ_UPDATED"); - if (unlikely(rc <= 0)) { - dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); - level = UNMAP_ONE; - goto out; - } - - /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ - if (!is_primary_hwq) - goto out; - - rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, - "SISL_MSI_ASYNC_ERROR"); - if (unlikely(rc <= 0)) { - dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); - level = UNMAP_TWO; - goto out; - } -out: - return level; -} - -/** - * init_mc() - create and register as the master context - * @cfg: Internal structure associated with the host. - * @index: HWQ Index of the master context. 
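
init_intr() above does not clean up after itself on failure; it reports how far it got as an undo_level and leaves the unwind to the caller (term_intr(), whose body is outside this hunk). A sketch of that staged-teardown pattern under those assumptions, with printf placeholders standing in for the real unmap/free steps:

    #include <stdio.h>

    enum undo_level { UNDO_NOOP = 0, FREE_IRQ, UNMAP_ONE, UNMAP_TWO, UNMAP_THREE };

    /* Unwind in reverse acquisition order: each case undoes one stage
     * and falls through to everything acquired before it. */
    static void unwind(enum undo_level level)
    {
        switch (level) {
        case UNMAP_THREE:
            printf("unmap irq 3\n");
            /* fall through */
        case UNMAP_TWO:
            printf("unmap irq 2\n");
            /* fall through */
        case UNMAP_ONE:
            printf("unmap irq 1\n");
            /* fall through */
        case FREE_IRQ:
            printf("free irq pool\n");
            break;
        case UNDO_NOOP:
        default:
            break;
        }
    }

    int main(void)
    {
        unwind(UNMAP_TWO); /* irqs 2 and 1 mapped, pool allocated: undo all three */
        return 0;
    }
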
- * - * Return: 0 on success, -errno on failure - */ -static int init_mc(struct cxlflash_cfg *cfg, u32 index) -{ - void *ctx; - struct device *dev = &cfg->dev->dev; - struct hwq *hwq = get_hwq(cfg->afu, index); - int rc = 0; - int num_irqs; - enum undo_level level; - - hwq->afu = cfg->afu; - hwq->index = index; - INIT_LIST_HEAD(&hwq->pending_cmds); - - if (index == PRIMARY_HWQ) { - ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie); - num_irqs = 3; - } else { - ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); - num_irqs = 2; - } - if (IS_ERR_OR_NULL(ctx)) { - rc = -ENOMEM; - goto err1; - } - - WARN_ON(hwq->ctx_cookie); - hwq->ctx_cookie = ctx; - hwq->num_irqs = num_irqs; - - /* Set it up as a master with the CXL */ - cfg->ops->set_master(ctx); - - /* Reset AFU when initializing primary context */ - if (index == PRIMARY_HWQ) { - rc = cfg->ops->afu_reset(ctx); - if (unlikely(rc)) { - dev_err(dev, "%s: AFU reset failed rc=%d\n", - __func__, rc); - goto err1; - } - } - - level = init_intr(cfg, hwq); - if (unlikely(level)) { - dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc); - goto err2; - } - - /* Finally, activate the context by starting it */ - rc = cfg->ops->start_context(hwq->ctx_cookie); - if (unlikely(rc)) { - dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); - level = UNMAP_THREE; - goto err2; - } - -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -err2: - term_intr(cfg, level, index); - if (index != PRIMARY_HWQ) - cfg->ops->release_context(ctx); -err1: - hwq->ctx_cookie = NULL; - goto out; -} - -/** - * get_num_afu_ports() - determines and configures the number of AFU ports - * @cfg: Internal structure associated with the host. - * - * This routine determines the number of AFU ports by converting the global - * port selection mask. The converted value is only valid following an AFU - * reset (explicit or power-on). This routine must be invoked shortly after - * mapping as other routines are dependent on the number of ports during the - * initialization sequence. - * - * To support legacy AFUs that might not have reflected an initial global - * port mask (value read is 0), default to the number of ports originally - * supported by the cxlflash driver (2) before hardware with other port - * offerings was introduced. - */ -static void get_num_afu_ports(struct cxlflash_cfg *cfg) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - u64 port_mask; - int num_fc_ports = LEGACY_FC_PORTS; - - port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); - if (port_mask != 0ULL) - num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); - - dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n", - __func__, port_mask, num_fc_ports); - - cfg->num_fc_ports = num_fc_ports; - cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); -} - -/** - * init_afu() - setup as master context and start AFU - * @cfg: Internal structure associated with the host. - * - * This routine is a higher level of control for configuring the - * AFU on probe and reset paths. 
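
The mask-to-count conversion in get_num_afu_ports() is simply "index of the highest set bit, plus one, capped at MAX_FC_PORTS". A standalone equivalent, using the GCC/Clang __builtin_clzll() builtin in place of the kernel's ilog2():

    #include <stdio.h>
    #include <stdint.h>

    #define LEGACY_FC_PORTS 2
    #define MAX_FC_PORTS 4

    /* Index of the highest set bit, i.e. the kernel's ilog2() for nonzero v */
    static int ilog2_u64(uint64_t v)
    {
        return 63 - __builtin_clzll(v);
    }

    static int num_ports_from_mask(uint64_t port_mask)
    {
        int n = LEGACY_FC_PORTS; /* mask of 0 => legacy 2-port AFU */

        if (port_mask) {
            n = ilog2_u64(port_mask) + 1;
            if (n > MAX_FC_PORTS)
                n = MAX_FC_PORTS;
        }
        return n;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               num_ports_from_mask(0x3),  /* ports 0-1 -> 2 */
               num_ports_from_mask(0xf),  /* ports 0-3 -> 4 */
               num_ports_from_mask(0x0)); /* unprogrammed mask -> legacy 2 */
        return 0;
    }
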
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_afu(struct cxlflash_cfg *cfg)
-{
- u64 reg;
- int rc = 0;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int i;
-
- cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
-
- mutex_init(&afu->sync_active);
- afu->num_hwqs = afu->desired_hwqs;
- for (i = 0; i < afu->num_hwqs; i++) {
- rc = init_mc(cfg, i);
- if (rc) {
- dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
- __func__, rc, i);
- goto err1;
- }
- }
-
- /* Map the entire MMIO space of the AFU using the first context */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
- if (!afu->afu_map) {
- dev_err(dev, "%s: psa_map failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- /* No byte reverse on reading afu_version or string will be backwards */
- reg = readq(&afu->afu_map->global.regs.afu_version);
- memcpy(afu->version, &reg, sizeof(reg));
- afu->interface_version =
- readq_be(&afu->afu_map->global.regs.interface_version);
- if ((afu->interface_version + 1) == 0) {
- dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
- "interface version %016llx\n", afu->version,
- afu->interface_version);
- rc = -EINVAL;
- goto err1;
- }
-
- if (afu_is_sq_cmd_mode(afu)) {
- afu->send_cmd = send_cmd_sq;
- afu->context_reset = context_reset_sq;
- } else {
- afu->send_cmd = send_cmd_ioarrin;
- afu->context_reset = context_reset_ioarrin;
- }
-
- dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
- afu->version, afu->interface_version);
-
- get_num_afu_ports(cfg);
-
- rc = start_afu(cfg);
- if (rc) {
- dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
- goto err1;
- }
-
- afu_err_intr_init(cfg->afu);
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- hwq->room = readq_be(&hwq->host_map->cmd_room);
- }
-
- /* Restore the LUN mappings */
- cxlflash_restore_luntable(cfg);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err1:
- for (i = afu->num_hwqs - 1; i >= 0; i--) {
- term_intr(cfg, UNMAP_THREE, i);
- term_mc(cfg, i);
- }
- goto out;
-}
-
-/**
- * afu_reset() - resets the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- /* Stop the context before the reset. Since the context is
- * no longer available restart it after the reset is complete
- */
- term_afu(cfg);
-
- rc = init_afu(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
- *
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
- */
-static void drain_ioctls(struct cxlflash_cfg *cfg)
-{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
-}
-
-/**
- * cxlflash_async_reset_host() - asynchronous host reset handler
- * @data: Private data provided while scheduling reset.
- * @cookie: Cookie that can be used for checkpointing.
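
drain_ioctls() above relies on a property of reader/writer locks: the write side is granted only after every current reader has released, so taking and immediately dropping the write lock acts as a barrier for all in-flight readers. The same idiom with a POSIX rwlock (fairness details differ from the kernel rwsem; link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t ioctl_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Every ioctl-style operation runs under the read side */
    static void ioctl_body(void)
    {
        pthread_rwlock_rdlock(&ioctl_lock);
        /* ... handler work ... */
        pthread_rwlock_unlock(&ioctl_lock);
    }

    /* The write side is granted only once all readers are gone, so
     * acquire-then-release is a pure barrier: it returns when every
     * previously started operation has drained. */
    static void drain(void)
    {
        pthread_rwlock_wrlock(&ioctl_lock);
        pthread_rwlock_unlock(&ioctl_lock);
    }

    int main(void)
    {
        ioctl_body();
        drain();
        printf("drained\n");
        return 0;
    }
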
- */ -static void cxlflash_async_reset_host(void *data, async_cookie_t cookie) -{ - struct cxlflash_cfg *cfg = data; - struct device *dev = &cfg->dev->dev; - int rc = 0; - - if (cfg->state != STATE_RESET) { - dev_dbg(dev, "%s: Not performing a reset, state=%d\n", - __func__, cfg->state); - goto out; - } - - drain_ioctls(cfg); - cxlflash_mark_contexts_error(cfg); - rc = afu_reset(cfg); - if (rc) - cfg->state = STATE_FAILTERM; - else - cfg->state = STATE_NORMAL; - wake_up_all(&cfg->reset_waitq); - -out: - scsi_unblock_requests(cfg->host); -} - -/** - * cxlflash_schedule_async_reset() - schedule an asynchronous host reset - * @cfg: Internal structure associated with the host. - */ -static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) -{ - struct device *dev = &cfg->dev->dev; - - if (cfg->state != STATE_NORMAL) { - dev_dbg(dev, "%s: Not performing reset state=%d\n", - __func__, cfg->state); - return; - } - - cfg->state = STATE_RESET; - scsi_block_requests(cfg->host); - cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host, - cfg); -} - -/** - * send_afu_cmd() - builds and sends an internal AFU command - * @afu: AFU associated with the host. - * @rcb: Pre-populated IOARCB describing command to send. - * - * The AFU can only take one internal AFU command at a time. This limitation is - * enforced by using a mutex to provide exclusive access to the AFU during the - * operation. This design point requires calling threads to not be on interrupt - * context due to the possibility of sleeping during concurrent AFU operations. - * - * The command status is optionally passed back to the caller when the caller - * populates the IOASA field of the IOARCB with a pointer to an IOASA structure. - * - * Return: - * 0 on success, -errno on failure - */ -static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - struct afu_cmd *cmd = NULL; - struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); - ulong lock_flags; - char *buf = NULL; - int rc = 0; - int nretry = 0; - - if (cfg->state != STATE_NORMAL) { - dev_dbg(dev, "%s: Sync not required state=%u\n", - __func__, cfg->state); - return 0; - } - - mutex_lock(&afu->sync_active); - atomic_inc(&afu->cmds_active); - buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); - if (unlikely(!buf)) { - dev_err(dev, "%s: no memory for command\n", __func__); - rc = -ENOMEM; - goto out; - } - - cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); - -retry: - memset(cmd, 0, sizeof(*cmd)); - memcpy(&cmd->rcb, rcb, sizeof(*rcb)); - INIT_LIST_HEAD(&cmd->queue); - init_completion(&cmd->cevent); - cmd->parent = afu; - cmd->hwq_index = hwq->index; - cmd->rcb.ctx_id = hwq->ctx_hndl; - - dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n", - __func__, afu, cmd, cmd->rcb.cdb[0], nretry); - - rc = afu->send_cmd(afu, cmd); - if (unlikely(rc)) { - rc = -ENOBUFS; - goto out; - } - - rc = wait_resp(afu, cmd); - switch (rc) { - case -ETIMEDOUT: - rc = afu->context_reset(hwq); - if (rc) { - /* Delete the command from pending_cmds list */ - spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - list_del(&cmd->list); - spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); - - cxlflash_schedule_async_reset(cfg); - break; - } - fallthrough; /* to retry */ - case -EAGAIN: - if (++nretry < 2) - goto retry; - fallthrough; /* to exit */ - default: - break; - } - - if (rcb->ioasa) - *rcb->ioasa = cmd->sa; -out: - atomic_dec(&afu->cmds_active); - mutex_unlock(&afu->sync_active); 
- kfree(buf); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * cxlflash_afu_sync() - builds and sends an AFU sync command - * @afu: AFU associated with the host. - * @ctx: Identifies context requesting sync. - * @res: Identifies resource requesting sync. - * @mode: Type of sync to issue (lightweight, heavyweight, global). - * - * AFU sync operations are only necessary and allowed when the device is - * operating normally. When not operating normally, sync requests can occur as - * part of cleaning up resources associated with an adapter prior to removal. - * In this scenario, these requests are simply ignored (safe due to the AFU - * going away). - * - * Return: - * 0 on success, -errno on failure - */ -int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - struct sisl_ioarcb rcb = { 0 }; - - dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n", - __func__, afu, ctx, res, mode); - - rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; - rcb.msi = SISL_MSI_RRQ_UPDATED; - rcb.timeout = MC_AFU_SYNC_TIMEOUT; - - rcb.cdb[0] = SISL_AFU_CMD_SYNC; - rcb.cdb[1] = mode; - put_unaligned_be16(ctx, &rcb.cdb[2]); - put_unaligned_be32(res, &rcb.cdb[4]); - - return send_afu_cmd(afu, &rcb); -} - -/** - * cxlflash_eh_abort_handler() - abort a SCSI command - * @scp: SCSI command to abort. - * - * CXL Flash devices do not support a single command abort. Reset the context - * as per SISLite specification. Flush any pending commands in the hardware - * queue before the reset. - * - * Return: SUCCESS/FAILED as defined in scsi/scsi.h - */ -static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp) -{ - int rc = FAILED; - struct Scsi_Host *host = scp->device->host; - struct cxlflash_cfg *cfg = shost_priv(host); - struct afu_cmd *cmd = sc_to_afuc(scp); - struct device *dev = &cfg->dev->dev; - struct afu *afu = cfg->afu; - struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - - dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " - "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, - scp->device->channel, scp->device->id, scp->device->lun, - get_unaligned_be32(&((u32 *)scp->cmnd)[0]), - get_unaligned_be32(&((u32 *)scp->cmnd)[1]), - get_unaligned_be32(&((u32 *)scp->cmnd)[2]), - get_unaligned_be32(&((u32 *)scp->cmnd)[3])); - - /* When the state is not normal, another reset/reload is in progress. - * Return failed and the mid-layer will invoke host reset handler. - */ - if (cfg->state != STATE_NORMAL) { - dev_dbg(dev, "%s: Invalid state for abort, state=%d\n", - __func__, cfg->state); - goto out; - } - - rc = afu->context_reset(hwq); - if (unlikely(rc)) - goto out; - - rc = SUCCESS; - -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * cxlflash_eh_device_reset_handler() - reset a single LUN - * @scp: SCSI command to send. 
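
cxlflash_afu_sync() above packs the context and resource handles big-endian into the CDB with put_unaligned_be16/32(). A portable sketch of that byte packing; the opcode, mode, and handle values here are made up:

    #include <stdio.h>
    #include <stdint.h>

    /* Portable stand-ins for the kernel's put_unaligned_be16/be32 */
    static void put_be16(uint16_t v, uint8_t *p)
    {
        p[0] = v >> 8;
        p[1] = v & 0xff;
    }

    static void put_be32(uint32_t v, uint8_t *p)
    {
        p[0] = v >> 24;
        p[1] = (v >> 16) & 0xff;
        p[2] = (v >> 8) & 0xff;
        p[3] = v & 0xff;
    }

    int main(void)
    {
        uint8_t cdb[16] = { 0 };
        int i;

        cdb[0] = 0xc0;                 /* made-up opcode byte */
        cdb[1] = 0x02;                 /* made-up sync mode */
        put_be16(0x1234, &cdb[2]);     /* context handle */
        put_be32(0x00abcdef, &cdb[4]); /* resource handle */

        for (i = 0; i < 8; i++)
            printf("%02x ", cdb[i]);
        printf("\n");
        return 0;
    }
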
- * - * Return: - * SUCCESS as defined in scsi/scsi.h - * FAILED as defined in scsi/scsi.h - */ -static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) -{ - int rc = SUCCESS; - struct scsi_device *sdev = scp->device; - struct Scsi_Host *host = sdev->host; - struct cxlflash_cfg *cfg = shost_priv(host); - struct device *dev = &cfg->dev->dev; - int rcr = 0; - - dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__, - host->host_no, sdev->channel, sdev->id, sdev->lun); -retry: - switch (cfg->state) { - case STATE_NORMAL: - rcr = send_tmf(cfg, sdev, TMF_LUN_RESET); - if (unlikely(rcr)) - rc = FAILED; - break; - case STATE_RESET: - wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); - goto retry; - default: - rc = FAILED; - break; - } - - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * cxlflash_eh_host_reset_handler() - reset the host adapter - * @scp: SCSI command from stack identifying host. - * - * Following a reset, the state is evaluated again in case an EEH occurred - * during the reset. In such a scenario, the host reset will either yield - * until the EEH recovery is complete or return success or failure based - * upon the current device state. - * - * Return: - * SUCCESS as defined in scsi/scsi.h - * FAILED as defined in scsi/scsi.h - */ -static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) -{ - int rc = SUCCESS; - int rcr = 0; - struct Scsi_Host *host = scp->device->host; - struct cxlflash_cfg *cfg = shost_priv(host); - struct device *dev = &cfg->dev->dev; - - dev_dbg(dev, "%s: %d\n", __func__, host->host_no); - - switch (cfg->state) { - case STATE_NORMAL: - cfg->state = STATE_RESET; - drain_ioctls(cfg); - cxlflash_mark_contexts_error(cfg); - rcr = afu_reset(cfg); - if (rcr) { - rc = FAILED; - cfg->state = STATE_FAILTERM; - } else - cfg->state = STATE_NORMAL; - wake_up_all(&cfg->reset_waitq); - ssleep(1); - fallthrough; - case STATE_RESET: - wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); - if (cfg->state == STATE_NORMAL) - break; - fallthrough; - default: - rc = FAILED; - break; - } - - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * cxlflash_change_queue_depth() - change the queue depth for the device - * @sdev: SCSI device destined for queue depth change. - * @qdepth: Requested queue depth value to set. - * - * The requested queue depth is capped to the maximum supported value. - * - * Return: The actual queue depth set. - */ -static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) -{ - - if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) - qdepth = CXLFLASH_MAX_CMDS_PER_LUN; - - scsi_change_queue_depth(sdev, qdepth); - return sdev->queue_depth; -} - -/** - * cxlflash_show_port_status() - queries and presents the current port status - * @port: Desired port for status reporting. - * @cfg: Internal structure associated with the host. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf or -EINVAL. 
- */ -static ssize_t cxlflash_show_port_status(u32 port, - struct cxlflash_cfg *cfg, - char *buf) -{ - struct device *dev = &cfg->dev->dev; - char *disp_status; - u64 status; - __be64 __iomem *fc_port_regs; - - WARN_ON(port >= MAX_FC_PORTS); - - if (port >= cfg->num_fc_ports) { - dev_info(dev, "%s: Port %d not supported on this card.\n", - __func__, port); - return -EINVAL; - } - - fc_port_regs = get_fc_port_regs(cfg, port); - status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); - status &= FC_MTIP_STATUS_MASK; - - if (status == FC_MTIP_STATUS_ONLINE) - disp_status = "online"; - else if (status == FC_MTIP_STATUS_OFFLINE) - disp_status = "offline"; - else - disp_status = "unknown"; - - return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); -} - -/** - * port0_show() - queries and presents the current status of port 0 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port0_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_status(0, cfg, buf); -} - -/** - * port1_show() - queries and presents the current status of port 1 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port1_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_status(1, cfg, buf); -} - -/** - * port2_show() - queries and presents the current status of port 2 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port2_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_status(2, cfg, buf); -} - -/** - * port3_show() - queries and presents the current status of port 3 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port3_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_status(3, cfg, buf); -} - -/** - * lun_mode_show() - presents the current LUN mode of the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the LUN mode. - * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. - * - * Return: The size of the ASCII string returned in @buf. 
- */ -static ssize_t lun_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - - return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); -} - -/** - * lun_mode_store() - sets the LUN mode of the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the LUN mode. - * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. - * @count: Length of data resizing in @buf. - * - * The CXL Flash AFU supports a dummy LUN mode where the external - * links and storage are not required. Space on the FPGA is used - * to create 1 or 2 small LUNs which are presented to the system - * as if they were a normal storage device. This feature is useful - * during development and also provides manufacturing with a way - * to test the AFU without an actual device. - * - * 0 = external LUN[s] (default) - * 1 = internal LUN (1 x 64K, 512B blocks, id 0) - * 2 = internal LUN (1 x 64K, 4K blocks, id 0) - * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) - * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t lun_mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = shost_priv(shost); - struct afu *afu = cfg->afu; - int rc; - u32 lun_mode; - - rc = kstrtouint(buf, 10, &lun_mode); - if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { - afu->internal_lun = lun_mode; - - /* - * When configured for internal LUN, there is only one channel, - * channel number 0, else there will be one less than the number - * of fc ports for this card. - */ - if (afu->internal_lun) - shost->max_channel = 0; - else - shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports); - - afu_reset(cfg); - scsi_scan_host(cfg->host); - } - - return count; -} - -/** - * ioctl_version_show() - presents the current ioctl version of the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the ioctl version. - * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t ioctl_version_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - ssize_t bytes = 0; - - bytes = scnprintf(buf, PAGE_SIZE, - "disk: %u\n", DK_CXLFLASH_VERSION_0); - bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, - "host: %u\n", HT_CXLFLASH_VERSION_0); - - return bytes; -} - -/** - * cxlflash_show_port_lun_table() - queries and presents the port LUN table - * @port: Desired port for status reporting. - * @cfg: Internal structure associated with the host. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf or -EINVAL. 
- */ -static ssize_t cxlflash_show_port_lun_table(u32 port, - struct cxlflash_cfg *cfg, - char *buf) -{ - struct device *dev = &cfg->dev->dev; - __be64 __iomem *fc_port_luns; - int i; - ssize_t bytes = 0; - - WARN_ON(port >= MAX_FC_PORTS); - - if (port >= cfg->num_fc_ports) { - dev_info(dev, "%s: Port %d not supported on this card.\n", - __func__, port); - return -EINVAL; - } - - fc_port_luns = get_fc_port_luns(cfg, port); - - for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) - bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, - "%03d: %016llx\n", - i, readq_be(&fc_port_luns[i])); - return bytes; -} - -/** - * port0_lun_table_show() - presents the current LUN table of port 0 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port0_lun_table_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_lun_table(0, cfg, buf); -} - -/** - * port1_lun_table_show() - presents the current LUN table of port 1 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port1_lun_table_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_lun_table(1, cfg, buf); -} - -/** - * port2_lun_table_show() - presents the current LUN table of port 2 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port2_lun_table_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_lun_table(2, cfg, buf); -} - -/** - * port3_lun_table_show() - presents the current LUN table of port 3 - * @dev: Generic device associated with the host owning the port. - * @attr: Device attribute representing the port. - * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t port3_lun_table_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - - return cxlflash_show_port_lun_table(3, cfg, buf); -} - -/** - * irqpoll_weight_show() - presents the current IRQ poll weight for the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the IRQ poll weight. - * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll - * weight in ASCII. - * - * An IRQ poll weight of 0 indicates polling is disabled. - * - * Return: The size of the ASCII string returned in @buf. 
- */ -static ssize_t irqpoll_weight_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - - return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight); -} - -/** - * irqpoll_weight_store() - sets the current IRQ poll weight for the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the IRQ poll weight. - * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll - * weight in ASCII. - * @count: Length of data resizing in @buf. - * - * An IRQ poll weight of 0 indicates polling is disabled. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t irqpoll_weight_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct device *cfgdev = &cfg->dev->dev; - struct afu *afu = cfg->afu; - struct hwq *hwq; - u32 weight; - int rc, i; - - rc = kstrtouint(buf, 10, &weight); - if (rc) - return -EINVAL; - - if (weight > 256) { - dev_info(cfgdev, - "Invalid IRQ poll weight. It must be 256 or less.\n"); - return -EINVAL; - } - - if (weight == afu->irqpoll_weight) { - dev_info(cfgdev, - "Current IRQ poll weight has the same weight.\n"); - return -EINVAL; - } - - if (afu_is_irqpoll_enabled(afu)) { - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - - irq_poll_disable(&hwq->irqpoll); - } - } - - afu->irqpoll_weight = weight; - - if (weight > 0) { - for (i = 0; i < afu->num_hwqs; i++) { - hwq = get_hwq(afu, i); - - irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll); - } - } - - return count; -} - -/** - * num_hwqs_show() - presents the number of hardware queues for the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the number of hardware queues. - * @buf: Buffer of length PAGE_SIZE to report back the number of hardware - * queues in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t num_hwqs_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - - return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs); -} - -/** - * num_hwqs_store() - sets the number of hardware queues for the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the number of hardware queues. - * @buf: Buffer of length PAGE_SIZE containing the number of hardware - * queues in ASCII. - * @count: Length of data resizing in @buf. - * - * n > 0: num_hwqs = n - * n = 0: num_hwqs = num_online_cpus() - * n < 0: num_online_cpus() / abs(n) - * - * Return: The size of the ASCII string returned in @buf. 
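
The n > 0 / n = 0 / n < 0 mapping described above, as standalone arithmetic. CXLFLASH_MAX_HWQS is assumed to be 8 here (the real constant lives in a header outside this hunk), and the driver only WARNs rather than clamps when the result is 0:

    #include <stdio.h>
    #include <stdlib.h>

    #define CXLFLASH_MAX_HWQS 8 /* assumed cap for this sketch */

    static int desired_hwqs(int n, int online_cpus)
    {
        int num;

        if (n > 0)
            num = n;
        else if (n == 0)
            num = online_cpus;
        else
            num = online_cpus / abs(n);

        if (num > CXLFLASH_MAX_HWQS)
            num = CXLFLASH_MAX_HWQS;
        if (num < 1)
            num = 1; /* the driver only WARNs on 0; clamped here for the sketch */
        return num;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               desired_hwqs(3, 16),   /* -> 3 */
               desired_hwqs(0, 16),   /* -> 8, capped from 16 */
               desired_hwqs(-4, 16)); /* -> 4 */
        return 0;
    }
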
- */ -static ssize_t num_hwqs_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - int rc; - int nhwqs, num_hwqs; - - rc = kstrtoint(buf, 10, &nhwqs); - if (rc) - return -EINVAL; - - if (nhwqs >= 1) - num_hwqs = nhwqs; - else if (nhwqs == 0) - num_hwqs = num_online_cpus(); - else - num_hwqs = num_online_cpus() / abs(nhwqs); - - afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS); - WARN_ON_ONCE(afu->desired_hwqs == 0); - -retry: - switch (cfg->state) { - case STATE_NORMAL: - cfg->state = STATE_RESET; - drain_ioctls(cfg); - cxlflash_mark_contexts_error(cfg); - rc = afu_reset(cfg); - if (rc) - cfg->state = STATE_FAILTERM; - else - cfg->state = STATE_NORMAL; - wake_up_all(&cfg->reset_waitq); - break; - case STATE_RESET: - wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); - if (cfg->state == STATE_NORMAL) - goto retry; - fallthrough; - default: - /* Ideally should not happen */ - dev_err(dev, "%s: Device is not ready, state=%d\n", - __func__, cfg->state); - break; - } - - return count; -} - -static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" }; - -/** - * hwq_mode_show() - presents the HWQ steering mode for the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the HWQ steering mode. - * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode - * as a character string. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t hwq_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - - return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]); -} - -/** - * hwq_mode_store() - sets the HWQ steering mode for the host - * @dev: Generic device associated with the host. - * @attr: Device attribute representing the HWQ steering mode. - * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode - * as a character string. - * @count: Length of data resizing in @buf. - * - * rr = Round-Robin - * tag = Block MQ Tagging - * cpu = CPU Affinity - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t hwq_mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = shost_priv(shost); - struct device *cfgdev = &cfg->dev->dev; - struct afu *afu = cfg->afu; - int i; - u32 mode = MAX_HWQ_MODE; - - for (i = 0; i < MAX_HWQ_MODE; i++) { - if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) { - mode = i; - break; - } - } - - if (mode >= MAX_HWQ_MODE) { - dev_info(cfgdev, "Invalid HWQ steering mode.\n"); - return -EINVAL; - } - - afu->hwq_mode = mode; - - return count; -} - -/** - * mode_show() - presents the current mode of the device - * @dev: Generic device associated with the device. - * @attr: Device attribute representing the device mode. - * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. - * - * Return: The size of the ASCII string returned in @buf. - */ -static ssize_t mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct scsi_device *sdev = to_scsi_device(dev); - - return scnprintf(buf, PAGE_SIZE, "%s\n", - sdev->hostdata ? 
"superpipe" : "legacy"); -} - -/* - * Host attributes - */ -static DEVICE_ATTR_RO(port0); -static DEVICE_ATTR_RO(port1); -static DEVICE_ATTR_RO(port2); -static DEVICE_ATTR_RO(port3); -static DEVICE_ATTR_RW(lun_mode); -static DEVICE_ATTR_RO(ioctl_version); -static DEVICE_ATTR_RO(port0_lun_table); -static DEVICE_ATTR_RO(port1_lun_table); -static DEVICE_ATTR_RO(port2_lun_table); -static DEVICE_ATTR_RO(port3_lun_table); -static DEVICE_ATTR_RW(irqpoll_weight); -static DEVICE_ATTR_RW(num_hwqs); -static DEVICE_ATTR_RW(hwq_mode); - -static struct attribute *cxlflash_host_attrs[] = { - &dev_attr_port0.attr, - &dev_attr_port1.attr, - &dev_attr_port2.attr, - &dev_attr_port3.attr, - &dev_attr_lun_mode.attr, - &dev_attr_ioctl_version.attr, - &dev_attr_port0_lun_table.attr, - &dev_attr_port1_lun_table.attr, - &dev_attr_port2_lun_table.attr, - &dev_attr_port3_lun_table.attr, - &dev_attr_irqpoll_weight.attr, - &dev_attr_num_hwqs.attr, - &dev_attr_hwq_mode.attr, - NULL -}; - -ATTRIBUTE_GROUPS(cxlflash_host); - -/* - * Device attributes - */ -static DEVICE_ATTR_RO(mode); - -static struct attribute *cxlflash_dev_attrs[] = { - &dev_attr_mode.attr, - NULL -}; - -ATTRIBUTE_GROUPS(cxlflash_dev); - -/* - * Host template - */ -static struct scsi_host_template driver_template = { - .module = THIS_MODULE, - .name = CXLFLASH_ADAPTER_NAME, - .info = cxlflash_driver_info, - .ioctl = cxlflash_ioctl, - .proc_name = CXLFLASH_NAME, - .queuecommand = cxlflash_queuecommand, - .eh_abort_handler = cxlflash_eh_abort_handler, - .eh_device_reset_handler = cxlflash_eh_device_reset_handler, - .eh_host_reset_handler = cxlflash_eh_host_reset_handler, - .change_queue_depth = cxlflash_change_queue_depth, - .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, - .can_queue = CXLFLASH_MAX_CMDS, - .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, - .this_id = -1, - .sg_tablesize = 1, /* No scatter gather support */ - .max_sectors = CXLFLASH_MAX_SECTORS, - .shost_groups = cxlflash_host_groups, - .sdev_groups = cxlflash_dev_groups, -}; - -/* - * Device dependent values - */ -static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, - CXLFLASH_WWPN_VPD_REQUIRED }; -static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, - CXLFLASH_NOTIFY_SHUTDOWN }; -static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, - (CXLFLASH_NOTIFY_SHUTDOWN | - CXLFLASH_OCXL_DEV) }; - -/* - * PCI device binding table - */ -static struct pci_device_id cxlflash_pci_table[] = { - {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, - {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, - {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, - {} -}; - -MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); - -/** - * cxlflash_worker_thread() - work thread handler for the AFU - * @work: Work structure contained within cxlflash associated with host. 
- * - * Handles the following events: - * - Link reset which cannot be performed on interrupt context due to - * blocking up to a few seconds - * - Rescan the host - */ -static void cxlflash_worker_thread(struct work_struct *work) -{ - struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, - work_q); - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - __be64 __iomem *fc_port_regs; - int port; - ulong lock_flags; - - /* Avoid MMIO if the device has failed */ - - if (cfg->state != STATE_NORMAL) - return; - - spin_lock_irqsave(cfg->host->host_lock, lock_flags); - - if (cfg->lr_state == LINK_RESET_REQUIRED) { - port = cfg->lr_port; - if (port < 0) - dev_err(dev, "%s: invalid port index %d\n", - __func__, port); - else { - spin_unlock_irqrestore(cfg->host->host_lock, - lock_flags); - - /* The reset can block... */ - fc_port_regs = get_fc_port_regs(cfg, port); - afu_link_reset(afu, port, fc_port_regs); - spin_lock_irqsave(cfg->host->host_lock, lock_flags); - } - - cfg->lr_state = LINK_RESET_COMPLETE; - } - - spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); - - if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) - scsi_scan_host(cfg->host); -} - -/** - * cxlflash_chr_open() - character device open handler - * @inode: Device inode associated with this character device. - * @file: File pointer for this device. - * - * Only users with admin privileges are allowed to open the character device. - * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_chr_open(struct inode *inode, struct file *file) -{ - struct cxlflash_cfg *cfg; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev); - file->private_data = cfg; - - return 0; -} - -/** - * decode_hioctl() - translates encoded host ioctl to easily identifiable string - * @cmd: The host ioctl command to decode. - * - * Return: A string identifying the decoded host ioctl. - */ -static char *decode_hioctl(unsigned int cmd) -{ - switch (cmd) { - case HT_CXLFLASH_LUN_PROVISION: - return __stringify_1(HT_CXLFLASH_LUN_PROVISION); - } - - return "UNKNOWN"; -} - -/** - * cxlflash_lun_provision() - host LUN provisioning handler - * @cfg: Internal structure associated with the host. - * @lunprov: Kernel copy of userspace ioctl data structure. 
- * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, - struct ht_cxlflash_lun_provision *lunprov) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct sisl_ioarcb rcb; - struct sisl_ioasa asa; - __be64 __iomem *fc_port_regs; - u16 port = lunprov->port; - u16 scmd = lunprov->hdr.subcmd; - u16 type; - u64 reg; - u64 size; - u64 lun_id; - int rc = 0; - - if (!afu_is_lun_provision(afu)) { - rc = -ENOTSUPP; - goto out; - } - - if (port >= cfg->num_fc_ports) { - rc = -EINVAL; - goto out; - } - - switch (scmd) { - case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN: - type = SISL_AFU_LUN_PROVISION_CREATE; - size = lunprov->size; - lun_id = 0; - break; - case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN: - type = SISL_AFU_LUN_PROVISION_DELETE; - size = 0; - lun_id = lunprov->lun_id; - break; - case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT: - fc_port_regs = get_fc_port_regs(cfg, port); - - reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]); - lunprov->max_num_luns = reg; - reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]); - lunprov->cur_num_luns = reg; - reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]); - lunprov->max_cap_port = reg; - reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]); - lunprov->cur_cap_port = reg; - - goto out; - default: - rc = -EINVAL; - goto out; - } - - memset(&rcb, 0, sizeof(rcb)); - memset(&asa, 0, sizeof(asa)); - rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; - rcb.lun_id = lun_id; - rcb.msi = SISL_MSI_RRQ_UPDATED; - rcb.timeout = MC_LUN_PROV_TIMEOUT; - rcb.ioasa = &asa; - - rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION; - rcb.cdb[1] = type; - rcb.cdb[2] = port; - put_unaligned_be64(size, &rcb.cdb[8]); - - rc = send_afu_cmd(afu, &rcb); - if (rc) { - dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", - __func__, rc, asa.ioasc, asa.afu_extra); - goto out; - } - - if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) { - lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo; - memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid)); - } -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * cxlflash_afu_debug() - host AFU debug handler - * @cfg: Internal structure associated with the host. - * @afu_dbg: Kernel copy of userspace ioctl data structure. - * - * For debug requests requiring a data buffer, always provide an aligned - * (cache line) buffer to the AFU to appease any alignment requirements. 
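
Both send_afu_cmd() earlier and cxlflash_afu_debug() below satisfy such alignment requirements the same way: over-allocate by one alignment's worth of slack and round the pointer up. The rounding is the kernel's PTR_ALIGN() macro; a userspace equivalent, with the cache-line size assumed to be 128 where the kernel queries cache_line_size() at runtime:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <string.h>

    #define CACHELINE 128 /* assumed line size for this sketch */

    /* Round a pointer up to the next multiple of a power-of-two alignment,
     * the same arithmetic as the kernel's PTR_ALIGN() macro. */
    static void *ptr_align(void *p, uintptr_t align)
    {
        return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
    }

    int main(void)
    {
        size_t len = 4096;
        char *raw = malloc(len + CACHELINE - 1); /* slack for the rounding */
        char *buf;

        if (!raw)
            return 1;
        buf = ptr_align(raw, CACHELINE);
        memset(buf, 0, len); /* the aligned region is fully usable */
        printf("raw=%p aligned=%p\n", (void *)raw, (void *)buf);
        free(raw); /* free the original pointer, never the aligned one */
        return 0;
    }
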
- * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, - struct ht_cxlflash_afu_debug *afu_dbg) -{ - struct afu *afu = cfg->afu; - struct device *dev = &cfg->dev->dev; - struct sisl_ioarcb rcb; - struct sisl_ioasa asa; - char *buf = NULL; - char *kbuf = NULL; - void __user *ubuf = (__force void __user *)afu_dbg->data_ea; - u16 req_flags = SISL_REQ_FLAGS_AFU_CMD; - u32 ulen = afu_dbg->data_len; - bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE; - int rc = 0; - - if (!afu_is_afu_debug(afu)) { - rc = -ENOTSUPP; - goto out; - } - - if (ulen) { - req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN; - - if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) { - rc = -EINVAL; - goto out; - } - - buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL); - if (unlikely(!buf)) { - rc = -ENOMEM; - goto out; - } - - kbuf = PTR_ALIGN(buf, cache_line_size()); - - if (is_write) { - req_flags |= SISL_REQ_FLAGS_HOST_WRITE; - - if (copy_from_user(kbuf, ubuf, ulen)) { - rc = -EFAULT; - goto out; - } - } - } - - memset(&rcb, 0, sizeof(rcb)); - memset(&asa, 0, sizeof(asa)); - - rcb.req_flags = req_flags; - rcb.msi = SISL_MSI_RRQ_UPDATED; - rcb.timeout = MC_AFU_DEBUG_TIMEOUT; - rcb.ioasa = &asa; - - if (ulen) { - rcb.data_len = ulen; - rcb.data_ea = (uintptr_t)kbuf; - } - - rcb.cdb[0] = SISL_AFU_CMD_DEBUG; - memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd, - HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN); - - rc = send_afu_cmd(afu, &rcb); - if (rc) { - dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", - __func__, rc, asa.ioasc, asa.afu_extra); - goto out; - } - - if (ulen && !is_write) { - if (copy_to_user(ubuf, kbuf, ulen)) - rc = -EFAULT; - } -out: - kfree(buf); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * cxlflash_chr_ioctl() - character device IOCTL handler - * @file: File pointer for this device. - * @cmd: IOCTL command. - * @arg: Userspace ioctl data structure. - * - * A read/write semaphore is used to implement a 'drain' of currently - * running ioctls. The read semaphore is taken at the beginning of each - * ioctl thread and released upon concluding execution. Additionally the - * semaphore should be released and then reacquired in any ioctl execution - * path which will wait for an event to occur that is outside the scope of - * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, - * a thread simply needs to acquire the write semaphore. 
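
The dispatcher below turns the ioctl command into a table index by subtracting sequence numbers: Linux encodes an 8-bit sequence number in the low byte of every ioctl command, and the two host ioctls are defined consecutively, so _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION) yields 0 or 1 (hence the "order matters" note on ioctl_tbl[]). A sketch with made-up command values that keeps only that low-byte arithmetic:

    #include <stdio.h>

    /* Mirrors the kernel's _IOC_NR(): the low byte of an ioctl command
     * is its sequence number. */
    #define IOC_NR(cmd) ((cmd) & 0xff)

    /* Made-up command values whose sequence numbers are consecutive */
    #define CMD_LUN_PROVISION 0xc0185a20u
    #define CMD_AFU_DEBUG     0xc0185a21u

    static const char *handler_name[] = { "lun_provision", "afu_debug" };

    int main(void)
    {
        unsigned int cmds[] = { CMD_LUN_PROVISION, CMD_AFU_DEBUG };
        int i;

        for (i = 0; i < 2; i++) {
            int idx = IOC_NR(cmds[i]) - IOC_NR(CMD_LUN_PROVISION);
            printf("cmd=%08x -> ioctl_tbl[%d] (%s)\n",
                   cmds[i], idx, handler_name[idx]);
        }
        return 0;
    }
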
- *
- * Return: 0 on success, -errno on failure
- */
-static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- typedef int (*hioctl) (struct cxlflash_cfg *, void *);
-
- struct cxlflash_cfg *cfg = file->private_data;
- struct device *dev = &cfg->dev->dev;
- char buf[sizeof(union cxlflash_ht_ioctls)];
- void __user *uarg = (void __user *)arg;
- struct ht_cxlflash_hdr *hdr;
- size_t size = 0;
- bool known_ioctl = false;
- int idx = 0;
- int rc = 0;
- hioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- hioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- { sizeof(struct ht_cxlflash_lun_provision),
- (hioctl)cxlflash_lun_provision },
- { sizeof(struct ht_cxlflash_afu_debug),
- (hioctl)cxlflash_afu_debug },
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
- __func__, cmd, idx, sizeof(ioctl_tbl));
-
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- case HT_CXLFLASH_AFU_DEBUG:
- known_ioctl = true;
- idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(copy_from_user(&buf, uarg, size))) {
- dev_err(dev, "%s: copy_from_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- goto out;
- }
-
- hdr = (struct ht_cxlflash_hdr *)&buf;
- if (hdr->version != HT_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_hioctl(cmd));
- rc = -EINVAL;
- goto out;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = do_ioctl(cfg, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(uarg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-out:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- return rc;
-}
-
-/*
- * Character device file operations
- */
-static const struct file_operations cxlflash_chr_fops = {
- .owner = THIS_MODULE,
- .open = cxlflash_chr_open,
- .unlocked_ioctl = cxlflash_chr_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
-
-/**
- * init_chrdev() - initialize the character device for the host
- * @cfg: Internal structure associated with the host.
- * - * Return: 0 on success, -errno on failure - */ -static int init_chrdev(struct cxlflash_cfg *cfg) -{ - struct device *dev = &cfg->dev->dev; - struct device *char_dev; - dev_t devno; - int minor; - int rc = 0; - - minor = cxlflash_get_minor(); - if (unlikely(minor < 0)) { - dev_err(dev, "%s: Exhausted allowed adapters\n", __func__); - rc = -ENOSPC; - goto out; - } - - devno = MKDEV(cxlflash_major, minor); - cdev_init(&cfg->cdev, &cxlflash_chr_fops); - - rc = cdev_add(&cfg->cdev, devno, 1); - if (rc) { - dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc); - goto err1; - } - - char_dev = device_create(&cxlflash_class, NULL, devno, - NULL, "cxlflash%d", minor); - if (IS_ERR(char_dev)) { - rc = PTR_ERR(char_dev); - dev_err(dev, "%s: device_create failed rc=%d\n", - __func__, rc); - goto err2; - } - - cfg->chardev = char_dev; -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -err2: - cdev_del(&cfg->cdev); -err1: - cxlflash_put_minor(minor); - goto out; -} - -/** - * cxlflash_probe() - PCI entry point to add host - * @pdev: PCI device associated with the host. - * @dev_id: PCI device id associated with device. - * - * The device will initially start out in a 'probing' state and - * transition to the 'normal' state at the end of a successful - * probe. Should an EEH event occur during probe, the notification - * thread (error_detected()) will wait until the probe handler - * is nearly complete. At that time, the device will be moved to - * a 'probed' state and the EEH thread woken up to drive the slot - * reset and recovery (device moves to 'normal' state). Meanwhile, - * the probe will be allowed to exit successfully. - * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_probe(struct pci_dev *pdev, - const struct pci_device_id *dev_id) -{ - struct Scsi_Host *host; - struct cxlflash_cfg *cfg = NULL; - struct device *dev = &pdev->dev; - struct dev_dependent_vals *ddv; - int rc = 0; - int k; - - dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", - __func__, pdev->irq); - - ddv = (struct dev_dependent_vals *)dev_id->driver_data; - driver_template.max_sectors = ddv->max_sectors; - - host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); - if (!host) { - dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); - rc = -ENOMEM; - goto out; - } - - host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; - host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; - host->unique_id = host->host_no; - host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; - - cfg = shost_priv(host); - cfg->state = STATE_PROBING; - cfg->host = host; - rc = alloc_mem(cfg); - if (rc) { - dev_err(dev, "%s: alloc_mem failed\n", __func__); - rc = -ENOMEM; - scsi_host_put(cfg->host); - goto out; - } - - cfg->init_state = INIT_STATE_NONE; - cfg->dev = pdev; - cfg->cxl_fops = cxlflash_cxl_fops; - cfg->ops = cxlflash_assign_ops(ddv); - WARN_ON_ONCE(!cfg->ops); - - /* - * Promoted LUNs move to the top of the LUN table. The rest stay on - * the bottom half. The bottom half grows from the end (index = 255), - * whereas the top half grows from the beginning (index = 0). - * - * Initialize the last LUN index for all possible ports. 
- */ - cfg->promote_lun_index = 0; - - for (k = 0; k < MAX_FC_PORTS; k++) - cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; - - cfg->dev_id = (struct pci_device_id *)dev_id; - - init_waitqueue_head(&cfg->tmf_waitq); - init_waitqueue_head(&cfg->reset_waitq); - - INIT_WORK(&cfg->work_q, cxlflash_worker_thread); - cfg->lr_state = LINK_RESET_INVALID; - cfg->lr_port = -1; - spin_lock_init(&cfg->tmf_slock); - mutex_init(&cfg->ctx_tbl_list_mutex); - mutex_init(&cfg->ctx_recovery_mutex); - init_rwsem(&cfg->ioctl_rwsem); - INIT_LIST_HEAD(&cfg->ctx_err_recovery); - INIT_LIST_HEAD(&cfg->lluns); - - pci_set_drvdata(pdev, cfg); - - rc = init_pci(cfg); - if (rc) { - dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc); - goto out_remove; - } - cfg->init_state = INIT_STATE_PCI; - - cfg->afu_cookie = cfg->ops->create_afu(pdev); - if (unlikely(!cfg->afu_cookie)) { - dev_err(dev, "%s: create_afu failed\n", __func__); - rc = -ENOMEM; - goto out_remove; - } - - rc = init_afu(cfg); - if (rc && !wq_has_sleeper(&cfg->reset_waitq)) { - dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); - goto out_remove; - } - cfg->init_state = INIT_STATE_AFU; - - rc = init_scsi(cfg); - if (rc) { - dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc); - goto out_remove; - } - cfg->init_state = INIT_STATE_SCSI; - - rc = init_chrdev(cfg); - if (rc) { - dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc); - goto out_remove; - } - cfg->init_state = INIT_STATE_CDEV; - - if (wq_has_sleeper(&cfg->reset_waitq)) { - cfg->state = STATE_PROBED; - wake_up_all(&cfg->reset_waitq); - } else - cfg->state = STATE_NORMAL; -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; - -out_remove: - cfg->state = STATE_PROBED; - cxlflash_remove(pdev); - goto out; -} - -/** - * cxlflash_pci_error_detected() - called when a PCI error is detected - * @pdev: PCI device struct. - * @state: PCI channel state. - * - * When an EEH occurs during an active reset, wait until the reset is - * complete and then take action based upon the device state. - * - * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT - */ -static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - int rc = 0; - struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); - struct device *dev = &cfg->dev->dev; - - dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); - - switch (state) { - case pci_channel_io_frozen: - wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && - cfg->state != STATE_PROBING); - if (cfg->state == STATE_FAILTERM) - return PCI_ERS_RESULT_DISCONNECT; - - cfg->state = STATE_RESET; - scsi_block_requests(cfg->host); - drain_ioctls(cfg); - rc = cxlflash_mark_contexts_error(cfg); - if (unlikely(rc)) - dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", - __func__, rc); - term_afu(cfg); - return PCI_ERS_RESULT_NEED_RESET; - case pci_channel_io_perm_failure: - cfg->state = STATE_FAILTERM; - wake_up_all(&cfg->reset_waitq); - scsi_unblock_requests(cfg->host); - return PCI_ERS_RESULT_DISCONNECT; - default: - break; - } - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * cxlflash_pci_slot_reset() - called when PCI slot has been reset - * @pdev: PCI device struct. - * - * This routine is called by the pci error recovery code after the PCI - * slot has been reset, just before we should resume normal operations. 
- * - * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT - */ -static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) -{ - int rc = 0; - struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); - struct device *dev = &cfg->dev->dev; - - dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); - - rc = init_afu(cfg); - if (unlikely(rc)) { - dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); - return PCI_ERS_RESULT_DISCONNECT; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -/** - * cxlflash_pci_resume() - called when normal operation can resume - * @pdev: PCI device struct - */ -static void cxlflash_pci_resume(struct pci_dev *pdev) -{ - struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); - struct device *dev = &cfg->dev->dev; - - dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); - - cfg->state = STATE_NORMAL; - wake_up_all(&cfg->reset_waitq); - scsi_unblock_requests(cfg->host); -} - -/** - * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class - * @dev: Character device. - * @mode: Mode that can be used to verify access. - * - * Return: Allocated string describing the devtmpfs structure. - */ -static char *cxlflash_devnode(const struct device *dev, umode_t *mode) -{ - return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev)); -} - -/** - * cxlflash_class_init() - create character device class - * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_class_init(void) -{ - dev_t devno; - int rc = 0; - - rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash"); - if (unlikely(rc)) { - pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc); - goto out; - } - - cxlflash_major = MAJOR(devno); - - rc = class_register(&cxlflash_class); - if (rc) { - pr_err("%s: class_create failed rc=%d\n", __func__, rc); - goto err; - } - -out: - pr_debug("%s: returning rc=%d\n", __func__, rc); - return rc; -err: - unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); - goto out; -} - -/** - * cxlflash_class_exit() - destroy character device class - */ -static void cxlflash_class_exit(void) -{ - dev_t devno = MKDEV(cxlflash_major, 0); - - class_unregister(&cxlflash_class); - unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); -} - -static const struct pci_error_handlers cxlflash_err_handler = { - .error_detected = cxlflash_pci_error_detected, - .slot_reset = cxlflash_pci_slot_reset, - .resume = cxlflash_pci_resume, -}; - -/* - * PCI device structure - */ -static struct pci_driver cxlflash_driver = { - .name = CXLFLASH_NAME, - .id_table = cxlflash_pci_table, - .probe = cxlflash_probe, - .remove = cxlflash_remove, - .shutdown = cxlflash_remove, - .err_handler = &cxlflash_err_handler, -}; - -/** - * init_cxlflash() - module entry point - * - * Return: 0 on success, -errno on failure - */ -static int __init init_cxlflash(void) -{ - int rc; - - check_sizes(); - cxlflash_list_init(); - rc = cxlflash_class_init(); - if (unlikely(rc)) - goto out; - - rc = pci_register_driver(&cxlflash_driver); - if (unlikely(rc)) - goto err; -out: - pr_debug("%s: returning rc=%d\n", __func__, rc); - return rc; -err: - cxlflash_class_exit(); - goto out; -} - -/** - * exit_cxlflash() - module exit point - */ -static void __exit exit_cxlflash(void) -{ - cxlflash_term_global_luns(); - cxlflash_free_errpage(); - - pci_unregister_driver(&cxlflash_driver); - cxlflash_class_exit(); -} - -module_init(init_cxlflash); -module_exit(exit_cxlflash); diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h deleted file mode 100644 index 
0bfb98effce0..000000000000 --- a/drivers/scsi/cxlflash/main.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#ifndef _CXLFLASH_MAIN_H -#define _CXLFLASH_MAIN_H - -#include <linux/list.h> -#include <linux/types.h> -#include <scsi/scsi.h> -#include <scsi/scsi_device.h> - -#include "backend.h" - -#define CXLFLASH_NAME "cxlflash" -#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter" -#define CXLFLASH_MAX_ADAPTERS 32 - -#define PCI_DEVICE_ID_IBM_CORSA 0x04F0 -#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600 -#define PCI_DEVICE_ID_IBM_BRIARD 0x0624 - -/* Since there is only one target, make it 0 */ -#define CXLFLASH_TARGET 0 -#define CXLFLASH_MAX_CDB_LEN 16 - -/* Really only one target per bus since the Texan is directly attached */ -#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1 -#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536 - -#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ) - -/* FC defines */ -#define FC_MTIP_CMDCONFIG 0x010 -#define FC_MTIP_STATUS 0x018 -#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */ -#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */ -#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */ -#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */ - -#define FC_PNAME 0x300 -#define FC_CONFIG 0x320 -#define FC_CONFIG2 0x328 -#define FC_STATUS 0x330 -#define FC_ERROR 0x380 -#define FC_ERRCAP 0x388 -#define FC_ERRMSK 0x390 -#define FC_CNT_CRCERR 0x538 -#define FC_CRC_THRESH 0x580 - -#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL -#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL - -#define FC_MTIP_STATUS_MASK 0x30ULL -#define FC_MTIP_STATUS_ONLINE 0x20ULL -#define FC_MTIP_STATUS_OFFLINE 0x10ULL - -/* TIMEOUT and RETRY definitions */ - -/* AFU command timeout values */ -#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */ -#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */ -#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */ - -/* AFU command room retry limit */ -#define MC_ROOM_RETRY_CNT 10 - -/* FC CRC clear periodic timer */ -#define MC_CRC_THRESH 100 /* threshold in 5 mins */ - -#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */ -#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */ - -/* VPD defines */ -#define CXLFLASH_VPD_LEN 256 -#define WWPN_LEN 16 -#define WWPN_BUF_LEN (WWPN_LEN + 1) - -enum undo_level { - UNDO_NOOP = 0, - FREE_IRQ, - UNMAP_ONE, - UNMAP_TWO, - UNMAP_THREE -}; - -struct dev_dependent_vals { - u64 max_sectors; - u64 flags; -#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL -#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL -#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL -}; - -static inline const struct cxlflash_backend_ops * -cxlflash_assign_ops(struct dev_dependent_vals *ddv) -{ - const struct cxlflash_backend_ops *ops = NULL; - -#ifdef CONFIG_OCXL_BASE - if (ddv->flags & CXLFLASH_OCXL_DEV) - ops = &cxlflash_ocxl_ops; -#endif - -#ifdef CONFIG_CXL_BASE - if (!(ddv->flags & CXLFLASH_OCXL_DEV)) - ops = &cxlflash_cxl_ops; -#endif - - return ops; -} - -struct asyc_intr_info { - u64 status; - char *desc; - u8 port; - u8 action; -#define CLR_FC_ERROR 0x01 -#define LINK_RESET 0x02 -#define SCAN_HOST 0x04 -}; - -#endif /* _CXLFLASH_MAIN_H */ diff --git a/drivers/scsi/cxlflash/ocxl_hw.c 
b/drivers/scsi/cxlflash/ocxl_hw.c deleted file mode 100644 index 6542818e595a..000000000000 --- a/drivers/scsi/cxlflash/ocxl_hw.c +++ /dev/null @@ -1,1399 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CXL Flash Device Driver - * - * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2018 IBM Corporation - */ - -#include <linux/file.h> -#include <linux/idr.h> -#include <linux/module.h> -#include <linux/mount.h> -#include <linux/pseudo_fs.h> -#include <linux/poll.h> -#include <linux/sched/signal.h> -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <asm/xive.h> -#include <misc/ocxl.h> - -#include <uapi/misc/cxl.h> - -#include "backend.h" -#include "ocxl_hw.h" - -/* - * Pseudo-filesystem to allocate inodes. - */ - -#define OCXLFLASH_FS_MAGIC 0x1697698f - -static int ocxlflash_fs_cnt; -static struct vfsmount *ocxlflash_vfs_mount; - -static int ocxlflash_fs_init_fs_context(struct fs_context *fc) -{ - return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM; -} - -static struct file_system_type ocxlflash_fs_type = { - .name = "ocxlflash", - .owner = THIS_MODULE, - .init_fs_context = ocxlflash_fs_init_fs_context, - .kill_sb = kill_anon_super, -}; - -/* - * ocxlflash_release_mapping() - release the memory mapping - * @ctx: Context whose mapping is to be released. - */ -static void ocxlflash_release_mapping(struct ocxlflash_context *ctx) -{ - if (ctx->mapping) - simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt); - ctx->mapping = NULL; -} - -/* - * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file - * @dev: Generic device of the host. - * @name: Name of the pseudo filesystem. - * @fops: File operations. - * @priv: Private data. - * @flags: Flags for the file. - * - * Return: pointer to the file on success, ERR_PTR on failure - */ -static struct file *ocxlflash_getfile(struct device *dev, const char *name, - const struct file_operations *fops, - void *priv, int flags) -{ - struct file *file; - struct inode *inode; - int rc; - - if (fops->owner && !try_module_get(fops->owner)) { - dev_err(dev, "%s: Owner does not exist\n", __func__); - rc = -ENOENT; - goto err1; - } - - rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount, - &ocxlflash_fs_cnt); - if (unlikely(rc < 0)) { - dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n", - __func__, rc); - goto err2; - } - - inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb); - if (IS_ERR(inode)) { - rc = PTR_ERR(inode); - dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n", - __func__, rc); - goto err3; - } - - file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name, - flags & (O_ACCMODE | O_NONBLOCK), fops); - if (IS_ERR(file)) { - rc = PTR_ERR(file); - dev_err(dev, "%s: alloc_file failed rc=%d\n", - __func__, rc); - goto err4; - } - - file->private_data = priv; -out: - return file; -err4: - iput(inode); -err3: - simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt); -err2: - module_put(fops->owner); -err1: - file = ERR_PTR(rc); - goto out; -} - -/** - * ocxlflash_psa_map() - map the process specific MMIO space - * @ctx_cookie: Adapter context for which the mapping needs to be done. 
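ocxlflash_getfile() above is the standard recipe for minting a file with no on-disk backing: pin a private pseudo filesystem, allocate an anonymous inode on its superblock, then wrap it in a struct file. A condensed sketch of the same sequence (demo names; the pin/inode unwinding on the later error paths is omitted for brevity):

#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mount.h>

static struct vfsmount *demo_mnt;
static int demo_mnt_cnt;
static struct file_system_type demo_fs_type;	/* as ocxlflash_fs_type above */

static struct file *demo_anon_file(const char *name,
				   const struct file_operations *fops,
				   void *priv, int flags)
{
	struct inode *inode;
	struct file *file;
	int rc;

	rc = simple_pin_fs(&demo_fs_type, &demo_mnt, &demo_mnt_cnt);
	if (rc)
		return ERR_PTR(rc);

	inode = alloc_anon_inode(demo_mnt->mnt_sb);	/* no pagecache backing */
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	file = alloc_file_pseudo(inode, demo_mnt, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (!IS_ERR(file))
		file->private_data = priv;
	return file;
}

The private mount (rather than the stock anon_inode file, whose single shared inode means a shared address_space) appears to be what lets each context own a distinct f_mapping, which ocxlflash_get_fd() further down stores in ctx->mapping.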
- * - * Return: MMIO pointer of the mapped region - */ -static void __iomem *ocxlflash_psa_map(void *ctx_cookie) -{ - struct ocxlflash_context *ctx = ctx_cookie; - struct device *dev = ctx->hw_afu->dev; - - mutex_lock(&ctx->state_mutex); - if (ctx->state != STARTED) { - dev_err(dev, "%s: Context not started, state=%d\n", __func__, - ctx->state); - mutex_unlock(&ctx->state_mutex); - return NULL; - } - mutex_unlock(&ctx->state_mutex); - - return ioremap(ctx->psn_phys, ctx->psn_size); -} - -/** - * ocxlflash_psa_unmap() - unmap the process specific MMIO space - * @addr: MMIO pointer to unmap. - */ -static void ocxlflash_psa_unmap(void __iomem *addr) -{ - iounmap(addr); -} - -/** - * ocxlflash_process_element() - get process element of the adapter context - * @ctx_cookie: Adapter context associated with the process element. - * - * Return: process element of the adapter context - */ -static int ocxlflash_process_element(void *ctx_cookie) -{ - struct ocxlflash_context *ctx = ctx_cookie; - - return ctx->pe; -} - -/** - * afu_map_irq() - map the interrupt of the adapter context - * @flags: Flags. - * @ctx: Adapter context. - * @num: Per-context AFU interrupt number. - * @handler: Interrupt handler to register. - * @cookie: Interrupt handler private data. - * @name: Name of the interrupt. - * - * Return: 0 on success, -errno on failure - */ -static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num, - irq_handler_t handler, void *cookie, char *name) -{ - struct ocxl_hw_afu *afu = ctx->hw_afu; - struct device *dev = afu->dev; - struct ocxlflash_irqs *irq; - struct xive_irq_data *xd; - u32 virq; - int rc = 0; - - if (num < 0 || num >= ctx->num_irqs) { - dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num); - rc = -ENOENT; - goto out; - } - - irq = &ctx->irqs[num]; - virq = irq_create_mapping(NULL, irq->hwirq); - if (unlikely(!virq)) { - dev_err(dev, "%s: irq_create_mapping failed\n", __func__); - rc = -ENOMEM; - goto out; - } - - rc = request_irq(virq, handler, 0, name, cookie); - if (unlikely(rc)) { - dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc); - goto err1; - } - - xd = irq_get_handler_data(virq); - if (unlikely(!xd)) { - dev_err(dev, "%s: Can't get interrupt data\n", __func__); - rc = -ENXIO; - goto err2; - } - - irq->virq = virq; - irq->vtrig = xd->trig_mmio; -out: - return rc; -err2: - free_irq(virq, cookie); -err1: - irq_dispose_mapping(virq); - goto out; -} - -/** - * ocxlflash_map_afu_irq() - map the interrupt of the adapter context - * @ctx_cookie: Adapter context. - * @num: Per-context AFU interrupt number. - * @handler: Interrupt handler to register. - * @cookie: Interrupt handler private data. - * @name: Name of the interrupt. - * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_map_afu_irq(void *ctx_cookie, int num, - irq_handler_t handler, void *cookie, - char *name) -{ - return afu_map_irq(0, ctx_cookie, num, handler, cookie, name); -} - -/** - * afu_unmap_irq() - unmap the interrupt - * @flags: Flags. - * @ctx: Adapter context. - * @num: Per-context AFU interrupt number. - * @cookie: Interrupt handler private data. 
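One detail worth noting in afu_map_irq() above: after request_irq() succeeds, the handler data attached to the Linux virq is the XIVE descriptor, and its trig_mmio trigger page is cached in irq->vtrig. That cached page is exactly what ocxlflash_get_irq_objhndl() below returns as the interrupt "object handle", which the driver appears to program into the AFU (see the lisn_ea registers in sislite.h further down) so the hardware can raise the interrupt by writing the trigger page.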
- */ -static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num, - void *cookie) -{ - struct ocxl_hw_afu *afu = ctx->hw_afu; - struct device *dev = afu->dev; - struct ocxlflash_irqs *irq; - - if (num < 0 || num >= ctx->num_irqs) { - dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num); - return; - } - - irq = &ctx->irqs[num]; - - if (irq_find_mapping(NULL, irq->hwirq)) { - free_irq(irq->virq, cookie); - irq_dispose_mapping(irq->virq); - } - - memset(irq, 0, sizeof(*irq)); -} - -/** - * ocxlflash_unmap_afu_irq() - unmap the interrupt - * @ctx_cookie: Adapter context. - * @num: Per-context AFU interrupt number. - * @cookie: Interrupt handler private data. - */ -static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie) -{ - return afu_unmap_irq(0, ctx_cookie, num, cookie); -} - -/** - * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt - * @ctx_cookie: Context associated with the interrupt. - * @irq: Interrupt number. - * - * Return: effective address of the mapped region - */ -static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq) -{ - struct ocxlflash_context *ctx = ctx_cookie; - - if (irq < 0 || irq >= ctx->num_irqs) - return 0; - - return (__force u64)ctx->irqs[irq].vtrig; -} - -/** - * ocxlflash_xsl_fault() - callback when translation error is triggered - * @data: Private data provided at callback registration, the context. - * @addr: Address that triggered the error. - * @dsisr: Value of dsisr register. - */ -static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr) -{ - struct ocxlflash_context *ctx = data; - - spin_lock(&ctx->slock); - ctx->fault_addr = addr; - ctx->fault_dsisr = dsisr; - ctx->pending_fault = true; - spin_unlock(&ctx->slock); - - wake_up_all(&ctx->wq); -} - -/** - * start_context() - local routine to start a context - * @ctx: Adapter context to be started. - * - * Assign the context specific MMIO space, add and enable the PE. - * - * Return: 0 on success, -errno on failure - */ -static int start_context(struct ocxlflash_context *ctx) -{ - struct ocxl_hw_afu *afu = ctx->hw_afu; - struct ocxl_afu_config *acfg = &afu->acfg; - void *link_token = afu->link_token; - struct pci_dev *pdev = afu->pdev; - struct device *dev = afu->dev; - bool master = ctx->master; - struct mm_struct *mm; - int rc = 0; - u32 pid; - - mutex_lock(&ctx->state_mutex); - if (ctx->state != OPENED) { - dev_err(dev, "%s: Context state invalid, state=%d\n", - __func__, ctx->state); - rc = -EINVAL; - goto out; - } - - if (master) { - ctx->psn_size = acfg->global_mmio_size; - ctx->psn_phys = afu->gmmio_phys; - } else { - ctx->psn_size = acfg->pp_mmio_stride; - ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size); - } - - /* pid and mm not set for master contexts */ - if (master) { - pid = 0; - mm = NULL; - } else { - pid = current->mm->context.id; - mm = current->mm; - } - - rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, - pci_dev_id(pdev), mm, ocxlflash_xsl_fault, - ctx); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n", - __func__, rc); - goto out; - } - - ctx->state = STARTED; -out: - mutex_unlock(&ctx->state_mutex); - return rc; -} - -/** - * ocxlflash_start_context() - start a kernel context - * @ctx_cookie: Adapter context to be started. 
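start_context() above carves the process-specific MMIO window arithmetically: a master context gets the whole global MMIO region, while a user context gets a stride-sized slice at ppmmio_phys + pe * pp_mmio_stride. For illustration only: with a 64 KB pp_mmio_stride, process element 3 would own the 64 KB window starting at ppmmio_phys + 0x30000.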
- * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_start_context(void *ctx_cookie) -{ - struct ocxlflash_context *ctx = ctx_cookie; - - return start_context(ctx); -} - -/** - * ocxlflash_stop_context() - stop a context - * @ctx_cookie: Adapter context to be stopped. - * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_stop_context(void *ctx_cookie) -{ - struct ocxlflash_context *ctx = ctx_cookie; - struct ocxl_hw_afu *afu = ctx->hw_afu; - struct ocxl_afu_config *acfg = &afu->acfg; - struct pci_dev *pdev = afu->pdev; - struct device *dev = afu->dev; - enum ocxlflash_ctx_state state; - int rc = 0; - - mutex_lock(&ctx->state_mutex); - state = ctx->state; - ctx->state = CLOSED; - mutex_unlock(&ctx->state_mutex); - if (state != STARTED) - goto out; - - rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos, - ctx->pe); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n", - __func__, rc); - /* If EBUSY, PE could be referenced in future by the AFU */ - if (rc == -EBUSY) - goto out; - } - - rc = ocxl_link_remove_pe(afu->link_token, ctx->pe); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n", - __func__, rc); - goto out; - } -out: - return rc; -} - -/** - * ocxlflash_afu_reset() - reset the AFU - * @ctx_cookie: Adapter context. - */ -static int ocxlflash_afu_reset(void *ctx_cookie) -{ - struct ocxlflash_context *ctx = ctx_cookie; - struct device *dev = ctx->hw_afu->dev; - - /* Pending implementation from OCXL transport services */ - dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__); - - /* Silently return success until it is implemented */ - return 0; -} - -/** - * ocxlflash_set_master() - sets the context as master - * @ctx_cookie: Adapter context to set as master. - */ -static void ocxlflash_set_master(void *ctx_cookie) -{ - struct ocxlflash_context *ctx = ctx_cookie; - - ctx->master = true; -} - -/** - * ocxlflash_get_context() - obtains the context associated with the host - * @pdev: PCI device associated with the host. - * @afu_cookie: Hardware AFU associated with the host. - * - * Return: returns the pointer to host adapter context - */ -static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie) -{ - struct ocxl_hw_afu *afu = afu_cookie; - - return afu->ocxl_ctx; -} - -/** - * ocxlflash_dev_context_init() - allocate and initialize an adapter context - * @pdev: PCI device associated with the host. - * @afu_cookie: Hardware AFU associated with the host. 
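ocxlflash_dev_context_init() just below reserves the context's process element number with the idr_preload()/idr_alloc() pair. A standalone sketch of that allocation pattern (demo_* names are illustrative):

#include <linux/idr.h>

static DEFINE_IDR(demo_idr);

static int demo_alloc_id(void *ptr, int max_id)
{
	int id;

	idr_preload(GFP_KERNEL);	/* sleeping preallocation happens here */
	id = idr_alloc(&demo_idr, ptr, 0, max_id, GFP_NOWAIT);
	idr_preload_end();

	return id;	/* >= 0 on success, -errno on failure */
}

GFP_NOWAIT is required inside the preload section because idr_preload() disables preemption; any allocation that might sleep is done up front under GFP_KERNEL.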
- * - * Return: returns the adapter context on success, ERR_PTR on failure - */ -static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie) -{ - struct ocxl_hw_afu *afu = afu_cookie; - struct device *dev = afu->dev; - struct ocxlflash_context *ctx; - int rc; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (unlikely(!ctx)) { - dev_err(dev, "%s: Context allocation failed\n", __func__); - rc = -ENOMEM; - goto err1; - } - - idr_preload(GFP_KERNEL); - rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT); - idr_preload_end(); - if (unlikely(rc < 0)) { - dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc); - goto err2; - } - - spin_lock_init(&ctx->slock); - init_waitqueue_head(&ctx->wq); - mutex_init(&ctx->state_mutex); - - ctx->state = OPENED; - ctx->pe = rc; - ctx->master = false; - ctx->mapping = NULL; - ctx->hw_afu = afu; - ctx->irq_bitmap = 0; - ctx->pending_irq = false; - ctx->pending_fault = false; -out: - return ctx; -err2: - kfree(ctx); -err1: - ctx = ERR_PTR(rc); - goto out; -} - -/** - * ocxlflash_release_context() - releases an adapter context - * @ctx_cookie: Adapter context to be released. - * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_release_context(void *ctx_cookie) -{ - struct ocxlflash_context *ctx = ctx_cookie; - struct device *dev; - int rc = 0; - - if (!ctx) - goto out; - - dev = ctx->hw_afu->dev; - mutex_lock(&ctx->state_mutex); - if (ctx->state >= STARTED) { - dev_err(dev, "%s: Context in use, state=%d\n", __func__, - ctx->state); - mutex_unlock(&ctx->state_mutex); - rc = -EBUSY; - goto out; - } - mutex_unlock(&ctx->state_mutex); - - idr_remove(&ctx->hw_afu->idr, ctx->pe); - ocxlflash_release_mapping(ctx); - kfree(ctx); -out: - return rc; -} - -/** - * ocxlflash_perst_reloads_same_image() - sets the image reload policy - * @afu_cookie: Hardware AFU associated with the host. - * @image: Whether to load the same image on PERST. - */ -static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image) -{ - struct ocxl_hw_afu *afu = afu_cookie; - - afu->perst_same_image = image; -} - -/** - * ocxlflash_read_adapter_vpd() - reads the adapter VPD - * @pdev: PCI device associated with the host. - * @buf: Buffer to get the VPD data. - * @count: Size of buffer (maximum bytes that can be read). - * - * Return: size of VPD on success, -errno on failure - */ -static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf, - size_t count) -{ - return pci_read_vpd(pdev, 0, count, buf); -} - -/** - * free_afu_irqs() - internal service to free interrupts - * @ctx: Adapter context. - */ -static void free_afu_irqs(struct ocxlflash_context *ctx) -{ - struct ocxl_hw_afu *afu = ctx->hw_afu; - struct device *dev = afu->dev; - int i; - - if (!ctx->irqs) { - dev_err(dev, "%s: Interrupts not allocated\n", __func__); - return; - } - - for (i = ctx->num_irqs; i >= 0; i--) - ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq); - - kfree(ctx->irqs); - ctx->irqs = NULL; -} - -/** - * alloc_afu_irqs() - internal service to allocate interrupts - * @ctx: Context associated with the request. - * @num: Number of interrupts requested. 
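Worth flagging in free_afu_irqs() above: the descending loop runs for (i = ctx->num_irqs; i >= 0; i--), which is num_irqs + 1 iterations, so its first pass reads irqs[num_irqs], one element past the allocated array. afu_release() further down uses the same bound, but there afu_unmap_irq()'s range check turns the extra iteration into a logged no-op. A conventional descending unwind starts one lower:

	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);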
- * - * Return: 0 on success, -errno on failure - */ -static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num) -{ - struct ocxl_hw_afu *afu = ctx->hw_afu; - struct device *dev = afu->dev; - struct ocxlflash_irqs *irqs; - int rc = 0; - int hwirq; - int i; - - if (ctx->irqs) { - dev_err(dev, "%s: Interrupts already allocated\n", __func__); - rc = -EEXIST; - goto out; - } - - if (num > OCXL_MAX_IRQS) { - dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num); - rc = -EINVAL; - goto out; - } - - irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL); - if (unlikely(!irqs)) { - dev_err(dev, "%s: Context irqs allocation failed\n", __func__); - rc = -ENOMEM; - goto out; - } - - for (i = 0; i < num; i++) { - rc = ocxl_link_irq_alloc(afu->link_token, &hwirq); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n", - __func__, rc); - goto err; - } - - irqs[i].hwirq = hwirq; - } - - ctx->irqs = irqs; - ctx->num_irqs = num; -out: - return rc; -err: - for (i = i-1; i >= 0; i--) - ocxl_link_free_irq(afu->link_token, irqs[i].hwirq); - kfree(irqs); - goto out; -} - -/** - * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts - * @ctx_cookie: Context associated with the request. - * @num: Number of interrupts requested. - * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num) -{ - return alloc_afu_irqs(ctx_cookie, num); -} - -/** - * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context - * @ctx_cookie: Adapter context. - */ -static void ocxlflash_free_afu_irqs(void *ctx_cookie) -{ - free_afu_irqs(ctx_cookie); -} - -/** - * ocxlflash_unconfig_afu() - unconfigure the AFU - * @afu: AFU associated with the host. - */ -static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu) -{ - if (afu->gmmio_virt) { - iounmap(afu->gmmio_virt); - afu->gmmio_virt = NULL; - } -} - -/** - * ocxlflash_destroy_afu() - destroy the AFU structure - * @afu_cookie: AFU to be freed. - */ -static void ocxlflash_destroy_afu(void *afu_cookie) -{ - struct ocxl_hw_afu *afu = afu_cookie; - int pos; - - if (!afu) - return; - - ocxlflash_release_context(afu->ocxl_ctx); - idr_destroy(&afu->idr); - - /* Disable the AFU */ - pos = afu->acfg.dvsec_afu_control_pos; - ocxl_config_set_afu_state(afu->pdev, pos, 0); - - ocxlflash_unconfig_afu(afu); - kfree(afu); -} - -/** - * ocxlflash_config_fn() - configure the host function - * @pdev: PCI device associated with the host. - * @afu: AFU associated with the host. 
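alloc_afu_irqs() above shows the matching allocate-side idiom: on a failure at slot i, the err path's for (i = i-1; i >= 0; i--) loop releases only the interrupts that were actually allocated, slots 0 through i - 1, before freeing the array and propagating the error.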
- * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu) -{ - struct ocxl_fn_config *fcfg = &afu->fcfg; - struct device *dev = &pdev->dev; - u16 base, enabled, supported; - int rc = 0; - - /* Read DVSEC config of the function */ - rc = ocxl_config_read_function(pdev, fcfg); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n", - __func__, rc); - goto out; - } - - /* Check if function has AFUs defined, only 1 per function supported */ - if (fcfg->max_afu_index >= 0) { - afu->is_present = true; - if (fcfg->max_afu_index != 0) - dev_warn(dev, "%s: Unexpected AFU index value %d\n", - __func__, fcfg->max_afu_index); - } - - rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n", - __func__, rc); - goto out; - } - - afu->fn_actag_base = base; - afu->fn_actag_enabled = enabled; - - ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled); - dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n", - __func__, base, enabled); - - rc = ocxl_link_setup(pdev, 0, &afu->link_token); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n", - __func__, rc); - goto out; - } - - rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n", - __func__, rc); - goto err; - } -out: - return rc; -err: - ocxl_link_release(pdev, afu->link_token); - goto out; -} - -/** - * ocxlflash_unconfig_fn() - unconfigure the host function - * @pdev: PCI device associated with the host. - * @afu: AFU associated with the host. - */ -static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu) -{ - ocxl_link_release(pdev, afu->link_token); -} - -/** - * ocxlflash_map_mmio() - map the AFU MMIO space - * @afu: AFU associated with the host. - * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu) -{ - struct ocxl_afu_config *acfg = &afu->acfg; - struct pci_dev *pdev = afu->pdev; - struct device *dev = afu->dev; - phys_addr_t gmmio, ppmmio; - int rc = 0; - - rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash"); - if (unlikely(rc)) { - dev_err(dev, "%s: pci_request_region for global failed rc=%d\n", - __func__, rc); - goto out; - } - gmmio = pci_resource_start(pdev, acfg->global_mmio_bar); - gmmio += acfg->global_mmio_offset; - - rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash"); - if (unlikely(rc)) { - dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n", - __func__, rc); - goto err1; - } - ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar); - ppmmio += acfg->pp_mmio_offset; - - afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size); - if (unlikely(!afu->gmmio_virt)) { - dev_err(dev, "%s: MMIO mapping failed\n", __func__); - rc = -ENOMEM; - goto err2; - } - - afu->gmmio_phys = gmmio; - afu->ppmmio_phys = ppmmio; -out: - return rc; -err2: - pci_release_region(pdev, acfg->pp_mmio_bar); -err1: - pci_release_region(pdev, acfg->global_mmio_bar); - goto out; -} - -/** - * ocxlflash_config_afu() - configure the host AFU - * @pdev: PCI device associated with the host. - * @afu: AFU associated with the host. - * - * Must be called _after_ host function configuration. 
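ocxlflash_map_mmio() above follows the usual reserve-then-map pattern for a PCI BAR sub-range: claim the region, compute the physical address from the BAR start plus the DVSEC-advertised offset, then ioremap(). A condensed sketch of one such mapping (demo names; the errno from the failed request is collapsed to NULL here):

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar,
				  u64 offset, size_t size)
{
	phys_addr_t phys;
	void __iomem *virt;

	if (pci_request_region(pdev, bar, "demo"))
		return NULL;

	phys = pci_resource_start(pdev, bar) + offset;
	virt = ioremap(phys, size);
	if (!virt)
		pci_release_region(pdev, bar);	/* undo the reservation */

	return virt;
}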
- * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu) -{ - struct ocxl_afu_config *acfg = &afu->acfg; - struct ocxl_fn_config *fcfg = &afu->fcfg; - struct device *dev = &pdev->dev; - int count; - int base; - int pos; - int rc = 0; - - /* This HW AFU function does not have any AFUs defined */ - if (!afu->is_present) - goto out; - - /* Read AFU config at index 0 */ - rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n", - __func__, rc); - goto out; - } - - /* Only one AFU per function is supported, so actag_base is same */ - base = afu->fn_actag_base; - count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled); - pos = acfg->dvsec_afu_control_pos; - - ocxl_config_set_afu_actag(pdev, pos, base, count); - dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count); - afu->afu_actag_base = base; - afu->afu_actag_enabled = count; - afu->max_pasid = 1 << acfg->pasid_supported_log; - - ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log); - - rc = ocxlflash_map_mmio(afu); - if (unlikely(rc)) { - dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n", - __func__, rc); - goto out; - } - - /* Enable the AFU */ - ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1); -out: - return rc; -} - -/** - * ocxlflash_create_afu() - create the AFU for OCXL - * @pdev: PCI device associated with the host. - * - * Return: AFU on success, NULL on failure - */ -static void *ocxlflash_create_afu(struct pci_dev *pdev) -{ - struct device *dev = &pdev->dev; - struct ocxlflash_context *ctx; - struct ocxl_hw_afu *afu; - int rc; - - afu = kzalloc(sizeof(*afu), GFP_KERNEL); - if (unlikely(!afu)) { - dev_err(dev, "%s: HW AFU allocation failed\n", __func__); - goto out; - } - - afu->pdev = pdev; - afu->dev = dev; - idr_init(&afu->idr); - - rc = ocxlflash_config_fn(pdev, afu); - if (unlikely(rc)) { - dev_err(dev, "%s: Function configuration failed rc=%d\n", - __func__, rc); - goto err1; - } - - rc = ocxlflash_config_afu(pdev, afu); - if (unlikely(rc)) { - dev_err(dev, "%s: AFU configuration failed rc=%d\n", - __func__, rc); - goto err2; - } - - ctx = ocxlflash_dev_context_init(pdev, afu); - if (IS_ERR(ctx)) { - rc = PTR_ERR(ctx); - dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n", - __func__, rc); - goto err3; - } - - afu->ocxl_ctx = ctx; -out: - return afu; -err3: - ocxlflash_unconfig_afu(afu); -err2: - ocxlflash_unconfig_fn(pdev, afu); -err1: - idr_destroy(&afu->idr); - kfree(afu); - afu = NULL; - goto out; -} - -/** - * ctx_event_pending() - check for any event pending on the context - * @ctx: Context to be checked. - * - * Return: true if there is an event pending, false if none pending - */ -static inline bool ctx_event_pending(struct ocxlflash_context *ctx) -{ - if (ctx->pending_irq || ctx->pending_fault) - return true; - - return false; -} - -/** - * afu_poll() - poll the AFU for events on the context - * @file: File associated with the adapter context. - * @poll: Poll structure from the user. 
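afu_poll() just below is the canonical ->poll shape: poll_wait() only registers the caller on the context waitqueue (it never blocks), and the readiness mask is computed under the same lock the interrupt path takes. A minimal sketch of that shape with illustrative demo types:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_ctx {
	spinlock_t lock;
	wait_queue_head_t wq;
	bool event_pending;
	bool closed;
};

static __poll_t demo_poll(struct file *file, struct poll_table_struct *pt)
{
	struct demo_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->wq, pt);		/* register, do not sleep */

	spin_lock_irq(&ctx->lock);
	if (ctx->event_pending)
		mask |= EPOLLIN | EPOLLRDNORM;
	else if (ctx->closed)
		mask |= EPOLLERR;
	spin_unlock_irq(&ctx->lock);

	return mask;
}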
- * - * Return: poll mask - */ -static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) -{ - struct ocxlflash_context *ctx = file->private_data; - struct device *dev = ctx->hw_afu->dev; - ulong lock_flags; - int mask = 0; - - poll_wait(file, &ctx->wq, poll); - - spin_lock_irqsave(&ctx->slock, lock_flags); - if (ctx_event_pending(ctx)) - mask |= POLLIN | POLLRDNORM; - else if (ctx->state == CLOSED) - mask |= POLLERR; - spin_unlock_irqrestore(&ctx->slock, lock_flags); - - dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n", - __func__, ctx->pe, mask); - - return mask; -} - -/** - * afu_read() - perform a read on the context for any event - * @file: File associated with the adapter context. - * @buf: Buffer to receive the data. - * @count: Size of buffer (maximum bytes that can be read). - * @off: Offset. - * - * Return: size of the data read on success, -errno on failure - */ -static ssize_t afu_read(struct file *file, char __user *buf, size_t count, - loff_t *off) -{ - struct ocxlflash_context *ctx = file->private_data; - struct device *dev = ctx->hw_afu->dev; - struct cxl_event event; - ulong lock_flags; - ssize_t esize; - ssize_t rc; - int bit; - DEFINE_WAIT(event_wait); - - if (*off != 0) { - dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n", - __func__, *off); - rc = -EINVAL; - goto out; - } - - spin_lock_irqsave(&ctx->slock, lock_flags); - - for (;;) { - prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE); - - if (ctx_event_pending(ctx) || (ctx->state == CLOSED)) - break; - - if (file->f_flags & O_NONBLOCK) { - dev_err(dev, "%s: File cannot be blocked on I/O\n", - __func__); - rc = -EAGAIN; - goto err; - } - - if (signal_pending(current)) { - dev_err(dev, "%s: Signal pending on the process\n", - __func__); - rc = -ERESTARTSYS; - goto err; - } - - spin_unlock_irqrestore(&ctx->slock, lock_flags); - schedule(); - spin_lock_irqsave(&ctx->slock, lock_flags); - } - - finish_wait(&ctx->wq, &event_wait); - - memset(&event, 0, sizeof(event)); - event.header.process_element = ctx->pe; - event.header.size = sizeof(struct cxl_event_header); - if (ctx->pending_irq) { - esize = sizeof(struct cxl_event_afu_interrupt); - event.header.size += esize; - event.header.type = CXL_EVENT_AFU_INTERRUPT; - - bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs); - clear_bit(bit, &ctx->irq_bitmap); - event.irq.irq = bit + 1; - if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs)) - ctx->pending_irq = false; - } else if (ctx->pending_fault) { - event.header.size += sizeof(struct cxl_event_data_storage); - event.header.type = CXL_EVENT_DATA_STORAGE; - event.fault.addr = ctx->fault_addr; - event.fault.dsisr = ctx->fault_dsisr; - ctx->pending_fault = false; - } - - spin_unlock_irqrestore(&ctx->slock, lock_flags); - - if (copy_to_user(buf, &event, event.header.size)) { - dev_err(dev, "%s: copy_to_user failed\n", __func__); - rc = -EFAULT; - goto out; - } - - rc = event.header.size; -out: - return rc; -err: - finish_wait(&ctx->wq, &event_wait); - spin_unlock_irqrestore(&ctx->slock, lock_flags); - goto out; -} - -/** - * afu_release() - release and free the context - * @inode: File inode pointer. - * @file: File associated with the context. 
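The sleep loop in afu_read() above is ordered carefully to avoid a lost wakeup: prepare_to_wait() puts the task on the waitqueue and sets TASK_INTERRUPTIBLE before the event condition is re-checked under ctx->slock, so a wakeup arriving between the check and schedule() simply makes schedule() return immediately. Dropping the spinlock only around schedule() keeps pending_irq, pending_fault and the irq bitmap consistent with what is copied into the cxl_event afterwards.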
- * - * Return: 0 on success, -errno on failure - */ -static int afu_release(struct inode *inode, struct file *file) -{ - struct ocxlflash_context *ctx = file->private_data; - int i; - - /* Unmap and free the interrupts associated with the context */ - for (i = ctx->num_irqs; i >= 0; i--) - afu_unmap_irq(0, ctx, i, ctx); - free_afu_irqs(ctx); - - return ocxlflash_release_context(ctx); -} - -/** - * ocxlflash_mmap_fault() - mmap fault handler - * @vmf: VM fault associated with current fault. - * - * Return: 0 on success, -errno on failure - */ -static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct ocxlflash_context *ctx = vma->vm_file->private_data; - struct device *dev = ctx->hw_afu->dev; - u64 mmio_area, offset; - - offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= ctx->psn_size) - return VM_FAULT_SIGBUS; - - mutex_lock(&ctx->state_mutex); - if (ctx->state != STARTED) { - dev_err(dev, "%s: Context not started, state=%d\n", - __func__, ctx->state); - mutex_unlock(&ctx->state_mutex); - return VM_FAULT_SIGBUS; - } - mutex_unlock(&ctx->state_mutex); - - mmio_area = ctx->psn_phys; - mmio_area += offset; - - return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT); -} - -static const struct vm_operations_struct ocxlflash_vmops = { - .fault = ocxlflash_mmap_fault, -}; - -/** - * afu_mmap() - map the fault handler operations - * @file: File associated with the context. - * @vma: VM area associated with mapping. - * - * Return: 0 on success, -errno on failure - */ -static int afu_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct ocxlflash_context *ctx = file->private_data; - - if ((vma_pages(vma) + vma->vm_pgoff) > - (ctx->psn_size >> PAGE_SHIFT)) - return -EINVAL; - - vm_flags_set(vma, VM_IO | VM_PFNMAP); - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_ops = &ocxlflash_vmops; - return 0; -} - -static const struct file_operations ocxl_afu_fops = { - .owner = THIS_MODULE, - .poll = afu_poll, - .read = afu_read, - .release = afu_release, - .mmap = afu_mmap, -}; - -#define PATCH_FOPS(NAME) \ - do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0) - -/** - * ocxlflash_get_fd() - get file descriptor for an adapter context - * @ctx_cookie: Adapter context. - * @fops: File operations to be associated. - * @fd: File descriptor to be returned back. 
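Two small conventions show up around the fd plumbing here. PATCH_FOPS() lets a caller of ocxlflash_get_fd() (below) supply a partially filled file_operations and have any missing entry points default to ocxl_afu_fops, so the cxlflash core overrides only the ops it cares about. And get_unused_fd_flags() merely reserves an fd number; the struct file is handed back so the caller can pair the two with fd_install() once setup can no longer fail (that call sits outside this file).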
- * - * Return: pointer to the file on success, ERR_PTR on failure - */ -static struct file *ocxlflash_get_fd(void *ctx_cookie, - struct file_operations *fops, int *fd) -{ - struct ocxlflash_context *ctx = ctx_cookie; - struct device *dev = ctx->hw_afu->dev; - struct file *file; - int flags, fdtmp; - int rc = 0; - char *name = NULL; - - /* Only allow one fd per context */ - if (ctx->mapping) { - dev_err(dev, "%s: Context is already mapped to an fd\n", - __func__); - rc = -EEXIST; - goto err1; - } - - flags = O_RDWR | O_CLOEXEC; - - /* This code is similar to anon_inode_getfd() */ - rc = get_unused_fd_flags(flags); - if (unlikely(rc < 0)) { - dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n", - __func__, rc); - goto err1; - } - fdtmp = rc; - - /* Patch the file ops that are not defined */ - if (fops) { - PATCH_FOPS(poll); - PATCH_FOPS(read); - PATCH_FOPS(release); - PATCH_FOPS(mmap); - } else /* Use default ops */ - fops = (struct file_operations *)&ocxl_afu_fops; - - name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe); - file = ocxlflash_getfile(dev, name, fops, ctx, flags); - kfree(name); - if (IS_ERR(file)) { - rc = PTR_ERR(file); - dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n", - __func__, rc); - goto err2; - } - - ctx->mapping = file->f_mapping; - *fd = fdtmp; -out: - return file; -err2: - put_unused_fd(fdtmp); -err1: - file = ERR_PTR(rc); - goto out; -} - -/** - * ocxlflash_fops_get_context() - get the context associated with the file - * @file: File associated with the adapter context. - * - * Return: pointer to the context - */ -static void *ocxlflash_fops_get_context(struct file *file) -{ - return file->private_data; -} - -/** - * ocxlflash_afu_irq() - interrupt handler for user contexts - * @irq: Interrupt number. - * @data: Private data provided at interrupt registration, the context. - * - * Return: Always return IRQ_HANDLED. - */ -static irqreturn_t ocxlflash_afu_irq(int irq, void *data) -{ - struct ocxlflash_context *ctx = data; - struct device *dev = ctx->hw_afu->dev; - int i; - - dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n", - __func__, ctx->pe, irq); - - for (i = 0; i < ctx->num_irqs; i++) { - if (ctx->irqs[i].virq == irq) - break; - } - if (unlikely(i >= ctx->num_irqs)) { - dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__); - goto out; - } - - spin_lock(&ctx->slock); - set_bit(i - 1, &ctx->irq_bitmap); - ctx->pending_irq = true; - spin_unlock(&ctx->slock); - - wake_up_all(&ctx->wq); -out: - return IRQ_HANDLED; -} - -/** - * ocxlflash_start_work() - start a user context - * @ctx_cookie: Context to be started. - * @num_irqs: Number of interrupts requested. 
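ocxlflash_afu_irq() above and afu_read() earlier appear to agree on a 1-based numbering for events: the handler records AFU interrupt i at bitmap position i - 1, and afu_read() reports find_first_bit() + 1, so the same interrupt number round-trips to user space in cxl_event.irq.irq.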
- * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs) -{ - struct ocxlflash_context *ctx = ctx_cookie; - struct ocxl_hw_afu *afu = ctx->hw_afu; - struct device *dev = afu->dev; - char *name; - int rc = 0; - int i; - - rc = alloc_afu_irqs(ctx, num_irqs); - if (unlikely(rc < 0)) { - dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc); - goto out; - } - - for (i = 0; i < num_irqs; i++) { - name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i", - dev_name(dev), ctx->pe, i); - rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name); - kfree(name); - if (unlikely(rc < 0)) { - dev_err(dev, "%s: afu_map_irq failed rc=%d\n", - __func__, rc); - goto err; - } - } - - rc = start_context(ctx); - if (unlikely(rc)) { - dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc); - goto err; - } -out: - return rc; -err: - for (i = i-1; i >= 0; i--) - afu_unmap_irq(0, ctx, i, ctx); - free_afu_irqs(ctx); - goto out; -}; - -/** - * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor - * @file: File installed with adapter file descriptor. - * @vma: VM area associated with mapping. - * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma) -{ - return afu_mmap(file, vma); -} - -/** - * ocxlflash_fd_release() - release the context associated with the file - * @inode: File inode pointer. - * @file: File associated with the adapter context. - * - * Return: 0 on success, -errno on failure - */ -static int ocxlflash_fd_release(struct inode *inode, struct file *file) -{ - return afu_release(inode, file); -} - -/* Backend ops to ocxlflash services */ -const struct cxlflash_backend_ops cxlflash_ocxl_ops = { - .module = THIS_MODULE, - .psa_map = ocxlflash_psa_map, - .psa_unmap = ocxlflash_psa_unmap, - .process_element = ocxlflash_process_element, - .map_afu_irq = ocxlflash_map_afu_irq, - .unmap_afu_irq = ocxlflash_unmap_afu_irq, - .get_irq_objhndl = ocxlflash_get_irq_objhndl, - .start_context = ocxlflash_start_context, - .stop_context = ocxlflash_stop_context, - .afu_reset = ocxlflash_afu_reset, - .set_master = ocxlflash_set_master, - .get_context = ocxlflash_get_context, - .dev_context_init = ocxlflash_dev_context_init, - .release_context = ocxlflash_release_context, - .perst_reloads_same_image = ocxlflash_perst_reloads_same_image, - .read_adapter_vpd = ocxlflash_read_adapter_vpd, - .allocate_afu_irqs = ocxlflash_allocate_afu_irqs, - .free_afu_irqs = ocxlflash_free_afu_irqs, - .create_afu = ocxlflash_create_afu, - .destroy_afu = ocxlflash_destroy_afu, - .get_fd = ocxlflash_get_fd, - .fops_get_context = ocxlflash_fops_get_context, - .start_work = ocxlflash_start_work, - .fd_mmap = ocxlflash_fd_mmap, - .fd_release = ocxlflash_fd_release, -}; diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h deleted file mode 100644 index f2fe88816bea..000000000000 --- a/drivers/scsi/cxlflash/ocxl_hw.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * CXL Flash Device Driver - * - * Written by: Matthew R. 
Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2018 IBM Corporation - */ - -#define OCXL_MAX_IRQS 4 /* Max interrupts per process */ - -struct ocxlflash_irqs { - int hwirq; - u32 virq; - void __iomem *vtrig; -}; - -/* OCXL hardware AFU associated with the host */ -struct ocxl_hw_afu { - struct ocxlflash_context *ocxl_ctx; /* Host context */ - struct pci_dev *pdev; /* PCI device */ - struct device *dev; /* Generic device */ - bool perst_same_image; /* Same image loaded on perst */ - - struct ocxl_fn_config fcfg; /* DVSEC config of the function */ - struct ocxl_afu_config acfg; /* AFU configuration data */ - - int fn_actag_base; /* Function acTag base */ - int fn_actag_enabled; /* Function acTag number enabled */ - int afu_actag_base; /* AFU acTag base */ - int afu_actag_enabled; /* AFU acTag number enabled */ - - phys_addr_t ppmmio_phys; /* Per process MMIO space */ - phys_addr_t gmmio_phys; /* Global AFU MMIO space */ - void __iomem *gmmio_virt; /* Global MMIO map */ - - void *link_token; /* Link token for the SPA */ - struct idr idr; /* IDR to manage contexts */ - int max_pasid; /* Maximum number of contexts */ - bool is_present; /* Function has AFUs defined */ -}; - -enum ocxlflash_ctx_state { - CLOSED, - OPENED, - STARTED -}; - -struct ocxlflash_context { - struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */ - struct address_space *mapping; /* Mapping for pseudo filesystem */ - bool master; /* Whether this is a master context */ - int pe; /* Process element */ - - phys_addr_t psn_phys; /* Process mapping */ - u64 psn_size; /* Process mapping size */ - - spinlock_t slock; /* Protects irq/fault/event updates */ - wait_queue_head_t wq; /* Wait queue for poll and interrupts */ - struct mutex state_mutex; /* Mutex to update context state */ - enum ocxlflash_ctx_state state; /* Context state */ - - struct ocxlflash_irqs *irqs; /* Pointer to array of structures */ - int num_irqs; /* Number of interrupts */ - bool pending_irq; /* Pending interrupt on the context */ - ulong irq_bitmap; /* Bits indicating pending irq num */ - - u64 fault_addr; /* Address that triggered the fault */ - u64 fault_dsisr; /* Value of dsisr register at fault */ - bool pending_fault; /* Pending translation fault */ -}; diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h deleted file mode 100644 index ab315c59505b..000000000000 --- a/drivers/scsi/cxlflash/sislite.h +++ /dev/null @@ -1,560 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#ifndef _SISLITE_H -#define _SISLITE_H - -#include <linux/types.h> - -typedef u16 ctx_hndl_t; -typedef u32 res_hndl_t; - -#define SIZE_4K 4096 -#define SIZE_64K 65536 - -/* - * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness - * except for SCSI CDB which remains big endian per SCSI standards. 
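A reading aid for the SISlite register definitions that follow: the bit comments use IBM's MSB-first numbering, so "bit 0 (MSB)" on SISL_REQ_FLAGS_RES_HNDL means the 0x8000 position of the 16-bit req_flags field and "bit 15 (LSB)" means 0x0001. The b53/b63-style annotations further down apply the same convention to 64-bit registers.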
- */ -struct sisl_ioarcb { - u16 ctx_id; /* ctx_hndl_t */ - u16 req_flags; -#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */ -#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U - -#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */ - -#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */ -#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U -#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U -#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U - -#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */ - -#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */ - -#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */ -#define SISL_REQ_FLAGS_HOST_READ 0x0000U - - union { - u32 res_hndl; /* res_hndl_t */ - u32 port_sel; /* this is a selection mask: - * 0x1 -> port#0 can be selected, - * 0x2 -> port#1 can be selected. - * Can be bitwise ORed. - */ - }; - u64 lun_id; - u32 data_len; /* 4K for read/write */ - u32 ioadl_len; - union { - u64 data_ea; /* min 16 byte aligned */ - u64 ioadl_ea; - }; - u8 msi; /* LISN to send on RRQ write */ -#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */ -#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */ -#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */ -#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */ - - u8 rrq; /* 0 for a single RRQ */ - u16 timeout; /* in units specified by req_flags */ - u32 rsvd1; - u8 cdb[16]; /* must be in big endian */ -#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */ -#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */ -#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */ - -#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */ -#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */ - - union { - u64 reserved; /* Reserved for IOARRIN mode */ - struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */ - }; -} __packed; - -struct sisl_rc { - u8 flags; -#define SISL_RC_FLAGS_SENSE_VALID 0x80U -#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U -#define SISL_RC_FLAGS_OVERRUN 0x20U -#define SISL_RC_FLAGS_UNDERRUN 0x10U - - u8 afu_rc; -#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */ -#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */ -#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */ -#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra - * may retry if afu_retry is off - * possible on master exit - */ -#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */ -#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */ -#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */ -#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra - * may retry if afu_retry is off - * possible on master exit - */ -#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */ - -#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */ - - /* NO_CHANNELS means the FC ports selected by dest_port in - * IOARCB or in the LXT entry are down when the AFU tried to select - * a FC port. If the port went down on an active IO, it will set - * fc_rc to =0x54(NOLOGI) or 0x57(LINKDOWN) instead. 
- */ -#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */ -#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or - * afu reset/master restart - */ -#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */ -#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra - * may retry if afu_retry is off - */ - - u8 scsi_rc; /* SCSI status byte, retry as appropriate */ -#define SISL_SCSI_RC_CHECK 0x02U -#define SISL_SCSI_RC_BUSY 0x08u - - u8 fc_rc; /* retry */ - /* - * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for - * commands that are in flight when a link goes down or is logged out. - * If the link is down or logged out before AFU selects the port, either - * it will choose the other port or we will get afu_rc=0x20 (no_channel) - * if there is no valid port to use. - * - * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these - * would happen if a frame is dropped and something times out. - * NOLOGI or LINKDOWN can be retried if the other port is up. - * RESIDERR can be retried as well. - * - * ABORTFAIL might indicate that lots of frames are getting CRC errors. - * So it maybe retried once and reset the link if it happens again. - * The link can also be reset on the CRC error threshold interrupt. - */ -#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */ -#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */ -#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */ -#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */ -#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */ -#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */ -#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */ -#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */ -#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */ -#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI - * reported len, possibly due to dropped - * frames - */ -#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */ -}; - -#define SISL_SENSE_DATA_LEN 20 /* Sense data length */ -#define SISL_WWID_DATA_LEN 16 /* WWID data length */ - -/* - * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required, - * host native endianness - */ -struct sisl_ioasa { - union { - struct sisl_rc rc; - u32 ioasc; -#define SISL_IOASC_GOOD_COMPLETION 0x00000000U - }; - - union { - u32 resid; - u32 lunid_hi; - }; - - u8 port; - u8 afu_extra; - /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR): - * afu_exta contains PSL response code. Useful codes are: - */ -#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action - * Enabled N/A - * Disabled retry - */ -#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error - * afu_rc Implies - * 0x04, 0x14 master exit. - * 0x31 user error. - */ - /* when afu rc=0x20 (no channels): - * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask. - */ -#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2) -#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03) - - u8 scsi_extra; - u8 fc_extra; - - union { - u8 sense_data[SISL_SENSE_DATA_LEN]; - struct { - u32 lunid_lo; - u8 wwid[SISL_WWID_DATA_LEN]; - }; - }; - - /* These fields are defined by the SISlite architecture for the - * host to use as they see fit for their implementation. 
- */ - union { - u64 host_use[4]; - u8 host_use_b[32]; - }; -} __packed; - -#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */ - -/* MMIO space is required to support only 64-bit access */ - -/* - * This AFU has two mechanisms to deal with endian-ness. - * One is a global configuration (in the afu_config) register - * below that specifies the endian-ness of the host. - * The other is a per context (i.e. application) specification - * controlled by the endian_ctrl field here. Since the master - * context is one such application the master context's - * endian-ness is set to be the same as the host. - * - * As per the SISlite spec, the MMIO registers are always - * big endian. - */ -#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL -#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL - -#ifdef __BIG_ENDIAN -#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE -#else -#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE -#endif - -/* per context host transport MMIO */ -struct sisl_host_map { - __be64 endian_ctrl; /* Per context Endian Control. The AFU will - * operate on whatever the context is of the - * host application. - */ - - __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl. - * Only recovery in a PERM_ERR is a context - * exit since there is no way to tell which - * command caused the error. - */ -#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */ -#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */ -#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */ -#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */ -#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */ -#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */ -#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */ -#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */ -#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */ -#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */ - /* Page in wait accessing RCB/IOASA/RRQ is reported in b63. - * Same error in data/LXT/RHT access is reported via IOASA. - */ -#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be - * generated when AFU - * auto retry is - * disabled. If user - * can determine the - * command that caused - * the error, it can - * be retried. 
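The mask arithmetic here is easy to sanity-check: the ten PERM_ERR bits (0x0400 down to 0x0002) plus TEMP_ERR_PAGEIN (0x0001) occupy b53 through b63 and OR together to exactly 0x07FF, the SISL_ISTATUS_UNMASK value defined just below.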
- */ -#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */ -#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */ - - __be64 intr_clear; - __be64 intr_mask; - __be64 ioarrin; /* only write what cmd_room permits */ - __be64 rrq_start; /* start & end are both inclusive */ - __be64 rrq_end; /* write sequence: start followed by end */ - __be64 cmd_room; - __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ -#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */ -#define SISL_CTX_CTRL_LISN_MASK (0xFFULL) - __be64 mbox_w; /* restricted use */ - __be64 sq_start; /* Submission Queue (R/W): write sequence and */ - __be64 sq_end; /* inclusion semantics are the same as RRQ */ - __be64 sq_head; /* Submission Queue Head (R): for debugging */ - __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */ - __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */ -}; - -/* per context provisioning & control MMIO */ -struct sisl_ctrl_map { - __be64 rht_start; - __be64 rht_cnt_id; - /* both cnt & ctx_id args must be ULL */ -#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32)) - - __be64 ctx_cap; /* afu_rc below is when the capability is violated */ -#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */ -#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */ -#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */ -#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */ -#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */ -#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */ -#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */ -#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */ - __be64 mbox_r; - __be64 lisn_pasid[2]; - /* pasid _a arg must be ULL */ -#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b)) - __be64 lisn_ea[3]; -}; - -/* single copy global regs */ -struct sisl_global_regs { - __be64 aintr_status; - /* - * In cxlflash, FC port/link are arranged in port pairs, each - * gets a byte of status: - * - * *_OTHER: other err, FC_ERRCAP[31:20] - * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in - * *_CRC_T: CRC threshold exceeded - * *_LOGI_R: login state machine timed out and retrying - * *_LOGI_F: login failed, FC_ERROR[19:0] - * *_LOGI_S: login succeeded - * *_LINK_DN: link online to offline - * *_LINK_UP: link offline to online - */ -#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */ -#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */ -#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */ -#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */ -#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */ -#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */ -#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */ -#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */ - -#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */ -#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */ -#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */ -#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */ -#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */ -#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */ -#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */ -#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */ - -#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */ -#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */ -#define SISL_ASTATUS_FC0_CRC_T 
0x00002000ULL /* b50 */ -#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */ -#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */ -#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */ -#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */ -#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */ - -#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */ -#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */ -#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */ -#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */ -#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */ -#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */ -#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */ -#define SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */ - -#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */ -#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK) -#define SISL_FC_INTERNAL_SHIFT 32 - -#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL -#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL - -#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL -#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL - -#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */ -#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */ - - __be64 aintr_clear; - __be64 aintr_mask; - __be64 afu_ctrl; - __be64 afu_hb; - __be64 afu_scratch_pad; - __be64 afu_port_sel; -#define SISL_AFUCONF_AR_IOARCB 0x4000ULL -#define SISL_AFUCONF_AR_LXT 0x2000ULL -#define SISL_AFUCONF_AR_RHT 0x1000ULL -#define SISL_AFUCONF_AR_DATA 0x0800ULL -#define SISL_AFUCONF_AR_RSRC 0x0400ULL -#define SISL_AFUCONF_AR_IOASA 0x0200ULL -#define SISL_AFUCONF_AR_RRQ 0x0100ULL -/* Aggregate all Auto Retry Bits */ -#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \ - SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \ - SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \ - SISL_AFUCONF_AR_RRQ) -#ifdef __BIG_ENDIAN -#define SISL_AFUCONF_ENDIAN 0x0000ULL -#else -#define SISL_AFUCONF_ENDIAN 0x0020ULL -#endif -#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL - __be64 afu_config; - __be64 rsvd[0xf8]; - __le64 afu_version; - __be64 interface_version; -#define SISL_INTVER_CAP_SHIFT 16 -#define SISL_INTVER_MAJ_SHIFT 8 -#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL -#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL -#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL -#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL -#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL -#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL -#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL -#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL -#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL -#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL -}; - -#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */ -#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */ -#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \ - CXLFLASH_MAX_FC_BANKS) -#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */ -#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */ -#define CXLFLASH_NUM_REGS 512 /* number of registers per port */ - -struct fc_port_bank { - __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS]; - __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS]; -}; - -struct sisl_global_map { - union { - struct sisl_global_regs regs; - char page0[SIZE_4K]; /* page 0 */ - 
}; - - char page1[SIZE_4K]; /* page 1 */ - - struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */ - - /* pages 10 - 15 are reserved */ - -}; - -/* - * CXL Flash Memory Map - * - * +-------------------------------+ - * | 512 * 64 KB User MMIO | - * | (per context) | - * | User Accessible | - * +-------------------------------+ - * | 512 * 128 B per context | - * | Provisioning and Control | - * | Trusted Process accessible | - * +-------------------------------+ - * | 64 KB Global | - * | Trusted Process accessible | - * +-------------------------------+ - */ -struct cxlflash_afu_map { - union { - struct sisl_host_map host; - char harea[SIZE_64K]; /* 64KB each */ - } hosts[CXLFLASH_MAX_CONTEXT]; - - union { - struct sisl_ctrl_map ctrl; - char carea[cache_line_size()]; /* 128B each */ - } ctrls[CXLFLASH_MAX_CONTEXT]; - - union { - struct sisl_global_map global; - char garea[SIZE_64K]; /* 64KB single block */ - }; -}; - -/* - * LXT - LBA Translation Table - * LXT control blocks - */ -struct sisl_lxt_entry { - u64 rlba_base; /* bits 0:47 is base - * b48:55 is lun index - * b58:59 is write & read perms - * (if no perm, afu_rc=0x15) - * b60:63 is port_sel mask - */ -}; - -/* - * RHT - Resource Handle Table - * Per the SISlite spec, RHT entries are to be 16-byte aligned - */ -struct sisl_rht_entry { - struct sisl_lxt_entry *lxt_start; - u32 lxt_cnt; - u16 rsvd; - u8 fp; /* format & perm nibbles. - * (if no perm, afu_rc=0x05) - */ - u8 nmask; -} __packed __aligned(16); - -struct sisl_rht_entry_f1 { - u64 lun_id; - union { - struct { - u8 valid; - u8 rsvd[5]; - u8 fp; - u8 port_sel; - }; - - u64 dw; - }; -} __packed __aligned(16); - -/* make the fp byte */ -#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm)) - -/* make the fp byte for a clone from a source fp and clone flags - * flags must be only 2 LSB bits. - */ -#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags))) - -#define RHT_PERM_READ 0x01U -#define RHT_PERM_WRITE 0x02U -#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE) - -/* extract the perm bits from a fp */ -#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW) - -#define PORT0 0x01U -#define PORT1 0x02U -#define PORT2 0x04U -#define PORT3 0x08U -#define PORT_MASK(_n) ((1 << (_n)) - 1) - -/* AFU Sync Mode byte */ -#define AFU_LW_SYNC 0x0U -#define AFU_HW_SYNC 0x1U -#define AFU_GSYNC 0x2U - -/* Special Task Management Function CDB */ -#define TMF_LUN_RESET 0x1U -#define TMF_CLEAR_ACA 0x2U - -#endif /* _SISLITE_H */ diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c deleted file mode 100644 index e1b55b03e812..000000000000 --- a/drivers/scsi/cxlflash/superpipe.c +++ /dev/null @@ -1,2220 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#include <linux/delay.h> -#include <linux/file.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/syscalls.h> -#include <asm/unaligned.h> - -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_eh.h> -#include <uapi/scsi/cxlflash_ioctl.h> - -#include "sislite.h" -#include "common.h" -#include "vlun.h" -#include "superpipe.h" - -struct cxlflash_global global; - -/** - * marshal_rele_to_resize() - translate release to resize structure - * @release: Source structure from which to translate/copy. 
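As an aside on the RHT fp-byte helpers defined in sislite.h above, a minimal pure-C sketch of how the format and permission nibbles compose and decompose (the macro names are from the header; the concrete values are illustrative):

	u8 fp = SISL_RHT_FP(1U, RHT_PERM_RW);		/* 0x13: fmt 1, perm RW */
	u8 ro = SISL_RHT_FP_CLONE(fp, RHT_PERM_READ);	/* 0x11: clone, read-only */
	u8 perm = SISL_RHT_PERM(ro);			/* RHT_PERM_READ */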
- * @resize: Destination structure for the translate/copy. - */ -static void marshal_rele_to_resize(struct dk_cxlflash_release *release, - struct dk_cxlflash_resize *resize) -{ - resize->hdr = release->hdr; - resize->context_id = release->context_id; - resize->rsrc_handle = release->rsrc_handle; -} - -/** - * marshal_det_to_rele() - translate detach to release structure - * @detach: Destination structure for the translate/copy. - * @release: Source structure from which to translate/copy. - */ -static void marshal_det_to_rele(struct dk_cxlflash_detach *detach, - struct dk_cxlflash_release *release) -{ - release->hdr = detach->hdr; - release->context_id = detach->context_id; -} - -/** - * marshal_udir_to_rele() - translate udirect to release structure - * @udirect: Source structure from which to translate/copy. - * @release: Destination structure for the translate/copy. - */ -static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect, - struct dk_cxlflash_release *release) -{ - release->hdr = udirect->hdr; - release->context_id = udirect->context_id; - release->rsrc_handle = udirect->rsrc_handle; -} - -/** - * cxlflash_free_errpage() - frees resources associated with global error page - */ -void cxlflash_free_errpage(void) -{ - - mutex_lock(&global.mutex); - if (global.err_page) { - __free_page(global.err_page); - global.err_page = NULL; - } - mutex_unlock(&global.mutex); -} - -/** - * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts - * @cfg: Internal structure associated with the host. - * - * When the host needs to go down, all users must be quiesced and their - * memory freed. This is accomplished by putting the contexts in error - * state which will notify the user and let them 'drive' the tear down. - * Meanwhile, this routine camps until all user contexts have been removed. - * - * Note that the main loop in this routine will always execute at least once - * to flush the reset_waitq. - */ -void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg) -{ - struct device *dev = &cfg->dev->dev; - int i, found = true; - - cxlflash_mark_contexts_error(cfg); - - while (true) { - for (i = 0; i < MAX_CONTEXT; i++) - if (cfg->ctx_tbl[i]) { - found = true; - break; - } - - if (!found && list_empty(&cfg->ctx_err_recovery)) - return; - - dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n", - __func__); - wake_up_all(&cfg->reset_waitq); - ssleep(1); - found = false; - } -} - -/** - * find_error_context() - locates a context by cookie on the error recovery list - * @cfg: Internal structure associated with the host. - * @rctxid: Desired context by id. - * @file: Desired context by file. - * - * Return: Found context on success, NULL on failure - */ -static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid, - struct file *file) -{ - struct ctx_info *ctxi; - - list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list) - if ((ctxi->ctxid == rctxid) || (ctxi->file == file)) - return ctxi; - - return NULL; -} - -/** - * get_context() - obtains a validated and locked context reference - * @cfg: Internal structure associated with the host. - * @rctxid: Desired context (raw, un-decoded format). - * @arg: LUN information or file associated with request. - * @ctx_ctrl: Control information to 'steer' desired lookup. - * - * NOTE: despite the name pid, in linux, current->pid actually refers - * to the lightweight process id (tid) and can change if the process is - * multi threaded. 
The tgid remains constant for the process and only changes - * when the process forks. For all intents and purposes, think of tgid - * as a pid in the traditional sense. - * - * Return: Validated context on success, NULL on failure - */ -struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid, - void *arg, enum ctx_ctrl ctx_ctrl) -{ - struct device *dev = &cfg->dev->dev; - struct ctx_info *ctxi = NULL; - struct lun_access *lun_access = NULL; - struct file *file = NULL; - struct llun_info *lli = arg; - u64 ctxid = DECODE_CTXID(rctxid); - int rc; - pid_t pid = task_tgid_nr(current), ctxpid = 0; - - if (ctx_ctrl & CTX_CTRL_FILE) { - lli = NULL; - file = (struct file *)arg; - } - - if (ctx_ctrl & CTX_CTRL_CLONE) - pid = task_ppid_nr(current); - - if (likely(ctxid < MAX_CONTEXT)) { - while (true) { - mutex_lock(&cfg->ctx_tbl_list_mutex); - ctxi = cfg->ctx_tbl[ctxid]; - if (ctxi) - if ((file && (ctxi->file != file)) || - (!file && (ctxi->ctxid != rctxid))) - ctxi = NULL; - - if ((ctx_ctrl & CTX_CTRL_ERR) || - (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK))) - ctxi = find_error_context(cfg, rctxid, file); - if (!ctxi) { - mutex_unlock(&cfg->ctx_tbl_list_mutex); - goto out; - } - - /* - * Need to acquire ownership of the context while still - * under the table/list lock to serialize with a remove - * thread. Use the 'try' to avoid stalling the - * table/list lock for a single context. - * - * Note that the lock order is: - * - * cfg->ctx_tbl_list_mutex -> ctxi->mutex - * - * Therefore release ctx_tbl_list_mutex before retrying. - */ - rc = mutex_trylock(&ctxi->mutex); - mutex_unlock(&cfg->ctx_tbl_list_mutex); - if (rc) - break; /* got the context's lock! */ - } - - if (ctxi->unavail) - goto denied; - - ctxpid = ctxi->pid; - if (likely(!(ctx_ctrl & CTX_CTRL_NOPID))) - if (pid != ctxpid) - goto denied; - - if (lli) { - list_for_each_entry(lun_access, &ctxi->luns, list) - if (lun_access->lli == lli) - goto out; - goto denied; - } - } - -out: - dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u " - "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid, - ctx_ctrl); - - return ctxi; - -denied: - mutex_unlock(&ctxi->mutex); - ctxi = NULL; - goto out; -} - -/** - * put_context() - release a context that was retrieved from get_context() - * @ctxi: Context to release. - * - * For now, releasing the context equates to unlocking its mutex. - */ -void put_context(struct ctx_info *ctxi) -{ - mutex_unlock(&ctxi->mutex); -} - -/** - * afu_attach() - attach a context to the AFU - * @cfg: Internal structure associated with the host. - * @ctxi: Context to attach. - * - * Upon setting the context capabilities, they must be confirmed with - * a read back operation as the context might have been closed since - * the mailbox was unlocked. When this occurs, registration is failed.
- * - * Return: 0 on success, -errno on failure - */ -static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) -{ - struct device *dev = &cfg->dev->dev; - struct afu *afu = cfg->afu; - struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map; - int rc = 0; - struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); - u64 val; - int i; - - /* Unlock cap and restrict user to read/write cmds in translated mode */ - readq_be(&ctrl_map->mbox_r); - val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD); - writeq_be(val, &ctrl_map->ctx_cap); - val = readq_be(&ctrl_map->ctx_cap); - if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) { - dev_err(dev, "%s: ctx may be closed val=%016llx\n", - __func__, val); - rc = -EAGAIN; - goto out; - } - - if (afu_is_ocxl_lisn(afu)) { - /* Set up the LISN effective address for each interrupt */ - for (i = 0; i < ctxi->irqs; i++) { - val = cfg->ops->get_irq_objhndl(ctxi->ctx, i); - writeq_be(val, &ctrl_map->lisn_ea[i]); - } - - /* Use primary HWQ PASID as identifier for all interrupts */ - val = hwq->ctx_hndl; - writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]); - writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]); - } - - /* Set up MMIO registers pointing to the RHT */ - writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start); - val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl)); - writeq_be(val, &ctrl_map->rht_cnt_id); -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * read_cap16() - issues a SCSI READ_CAP16 command - * @sdev: SCSI device associated with LUN. - * @lli: LUN destined for capacity request. - * - * The READ_CAP16 can take quite a while to complete. Should an EEH occur while - * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of - * the recovery, the handler drains all currently running ioctls, waiting until - * they have completed before proceeding with a reset. As this routine is used - * on the ioctl path, this can create a condition where the EEH handler becomes - * stuck, infinitely waiting for this ioctl thread. To avoid this behavior, - * temporarily unmark this thread as an ioctl thread by releasing the ioctl - * read semaphore. This will allow the EEH handler to proceed with a recovery - * while this thread is still running. Once the scsi_execute_cmd() returns, - * reacquire the ioctl read semaphore and check the adapter state in case it - * changed while inside of scsi_execute_cmd(). The state check will wait if the - * adapter is still being recovered or return a failure if the recovery failed. - * In the event that the adapter reset failed, simply return the failure as the - * ioctl would be unable to continue. - * - * Note that the above puts a requirement on this routine to only be called on - * an ioctl thread. 
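For reference, the first 12 bytes of the READ CAPACITY(16) payload consumed below are big-endian fields; a dependency-free sketch of the same decode (cmd_buf as in the function that follows):

	u64 max_lba = 0;
	u32 blk_len = 0;
	int i;

	for (i = 0; i < 8; i++)		/* bytes 0-7: last LBA (BE) */
		max_lba = (max_lba << 8) | cmd_buf[i];
	for (i = 8; i < 12; i++)	/* bytes 8-11: block length (BE) */
		blk_len = (blk_len << 8) | cmd_buf[i];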
- * - * Return: 0 on success, -errno on failure - */ -static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct glun_info *gli = lli->parent; - struct scsi_sense_hdr sshdr; - const struct scsi_exec_args exec_args = { - .sshdr = &sshdr, - }; - u8 *cmd_buf = NULL; - u8 *scsi_cmd = NULL; - int rc = 0; - int result = 0; - int retry_cnt = 0; - u32 to = CMD_TIMEOUT * HZ; - -retry: - cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); - scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); - if (unlikely(!cmd_buf || !scsi_cmd)) { - rc = -ENOMEM; - goto out; - } - - scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */ - scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */ - put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]); - - dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__, - retry_cnt ? "re" : "", scsi_cmd[0]); - - /* Drop the ioctl read semaphore across lengthy call */ - up_read(&cfg->ioctl_rwsem); - result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf, - CMD_BUFSIZE, to, CMD_RETRIES, &exec_args); - down_read(&cfg->ioctl_rwsem); - rc = check_state(cfg); - if (rc) { - dev_err(dev, "%s: Failed state result=%08x\n", - __func__, result); - rc = -ENODEV; - goto out; - } - - if (result > 0 && scsi_sense_valid(&sshdr)) { - if (result & SAM_STAT_CHECK_CONDITION) { - switch (sshdr.sense_key) { - case NO_SENSE: - case RECOVERED_ERROR: - case NOT_READY: - result &= ~SAM_STAT_CHECK_CONDITION; - break; - case UNIT_ATTENTION: - switch (sshdr.asc) { - case 0x29: /* Power on Reset or Device Reset */ - fallthrough; - case 0x2A: /* Device capacity changed */ - case 0x3F: /* Report LUNs changed */ - /* Retry the command once more */ - if (retry_cnt++ < 1) { - kfree(cmd_buf); - kfree(scsi_cmd); - goto retry; - } - } - break; - default: - break; - } - } - } - - if (result) { - dev_err(dev, "%s: command failed, result=%08x\n", - __func__, result); - rc = -EIO; - goto out; - } - - /* - * Read cap was successful, grab values from the buffer; - * note that we don't need to worry about unaligned access - * as the buffer is allocated on an aligned boundary. - */ - mutex_lock(&gli->mutex); - gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0])); - gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8])); - mutex_unlock(&gli->mutex); - -out: - kfree(cmd_buf); - kfree(scsi_cmd); - - dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n", - __func__, gli->max_lba, gli->blk_len, rc); - return rc; -} - -/** - * get_rhte() - obtains validated resource handle table entry reference - * @ctxi: Context owning the resource handle. - * @rhndl: Resource handle associated with entry. - * @lli: LUN associated with request. 
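Callers pair this lookup with the context helpers above; a schematic of the typical pattern, mirroring the ioctl paths later in this file:

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi))
		return -EINVAL;

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		put_context(ctxi);
		return -EINVAL;
	}

	/* ... operate on rhte while holding ctxi->mutex ... */
	put_context(ctxi);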
- * - * Return: Validated RHTE on success, NULL on failure - */ -struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, - struct llun_info *lli) -{ - struct cxlflash_cfg *cfg = ctxi->cfg; - struct device *dev = &cfg->dev->dev; - struct sisl_rht_entry *rhte = NULL; - - if (unlikely(!ctxi->rht_start)) { - dev_dbg(dev, "%s: Context does not have allocated RHT\n", - __func__); - goto out; - } - - if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) { - dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", - __func__, rhndl); - goto out; - } - - if (unlikely(ctxi->rht_lun[rhndl] != lli)) { - dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n", - __func__, rhndl); - goto out; - } - - rhte = &ctxi->rht_start[rhndl]; - if (unlikely(rhte->nmask == 0)) { - dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n", - __func__, rhndl); - rhte = NULL; - goto out; - } - -out: - return rhte; -} - -/** - * rhte_checkout() - obtains free/empty resource handle table entry - * @ctxi: Context owning the resource handle. - * @lli: LUN associated with request. - * - * Return: Free RHTE on success, NULL on failure - */ -struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, - struct llun_info *lli) -{ - struct cxlflash_cfg *cfg = ctxi->cfg; - struct device *dev = &cfg->dev->dev; - struct sisl_rht_entry *rhte = NULL; - int i; - - /* Find a free RHT entry */ - for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) - if (ctxi->rht_start[i].nmask == 0) { - rhte = &ctxi->rht_start[i]; - ctxi->rht_out++; - break; - } - - if (likely(rhte)) - ctxi->rht_lun[i] = lli; - - dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i); - return rhte; -} - -/** - * rhte_checkin() - releases a resource handle table entry - * @ctxi: Context owning the resource handle. - * @rhte: RHTE to release. - */ -void rhte_checkin(struct ctx_info *ctxi, - struct sisl_rht_entry *rhte) -{ - u32 rsrc_handle = rhte - ctxi->rht_start; - - rhte->nmask = 0; - rhte->fp = 0; - ctxi->rht_out--; - ctxi->rht_lun[rsrc_handle] = NULL; - ctxi->rht_needs_ws[rsrc_handle] = false; -} - -/** - * rht_format1() - populates a RHTE for format 1 - * @rhte: RHTE to populate. - * @lun_id: LUN ID of LUN associated with RHTE. - * @perm: Desired permissions for RHTE. - * @port_sel: Port selection mask - */ -static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm, - u32 port_sel) -{ - /* - * Populate the Format 1 RHT entry for direct access (physical - * LUN) using the synchronization sequence defined in the - * SISLite specification. - */ - struct sisl_rht_entry_f1 dummy = { 0 }; - struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; - - memset(rhte_f1, 0, sizeof(*rhte_f1)); - rhte_f1->fp = SISL_RHT_FP(1U, 0); - dma_wmb(); /* Make setting of format bit visible */ - - rhte_f1->lun_id = lun_id; - dma_wmb(); /* Make setting of LUN id visible */ - - /* - * Use a dummy RHT Format 1 entry to build the second dword - * of the entry that must be populated in a single write when - * enabled (valid bit set to TRUE). - */ - dummy.valid = 0x80; - dummy.fp = SISL_RHT_FP(1U, perm); - dummy.port_sel = port_sel; - rhte_f1->dw = dummy.dw; - - dma_wmb(); /* Make remaining RHT entry fields visible */ -} - -/** - * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode - * @gli: LUN to attach. - * @mode: Desired mode of the LUN. - * @locked: Mutex status on current thread. 
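The mode arbitration implemented below is first-user-wins; schematically, from a caller's point of view (a sketch, not additional driver code):

	/* First user establishes the mode; a conflicting mode is refused */
	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc))
		return rc;	/* e.g. LUN is already in virtual mode */

	/* ... further attaches of the same mode bump gli->users ... */

	cxlflash_lun_detach(gli);	/* last detach resets to MODE_NONE */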
- * - * Return: 0 on success, -errno on failure - */ -int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked) -{ - int rc = 0; - - if (!locked) - mutex_lock(&gli->mutex); - - if (gli->mode == MODE_NONE) - gli->mode = mode; - else if (gli->mode != mode) { - pr_debug("%s: gli_mode=%d requested_mode=%d\n", - __func__, gli->mode, mode); - rc = -EINVAL; - goto out; - } - - gli->users++; - WARN_ON(gli->users <= 0); -out: - pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n", - __func__, rc, gli->mode, gli->users); - if (!locked) - mutex_unlock(&gli->mutex); - return rc; -} - -/** - * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode - * @gli: LUN to detach. - * - * When resetting the mode, terminate block allocation resources as they - * are no longer required (service is safe to call even when block allocation - * resources were not present - such as when transitioning from physical mode). - * These resources will be reallocated when needed (subsequent transition to - * virtual mode). - */ -void cxlflash_lun_detach(struct glun_info *gli) -{ - mutex_lock(&gli->mutex); - WARN_ON(gli->mode == MODE_NONE); - if (--gli->users == 0) { - gli->mode = MODE_NONE; - cxlflash_ba_terminate(&gli->blka.ba_lun); - } - pr_debug("%s: gli->users=%u\n", __func__, gli->users); - WARN_ON(gli->users < 0); - mutex_unlock(&gli->mutex); -} - -/** - * _cxlflash_disk_release() - releases the specified resource entry - * @sdev: SCSI device associated with LUN. - * @ctxi: Context owning resources. - * @release: Release ioctl data structure. - * - * For LUNs in virtual mode, the virtual LUN associated with the specified - * resource handle is resized to 0 prior to releasing the RHTE. Note that the - * AFU sync should _not_ be performed when the context is sitting on the error - * recovery list. A context on the error recovery list is not known to the AFU - * due to reset. When the context is recovered, it will be reattached and made - * known again to the AFU. - * - * Return: 0 on success, -errno on failure - */ -int _cxlflash_disk_release(struct scsi_device *sdev, - struct ctx_info *ctxi, - struct dk_cxlflash_release *release) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct afu *afu = cfg->afu; - bool put_ctx = false; - - struct dk_cxlflash_resize size; - res_hndl_t rhndl = release->rsrc_handle; - - int rc = 0; - int rcr = 0; - u64 ctxid = DECODE_CTXID(release->context_id), - rctxid = release->context_id; - - struct sisl_rht_entry *rhte; - struct sisl_rht_entry_f1 *rhte_f1; - - dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n", - __func__, ctxid, release->rsrc_handle, gli->mode, gli->users); - - if (!ctxi) { - ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%llu\n", - __func__, ctxid); - rc = -EINVAL; - goto out; - } - - put_ctx = true; - } - - rhte = get_rhte(ctxi, rhndl, lli); - if (unlikely(!rhte)) { - dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", - __func__, rhndl); - rc = -EINVAL; - goto out; - } - - /* - * Resize to 0 for virtual LUNS by setting the size - * to 0. This will clear LXT_START and LXT_CNT fields - * in the RHT entry and properly sync with the AFU. - * - * Afterwards we clear the remaining fields. 
- */ - switch (gli->mode) { - case MODE_VIRTUAL: - marshal_rele_to_resize(release, &size); - size.req_size = 0; - rc = _cxlflash_vlun_resize(sdev, ctxi, &size); - if (rc) { - dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc); - goto out; - } - - break; - case MODE_PHYSICAL: - /* - * Clear the Format 1 RHT entry for direct access - * (physical LUN) using the synchronization sequence - * defined in the SISLite specification. - */ - rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; - - rhte_f1->valid = 0; - dma_wmb(); /* Make revocation of RHT entry visible */ - - rhte_f1->lun_id = 0; - dma_wmb(); /* Make clearing of LUN id visible */ - - rhte_f1->dw = 0; - dma_wmb(); /* Make RHT entry bottom-half clearing visible */ - - if (!ctxi->err_recovery_active) { - rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); - if (unlikely(rcr)) - dev_dbg(dev, "%s: AFU sync failed rc=%d\n", - __func__, rcr); - } - break; - default: - WARN(1, "Unsupported LUN mode!"); - goto out; - } - - rhte_checkin(ctxi, rhte); - cxlflash_lun_detach(gli); - -out: - if (put_ctx) - put_context(ctxi); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -int cxlflash_disk_release(struct scsi_device *sdev, - struct dk_cxlflash_release *release) -{ - return _cxlflash_disk_release(sdev, NULL, release); -} - -/** - * destroy_context() - releases a context - * @cfg: Internal structure associated with the host. - * @ctxi: Context to release. - * - * This routine is safe to be called with a non-initialized context. - * Also note that the routine conditionally checks for the existence - * of the context control map before clearing the RHT registers and - * context capabilities because it is possible to destroy a context - * while the context is in the error state (previous mapping was - * removed [so there is no need to worry about clearing] and context - * is waiting for a new mapping). - */ -static void destroy_context(struct cxlflash_cfg *cfg, - struct ctx_info *ctxi) -{ - struct afu *afu = cfg->afu; - - if (ctxi->initialized) { - WARN_ON(!list_empty(&ctxi->luns)); - - /* Clear RHT registers and drop all capabilities for context */ - if (afu->afu_map && ctxi->ctrl_map) { - writeq_be(0, &ctxi->ctrl_map->rht_start); - writeq_be(0, &ctxi->ctrl_map->rht_cnt_id); - writeq_be(0, &ctxi->ctrl_map->ctx_cap); - } - } - - /* Free memory associated with context */ - free_page((ulong)ctxi->rht_start); - kfree(ctxi->rht_needs_ws); - kfree(ctxi->rht_lun); - kfree(ctxi); -} - -/** - * create_context() - allocates and initializes a context - * @cfg: Internal structure associated with the host.
- * - * Return: Allocated context on success, NULL on failure - */ -static struct ctx_info *create_context(struct cxlflash_cfg *cfg) -{ - struct device *dev = &cfg->dev->dev; - struct ctx_info *ctxi = NULL; - struct llun_info **lli = NULL; - u8 *ws = NULL; - struct sisl_rht_entry *rhte; - - ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL); - lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL); - ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL); - if (unlikely(!ctxi || !lli || !ws)) { - dev_err(dev, "%s: Unable to allocate context\n", __func__); - goto err; - } - - rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL); - if (unlikely(!rhte)) { - dev_err(dev, "%s: Unable to allocate RHT\n", __func__); - goto err; - } - - ctxi->rht_lun = lli; - ctxi->rht_needs_ws = ws; - ctxi->rht_start = rhte; -out: - return ctxi; - -err: - kfree(ws); - kfree(lli); - kfree(ctxi); - ctxi = NULL; - goto out; -} - -/** - * init_context() - initializes a previously allocated context - * @ctxi: Previously allocated context - * @cfg: Internal structure associated with the host. - * @ctx: Previously obtained context cookie. - * @ctxid: Previously obtained process element associated with CXL context. - * @file: Previously obtained file associated with CXL context. - * @perms: User-specified permissions. - * @irqs: User-specified number of interrupts. - */ -static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg, - void *ctx, int ctxid, struct file *file, u32 perms, - u64 irqs) -{ - struct afu *afu = cfg->afu; - - ctxi->rht_perms = perms; - ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; - ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); - ctxi->irqs = irqs; - ctxi->pid = task_tgid_nr(current); /* tgid = pid */ - ctxi->ctx = ctx; - ctxi->cfg = cfg; - ctxi->file = file; - ctxi->initialized = true; - mutex_init(&ctxi->mutex); - kref_init(&ctxi->kref); - INIT_LIST_HEAD(&ctxi->luns); - INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */ -} - -/** - * remove_context() - context kref release handler - * @kref: Kernel reference associated with context to be removed. - * - * When a context no longer has any references it can safely be removed - * from global access and destroyed. Note that it is assumed the thread - * relinquishing access to the context holds its mutex. - */ -static void remove_context(struct kref *kref) -{ - struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref); - struct cxlflash_cfg *cfg = ctxi->cfg; - u64 ctxid = DECODE_CTXID(ctxi->ctxid); - - /* Remove context from table/error list */ - WARN_ON(!mutex_is_locked(&ctxi->mutex)); - ctxi->unavail = true; - mutex_unlock(&ctxi->mutex); - mutex_lock(&cfg->ctx_tbl_list_mutex); - mutex_lock(&ctxi->mutex); - - if (!list_empty(&ctxi->list)) - list_del(&ctxi->list); - cfg->ctx_tbl[ctxid] = NULL; - mutex_unlock(&cfg->ctx_tbl_list_mutex); - mutex_unlock(&ctxi->mutex); - - /* Context now completely uncoupled/unreachable */ - destroy_context(cfg, ctxi); -} - -/** - * _cxlflash_disk_detach() - detaches a LUN from a context - * @sdev: SCSI device associated with LUN. - * @ctxi: Context owning resources. - * @detach: Detach ioctl data structure. - * - * As part of the detach, all per-context resources associated with the LUN - * are cleaned up. When detaching the last LUN for a context, the context - * itself is cleaned up and released. 
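Context lifetime across these routines follows the standard kref idiom; a condensed sketch of the pattern as used here:

	kref_init(&ctxi->kref);		/* initial attach: refcount = 1 */
	kref_get(&ctxi->kref);		/* each additional LUN attach */

	/*
	 * Each detach drops one reference; the final put invokes
	 * remove_context(), which uncouples the context from the
	 * table/error list and destroys it.
	 */
	kref_put(&ctxi->kref, remove_context);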
- * - * Return: 0 on success, -errno on failure - */ -static int _cxlflash_disk_detach(struct scsi_device *sdev, - struct ctx_info *ctxi, - struct dk_cxlflash_detach *detach) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct lun_access *lun_access, *t; - struct dk_cxlflash_release rel; - bool put_ctx = false; - - int i; - int rc = 0; - u64 ctxid = DECODE_CTXID(detach->context_id), - rctxid = detach->context_id; - - dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid); - - if (!ctxi) { - ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%llu\n", - __func__, ctxid); - rc = -EINVAL; - goto out; - } - - put_ctx = true; - } - - /* Cleanup outstanding resources tied to this LUN */ - if (ctxi->rht_out) { - marshal_det_to_rele(detach, &rel); - for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { - if (ctxi->rht_lun[i] == lli) { - rel.rsrc_handle = i; - _cxlflash_disk_release(sdev, ctxi, &rel); - } - - /* No need to loop further if we're done */ - if (ctxi->rht_out == 0) - break; - } - } - - /* Take our LUN out of context, free the node */ - list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) - if (lun_access->lli == lli) { - list_del(&lun_access->list); - kfree(lun_access); - lun_access = NULL; - break; - } - - /* - * Release the context reference and the sdev reference that - * bound this LUN to the context. - */ - if (kref_put(&ctxi->kref, remove_context)) - put_ctx = false; - scsi_device_put(sdev); -out: - if (put_ctx) - put_context(ctxi); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -static int cxlflash_disk_detach(struct scsi_device *sdev, - struct dk_cxlflash_detach *detach) -{ - return _cxlflash_disk_detach(sdev, NULL, detach); -} - -/** - * cxlflash_cxl_release() - release handler for adapter file descriptor - * @inode: File-system inode associated with fd. - * @file: File installed with adapter file descriptor. - * - * This routine is the release handler for the fops registered with - * the CXL services on an initial attach for a context. It is called - * when a close (explicitly by the user or as part of a process tear - * down) is performed on the adapter file descriptor returned to the - * user. The user should be aware that explicitly performing a close - * is considered catastrophic and subsequent usage of the superpipe API - * with previously saved off tokens will fail. - * - * This routine derives the context reference and calls detach for - * each LUN associated with the context. The final detach operation - * causes the context itself to be freed. With the exception of when the - * CXL process element (context id) lookup fails (a case that should - * theoretically never occur), every call into this routine results - * in a complete freeing of a context. - * - * Detaching the LUN is typically an ioctl() operation and the underlying - * code assumes that ioctl_rwsem has been acquired as a reader. To support - * that design point, the semaphore is acquired and released around detach.
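Seen from user space, the orderly alternative to a bare close() is an explicit detach per attached LUN. A hedged sketch (disk_fd and ctxid are illustrative tokens saved from the attach ioctl; header version setup and error handling elided):

	struct dk_cxlflash_detach detach;

	memset(&detach, 0, sizeof(detach));
	detach.context_id = ctxid;

	if (ioctl(disk_fd, DK_CXLFLASH_DETACH, &detach) == -1)
		perror("DK_CXLFLASH_DETACH");

	/*
	 * Whether the application must also close() the adapter fd is
	 * signaled by DK_CXLFLASH_APP_CLOSE_ADAP_FD in the attach
	 * return flags.
	 */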
- * - * Return: 0 on success - */ -static int cxlflash_cxl_release(struct inode *inode, struct file *file) -{ - struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, - cxl_fops); - void *ctx = cfg->ops->fops_get_context(file); - struct device *dev = &cfg->dev->dev; - struct ctx_info *ctxi = NULL; - struct dk_cxlflash_detach detach = { { 0 }, 0 }; - struct lun_access *lun_access, *t; - enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; - int ctxid; - - ctxid = cfg->ops->process_element(ctx); - if (unlikely(ctxid < 0)) { - dev_err(dev, "%s: Context %p was closed ctxid=%d\n", - __func__, ctx, ctxid); - goto out; - } - - ctxi = get_context(cfg, ctxid, file, ctrl); - if (unlikely(!ctxi)) { - ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE); - if (!ctxi) { - dev_dbg(dev, "%s: ctxid=%d already free\n", - __func__, ctxid); - goto out_release; - } - - dev_dbg(dev, "%s: Another process owns ctxid=%d\n", - __func__, ctxid); - put_context(ctxi); - goto out; - } - - dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid); - - down_read(&cfg->ioctl_rwsem); - detach.context_id = ctxi->ctxid; - list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) - _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach); - up_read(&cfg->ioctl_rwsem); -out_release: - cfg->ops->fd_release(inode, file); -out: - dev_dbg(dev, "%s: returning\n", __func__); - return 0; -} - -/** - * unmap_context() - clears a previously established mapping - * @ctxi: Context owning the mapping. - * - * This routine is used to switch between the error notification page - * (dummy page of all 1's) and the real mapping (established by the CXL - * fault handler). - */ -static void unmap_context(struct ctx_info *ctxi) -{ - unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1); -} - -/** - * get_err_page() - obtains and allocates the error notification page - * @cfg: Internal structure associated with the host. - * - * Return: error notification page on success, NULL on failure - */ -static struct page *get_err_page(struct cxlflash_cfg *cfg) -{ - struct page *err_page = global.err_page; - struct device *dev = &cfg->dev->dev; - - if (unlikely(!err_page)) { - err_page = alloc_page(GFP_KERNEL); - if (unlikely(!err_page)) { - dev_err(dev, "%s: Unable to allocate err_page\n", - __func__); - goto out; - } - - memset(page_address(err_page), -1, PAGE_SIZE); - - /* Serialize update w/ other threads to avoid a leak */ - mutex_lock(&global.mutex); - if (likely(!global.err_page)) - global.err_page = err_page; - else { - __free_page(err_page); - err_page = global.err_page; - } - mutex_unlock(&global.mutex); - } - -out: - dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page); - return err_page; -} - -/** - * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor - * @vmf: VM fault associated with current fault. - * - * To support error notification via MMIO, faults are 'caught' by this routine - * that was inserted before passing back the adapter file descriptor on attach. - * When a fault occurs, this routine evaluates if error recovery is active and - * if so, installs the error page to 'notify' the user about the error state. - * During normal operation, the fault is simply handled by the original fault - * handler that was installed by CXL services as part of initializing the - * adapter file descriptor. The VMA's page protection bits are toggled to - * indicate cached/not-cached depending on the memory backing the fault. 
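User space can detect the error state through the mapping itself: while recovery is active, every read of the mapped MMIO area returns all ones. A sketch (mmio_size and adap_fd come from the attach ioctl; illustrative only):

	volatile __u64 *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
				    MAP_SHARED, adap_fd, 0);

	if (mmio != MAP_FAILED && mmio[0] == ~0ULL) {
		/*
		 * The error page is installed: initiate recovery via
		 * DK_CXLFLASH_RECOVER_AFU, then mmap() again.
		 */
	}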
- * - * Return: 0 on success, VM_FAULT_SIGBUS on failure - */ -static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct file *file = vma->vm_file; - struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, - cxl_fops); - void *ctx = cfg->ops->fops_get_context(file); - struct device *dev = &cfg->dev->dev; - struct ctx_info *ctxi = NULL; - struct page *err_page = NULL; - enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; - vm_fault_t rc = 0; - int ctxid; - - ctxid = cfg->ops->process_element(ctx); - if (unlikely(ctxid < 0)) { - dev_err(dev, "%s: Context %p was closed ctxid=%d\n", - __func__, ctx, ctxid); - goto err; - } - - ctxi = get_context(cfg, ctxid, file, ctrl); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid); - goto err; - } - - dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid); - - if (likely(!ctxi->err_recovery_active)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - rc = ctxi->cxl_mmap_vmops->fault(vmf); - } else { - dev_dbg(dev, "%s: err recovery active, use err_page\n", - __func__); - - err_page = get_err_page(cfg); - if (unlikely(!err_page)) { - dev_err(dev, "%s: Could not get err_page\n", __func__); - rc = VM_FAULT_RETRY; - goto out; - } - - get_page(err_page); - vmf->page = err_page; - vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); - } - -out: - if (likely(ctxi)) - put_context(ctxi); - dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc); - return rc; - -err: - rc = VM_FAULT_SIGBUS; - goto out; -} - -/* - * Local MMAP vmops to 'catch' faults - */ -static const struct vm_operations_struct cxlflash_mmap_vmops = { - .fault = cxlflash_mmap_fault, -}; - -/** - * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor - * @file: File installed with adapter file descriptor. - * @vma: VM area associated with mapping. - * - * Installs local mmap vmops to 'catch' faults for error notification support. - * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, - cxl_fops); - void *ctx = cfg->ops->fops_get_context(file); - struct device *dev = &cfg->dev->dev; - struct ctx_info *ctxi = NULL; - enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; - int ctxid; - int rc = 0; - - ctxid = cfg->ops->process_element(ctx); - if (unlikely(ctxid < 0)) { - dev_err(dev, "%s: Context %p was closed ctxid=%d\n", - __func__, ctx, ctxid); - rc = -EIO; - goto out; - } - - ctxi = get_context(cfg, ctxid, file, ctrl); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid); - rc = -EIO; - goto out; - } - - dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid); - - rc = cfg->ops->fd_mmap(file, vma); - if (likely(!rc)) { - /* Insert ourself in the mmap fault handler path */ - ctxi->cxl_mmap_vmops = vma->vm_ops; - vma->vm_ops = &cxlflash_mmap_vmops; - } - -out: - if (likely(ctxi)) - put_context(ctxi); - return rc; -} - -const struct file_operations cxlflash_cxl_fops = { - .owner = THIS_MODULE, - .mmap = cxlflash_cxl_mmap, - .release = cxlflash_cxl_release, -}; - -/** - * cxlflash_mark_contexts_error() - move contexts to error state and list - * @cfg: Internal structure associated with the host. - * - * A context is only moved over to the error list when there are no outstanding - * references to it. This ensures that a running operation has completed. 
- * - * Return: 0 on success, -errno on failure - */ -int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg) -{ - int i, rc = 0; - struct ctx_info *ctxi = NULL; - - mutex_lock(&cfg->ctx_tbl_list_mutex); - - for (i = 0; i < MAX_CONTEXT; i++) { - ctxi = cfg->ctx_tbl[i]; - if (ctxi) { - mutex_lock(&ctxi->mutex); - cfg->ctx_tbl[i] = NULL; - list_add(&ctxi->list, &cfg->ctx_err_recovery); - ctxi->err_recovery_active = true; - ctxi->ctrl_map = NULL; - unmap_context(ctxi); - mutex_unlock(&ctxi->mutex); - } - } - - mutex_unlock(&cfg->ctx_tbl_list_mutex); - return rc; -} - -/* - * Dummy NULL fops - */ -static const struct file_operations null_fops = { - .owner = THIS_MODULE, -}; - -/** - * check_state() - checks and responds to the current adapter state - * @cfg: Internal structure associated with the host. - * - * This routine can block and should only be used on process context. - * It assumes that the caller is an ioctl thread and holding the ioctl - * read semaphore. This is temporarily let up across the wait to allow - * for draining actively running ioctls. Also note that when waking up - * from waiting in reset, the state is unknown and must be checked again - * before proceeding. - * - * Return: 0 on success, -errno on failure - */ -int check_state(struct cxlflash_cfg *cfg) -{ - struct device *dev = &cfg->dev->dev; - int rc = 0; - -retry: - switch (cfg->state) { - case STATE_RESET: - dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__); - up_read(&cfg->ioctl_rwsem); - rc = wait_event_interruptible(cfg->reset_waitq, - cfg->state != STATE_RESET); - down_read(&cfg->ioctl_rwsem); - if (unlikely(rc)) - break; - goto retry; - case STATE_FAILTERM: - dev_dbg(dev, "%s: Failed/Terminating\n", __func__); - rc = -ENODEV; - break; - default: - break; - } - - return rc; -} - -/** - * cxlflash_disk_attach() - attach a LUN to a context - * @sdev: SCSI device associated with LUN. - * @attach: Attach ioctl data structure. - * - * Creates a context and attaches LUN to it. A LUN can only be attached - * one time to a context (subsequent attaches for the same context/LUN pair - * are not supported). Additional LUNs can be attached to a context by - * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header. 
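For orientation, a hedged user-space sketch of the attach handshake this routine implements (the device path is illustrative; the authoritative structure layout lives in uapi/scsi/cxlflash_ioctl.h):

	struct dk_cxlflash_attach attach;
	int disk_fd = open("/dev/sdN", O_RDWR);	/* cxlflash LUN (example) */

	memset(&attach, 0, sizeof(attach));
	attach.num_interrupts = 4;	/* per-context limit, see below */
	attach.hdr.flags = O_RDWR;	/* translated to AFU read/write perms */

	if (disk_fd < 0 || ioctl(disk_fd, DK_CXLFLASH_ATTACH, &attach) == -1)
		return -1;

	/* Tokens needed by every subsequent superpipe ioctl */
	ctxid     = attach.context_id;
	adap_fd   = attach.adap_fd;	/* mmap() this for MMIO access */
	mmio_size = attach.mmio_size;	/* 64KB per-context host area */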
- * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_disk_attach(struct scsi_device *sdev, - struct dk_cxlflash_attach *attach) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct afu *afu = cfg->afu; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct ctx_info *ctxi = NULL; - struct lun_access *lun_access = NULL; - int rc = 0; - u32 perms; - int ctxid = -1; - u64 irqs = attach->num_interrupts; - u64 flags = 0UL; - u64 rctxid = 0UL; - struct file *file = NULL; - - void *ctx = NULL; - - int fd = -1; - - if (irqs > 4) { - dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n", - __func__, irqs); - rc = -EINVAL; - goto out; - } - - if (gli->max_lba == 0) { - dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n", - __func__, lli->lun_id[sdev->channel]); - rc = read_cap16(sdev, lli); - if (rc) { - dev_err(dev, "%s: Invalid device rc=%d\n", - __func__, rc); - rc = -ENODEV; - goto out; - } - dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba); - dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len); - } - - if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) { - rctxid = attach->context_id; - ctxi = get_context(cfg, rctxid, NULL, 0); - if (!ctxi) { - dev_dbg(dev, "%s: Bad context rctxid=%016llx\n", - __func__, rctxid); - rc = -EINVAL; - goto out; - } - - list_for_each_entry(lun_access, &ctxi->luns, list) - if (lun_access->lli == lli) { - dev_dbg(dev, "%s: Already attached\n", - __func__); - rc = -EINVAL; - goto out; - } - } - - rc = scsi_device_get(sdev); - if (unlikely(rc)) { - dev_err(dev, "%s: Unable to get sdev reference\n", __func__); - goto out; - } - - lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL); - if (unlikely(!lun_access)) { - dev_err(dev, "%s: Unable to allocate lun_access\n", __func__); - rc = -ENOMEM; - goto err; - } - - lun_access->lli = lli; - lun_access->sdev = sdev; - - /* Non-NULL context indicates reuse (another context reference) */ - if (ctxi) { - dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n", - __func__, rctxid); - kref_get(&ctxi->kref); - list_add(&lun_access->list, &ctxi->luns); - goto out_attach; - } - - ctxi = create_context(cfg); - if (unlikely(!ctxi)) { - dev_err(dev, "%s: Failed to create context ctxid=%d\n", - __func__, ctxid); - rc = -ENOMEM; - goto err; - } - - ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); - if (IS_ERR_OR_NULL(ctx)) { - dev_err(dev, "%s: Could not initialize context %p\n", - __func__, ctx); - rc = -ENODEV; - goto err; - } - - rc = cfg->ops->start_work(ctx, irqs); - if (unlikely(rc)) { - dev_dbg(dev, "%s: Could not start context rc=%d\n", - __func__, rc); - goto err; - } - - ctxid = cfg->ops->process_element(ctx); - if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { - dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); - rc = -EPERM; - goto err; - } - - file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd); - if (unlikely(fd < 0)) { - rc = -ENODEV; - dev_err(dev, "%s: Could not get file descriptor\n", __func__); - goto err; - } - - /* Translate read/write O_* flags from fcntl.h to AFU permission bits */ - perms = SISL_RHT_PERM(attach->hdr.flags + 1); - - /* Context mutex is locked upon return */ - init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs); - - rc = afu_attach(cfg, ctxi); - if (unlikely(rc)) { - dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); - goto err; - } - - /* - * No error paths after this point. 
Once the fd is installed it's - * visible to user space and can't be undone safely on this thread. - * There is no need to worry about a deadlock here because no one - * knows about us yet; we can be the only one holding our mutex. - */ - list_add(&lun_access->list, &ctxi->luns); - mutex_lock(&cfg->ctx_tbl_list_mutex); - mutex_lock(&ctxi->mutex); - cfg->ctx_tbl[ctxid] = ctxi; - mutex_unlock(&cfg->ctx_tbl_list_mutex); - fd_install(fd, file); - -out_attach: - if (fd != -1) - flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD; - if (afu_is_sq_cmd_mode(afu)) - flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE; - - attach->hdr.return_flags = flags; - attach->context_id = ctxi->ctxid; - attach->block_size = gli->blk_len; - attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea); - attach->last_lba = gli->max_lba; - attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT; - attach->max_xfer /= gli->blk_len; - -out: - attach->adap_fd = fd; - - if (ctxi) - put_context(ctxi); - - dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n", - __func__, ctxid, fd, attach->block_size, rc, attach->last_lba); - return rc; - -err: - /* Cleanup CXL context; okay to 'stop' even if it was not started */ - if (!IS_ERR_OR_NULL(ctx)) { - cfg->ops->stop_context(ctx); - cfg->ops->release_context(ctx); - ctx = NULL; - } - - /* - * Here, we're overriding the fops with a dummy all-NULL fops because - * fput() calls the release fop, which will cause us to mistakenly - * call into the CXL code. Rather than try to add yet more complexity - * to that routine (cxlflash_cxl_release) we should try to fix the - * issue here. - */ - if (fd > 0) { - file->f_op = &null_fops; - fput(file); - put_unused_fd(fd); - fd = -1; - file = NULL; - } - - /* Cleanup our context */ - if (ctxi) { - destroy_context(cfg, ctxi); - ctxi = NULL; - } - - kfree(lun_access); - scsi_device_put(sdev); - goto out; -} - -/** - * recover_context() - recovers a context in error - * @cfg: Internal structure associated with the host. - * @ctxi: Context to recover. - * @adap_fd: Adapter file descriptor associated with new/recovered context. - * - * Re-establishes the state for a context-in-error. - * - * Return: 0 on success, -errno on failure - */ -static int recover_context(struct cxlflash_cfg *cfg, - struct ctx_info *ctxi, - int *adap_fd) -{ - struct device *dev = &cfg->dev->dev; - int rc = 0; - int fd = -1; - int ctxid = -1; - struct file *file; - void *ctx; - struct afu *afu = cfg->afu; - - ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); - if (IS_ERR_OR_NULL(ctx)) { - dev_err(dev, "%s: Could not initialize context %p\n", - __func__, ctx); - rc = -ENODEV; - goto out; - } - - rc = cfg->ops->start_work(ctx, ctxi->irqs); - if (unlikely(rc)) { - dev_dbg(dev, "%s: Could not start context rc=%d\n", - __func__, rc); - goto err1; - } - - ctxid = cfg->ops->process_element(ctx); - if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { - dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); - rc = -EPERM; - goto err2; - } - - file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd); - if (unlikely(fd < 0)) { - rc = -ENODEV; - dev_err(dev, "%s: Could not get file descriptor\n", __func__); - goto err2; - } - - /* Update with new MMIO area based on updated context id */ - ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; - - rc = afu_attach(cfg, ctxi); - if (rc) { - dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); - goto err3; - } - - /* - * No error paths after this point.
Once the fd is installed it's - * visible to user space and can't be undone safely on this thread. - */ - ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); - ctxi->ctx = ctx; - ctxi->file = file; - - /* - * Put context back in table (note the reinit of the context list); - * we must first drop the context's mutex and then acquire it in - * order with the table/list mutex to avoid a deadlock - safe to do - * here because no one can find us at this moment in time. - */ - mutex_unlock(&ctxi->mutex); - mutex_lock(&cfg->ctx_tbl_list_mutex); - mutex_lock(&ctxi->mutex); - list_del_init(&ctxi->list); - cfg->ctx_tbl[ctxid] = ctxi; - mutex_unlock(&cfg->ctx_tbl_list_mutex); - fd_install(fd, file); - *adap_fd = fd; -out: - dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n", - __func__, ctxid, fd, rc); - return rc; - -err3: - fput(file); - put_unused_fd(fd); -err2: - cfg->ops->stop_context(ctx); -err1: - cfg->ops->release_context(ctx); - goto out; -} - -/** - * cxlflash_afu_recover() - initiates AFU recovery - * @sdev: SCSI device associated with LUN. - * @recover: Recover ioctl data structure. - * - * Only a single recovery is allowed at a time to avoid exhausting CXL - * resources (leading to recovery failure) in the event that we're up - * against the maximum number of contexts limit. For similar reasons, - * a context recovery is retried if there are multiple recoveries taking - * place at the same time and the failure was due to CXL services being - * unable to keep up. - * - * As this routine is called on ioctl context, it holds the ioctl r/w - * semaphore that is used to drain ioctls in recovery scenarios. The - * implementation to achieve the pacing described above (a local mutex) - * requires that the ioctl r/w semaphore be dropped and reacquired to - * avoid a 3-way deadlock when multiple process recoveries operate in - * parallel. - * - * Because a user can detect an error condition before the kernel, it is - * quite possible for this routine to act as the kernel's EEH detection - * source (MMIO read of mbox_r). Because of this, there is a window of - * time where an EEH might have been detected but not yet 'serviced' - * (callback invoked, causing the device to enter reset state). To avoid - * looping in this routine during that window, a 1 second sleep is in place - * between the time the MMIO failure is detected and the time a wait on the - * reset wait queue is attempted via check_state(). 
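From the application's perspective, the recovery contract sketched above is: detect the all-ones MMIO read, issue DK_CXLFLASH_RECOVER_AFU, and re-establish state if the context was reset. A hedged sketch (token names as in the attach example earlier):

	struct dk_cxlflash_recover_afu rec;

	memset(&rec, 0, sizeof(rec));
	rec.context_id = ctxid;

	if (ioctl(disk_fd, DK_CXLFLASH_RECOVER_AFU, &rec) == -1)
		return -1;

	if (rec.hdr.return_flags & DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET) {
		/* Fresh context: adopt the new tokens and remap MMIO */
		ctxid   = rec.context_id;
		adap_fd = rec.adap_fd;
	}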
- * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_afu_recover(struct scsi_device *sdev, - struct dk_cxlflash_recover_afu *recover) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct afu *afu = cfg->afu; - struct ctx_info *ctxi = NULL; - struct mutex *mutex = &cfg->ctx_recovery_mutex; - struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); - u64 flags; - u64 ctxid = DECODE_CTXID(recover->context_id), - rctxid = recover->context_id; - long reg; - bool locked = true; - int lretry = 20; /* up to 2 seconds */ - int new_adap_fd = -1; - int rc = 0; - - atomic_inc(&cfg->recovery_threads); - up_read(&cfg->ioctl_rwsem); - rc = mutex_lock_interruptible(mutex); - down_read(&cfg->ioctl_rwsem); - if (rc) { - locked = false; - goto out; - } - - rc = check_state(cfg); - if (rc) { - dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc); - rc = -ENODEV; - goto out; - } - - dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n", - __func__, recover->reason, rctxid); - -retry: - /* Ensure that this process is attached to the context */ - ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); - rc = -EINVAL; - goto out; - } - - if (ctxi->err_recovery_active) { -retry_recover: - rc = recover_context(cfg, ctxi, &new_adap_fd); - if (unlikely(rc)) { - dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n", - __func__, ctxid, rc); - if ((rc == -ENODEV) && - ((atomic_read(&cfg->recovery_threads) > 1) || - (lretry--))) { - dev_dbg(dev, "%s: Going to try again\n", - __func__); - mutex_unlock(mutex); - msleep(100); - rc = mutex_lock_interruptible(mutex); - if (rc) { - locked = false; - goto out; - } - goto retry_recover; - } - - goto out; - } - - ctxi->err_recovery_active = false; - - flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD | - DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; - if (afu_is_sq_cmd_mode(afu)) - flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE; - - recover->hdr.return_flags = flags; - recover->context_id = ctxi->ctxid; - recover->adap_fd = new_adap_fd; - recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); - goto out; - } - - /* Test if in error state */ - reg = readq_be(&hwq->ctrl_map->mbox_r); - if (reg == -1) { - dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__); - - /* - * Before checking the state, put back the context obtained with - * get_context() as it is no longer needed and sleep for a short - * period of time (see prolog notes). - */ - put_context(ctxi); - ctxi = NULL; - ssleep(1); - rc = check_state(cfg); - if (unlikely(rc)) - goto out; - goto retry; - } - - dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__); -out: - if (likely(ctxi)) - put_context(ctxi); - if (locked) - mutex_unlock(mutex); - atomic_dec_if_positive(&cfg->recovery_threads); - return rc; -} - -/** - * process_sense() - evaluates and processes sense data - * @sdev: SCSI device associated with LUN. - * @verify: Verify ioctl data structure. 
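Applications feed the raw sense bytes of a unit attention back through DK_CXLFLASH_VERIFY so that this evaluation can run; roughly (sense_buf is the application's copy of the sense data; illustrative):

	struct dk_cxlflash_verify verify;

	memset(&verify, 0, sizeof(verify));
	verify.context_id  = ctxid;
	verify.rsrc_handle = rhndl;
	verify.hint        = DK_CXLFLASH_VERIFY_HINT_SENSE;
	memcpy(verify.sense_data, sense_buf, DK_CXLFLASH_VERIFY_SENSE_LEN);

	if (ioctl(disk_fd, DK_CXLFLASH_VERIFY, &verify) == 0)
		last_lba = verify.last_lba;	/* capacity may have changed */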
- * - * Return: 0 on success, -errno on failure - */ -static int process_sense(struct scsi_device *sdev, - struct dk_cxlflash_verify *verify) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - u64 prev_lba = gli->max_lba; - struct scsi_sense_hdr sshdr = { 0 }; - int rc = 0; - - rc = scsi_normalize_sense((const u8 *)&verify->sense_data, - DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr); - if (!rc) { - dev_err(dev, "%s: Failed to normalize sense data\n", __func__); - rc = -EINVAL; - goto out; - } - - switch (sshdr.sense_key) { - case NO_SENSE: - case RECOVERED_ERROR: - case NOT_READY: - break; - case UNIT_ATTENTION: - switch (sshdr.asc) { - case 0x29: /* Power on Reset or Device Reset */ - fallthrough; - case 0x2A: /* Device settings/capacity changed */ - rc = read_cap16(sdev, lli); - if (rc) { - rc = -ENODEV; - break; - } - if (prev_lba != gli->max_lba) - dev_dbg(dev, "%s: Capacity changed old=%lld " - "new=%lld\n", __func__, prev_lba, - gli->max_lba); - break; - case 0x3F: /* Report LUNs changed, Rescan. */ - scsi_scan_host(cfg->host); - break; - default: - rc = -EIO; - break; - } - break; - default: - rc = -EIO; - break; - } -out: - dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__, - sshdr.sense_key, sshdr.asc, sshdr.ascq, rc); - return rc; -} - -/** - * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes - * @sdev: SCSI device associated with LUN. - * @verify: Verify ioctl data structure. - * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_disk_verify(struct scsi_device *sdev, - struct dk_cxlflash_verify *verify) -{ - int rc = 0; - struct ctx_info *ctxi = NULL; - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct sisl_rht_entry *rhte = NULL; - res_hndl_t rhndl = verify->rsrc_handle; - u64 ctxid = DECODE_CTXID(verify->context_id), - rctxid = verify->context_id; - u64 last_lba = 0; - - dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, " - "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle, - verify->hint, verify->hdr.flags); - - ctxi = get_context(cfg, rctxid, lli, 0); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); - rc = -EINVAL; - goto out; - } - - rhte = get_rhte(ctxi, rhndl, lli); - if (unlikely(!rhte)) { - dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", - __func__, rhndl); - rc = -EINVAL; - goto out; - } - - /* - * Look at the hint/sense to see if it requires us to redrive - * inquiry (i.e. the Unit attention is due to the WWN changing). - */ - if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) { - /* Can't hold mutex across process_sense/read_cap16, - * since we could have an intervening EEH event. 
- */ - ctxi->unavail = true; - mutex_unlock(&ctxi->mutex); - rc = process_sense(sdev, verify); - if (unlikely(rc)) { - dev_err(dev, "%s: Failed to validate sense data (%d)\n", - __func__, rc); - mutex_lock(&ctxi->mutex); - ctxi->unavail = false; - goto out; - } - mutex_lock(&ctxi->mutex); - ctxi->unavail = false; - } - - switch (gli->mode) { - case MODE_PHYSICAL: - last_lba = gli->max_lba; - break; - case MODE_VIRTUAL: - /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */ - last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len); - last_lba /= CXLFLASH_BLOCK_SIZE; - last_lba--; - break; - default: - WARN(1, "Unsupported LUN mode!"); - } - - verify->last_lba = last_lba; - -out: - if (likely(ctxi)) - put_context(ctxi); - dev_dbg(dev, "%s: returning rc=%d llba=%llx\n", - __func__, rc, verify->last_lba); - return rc; -} - -/** - * decode_ioctl() - translates an encoded ioctl to an easily identifiable string - * @cmd: The ioctl command to decode. - * - * Return: A string identifying the decoded ioctl. - */ -static char *decode_ioctl(unsigned int cmd) -{ - switch (cmd) { - case DK_CXLFLASH_ATTACH: - return __stringify_1(DK_CXLFLASH_ATTACH); - case DK_CXLFLASH_USER_DIRECT: - return __stringify_1(DK_CXLFLASH_USER_DIRECT); - case DK_CXLFLASH_USER_VIRTUAL: - return __stringify_1(DK_CXLFLASH_USER_VIRTUAL); - case DK_CXLFLASH_VLUN_RESIZE: - return __stringify_1(DK_CXLFLASH_VLUN_RESIZE); - case DK_CXLFLASH_RELEASE: - return __stringify_1(DK_CXLFLASH_RELEASE); - case DK_CXLFLASH_DETACH: - return __stringify_1(DK_CXLFLASH_DETACH); - case DK_CXLFLASH_VERIFY: - return __stringify_1(DK_CXLFLASH_VERIFY); - case DK_CXLFLASH_VLUN_CLONE: - return __stringify_1(DK_CXLFLASH_VLUN_CLONE); - case DK_CXLFLASH_RECOVER_AFU: - return __stringify_1(DK_CXLFLASH_RECOVER_AFU); - case DK_CXLFLASH_MANAGE_LUN: - return __stringify_1(DK_CXLFLASH_MANAGE_LUN); - } - - return "UNKNOWN"; -} - -/** - * cxlflash_disk_direct_open() - opens a direct (physical) disk - * @sdev: SCSI device associated with LUN. - * @arg: UDirect ioctl data structure. - * - * On successful return, the user is informed of the resource handle - * to be used to identify the direct lun and the size (in blocks) of - * the direct lun in last LBA format. 
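[decode_ioctl() above pairs each command code with its name through __stringify so the string can never drift from the identifier. A compilable sketch of the same idiom, with placeholder command values (DEMO_* are hypothetical, not the real DK_CXLFLASH_* encodings):

#include <stdio.h>

/* Stringify each case label so name and value stay in sync. */
#define IOCTL_NAME(c)	case c: return #c

enum { DEMO_ATTACH = 0xca80, DEMO_DETACH = 0xca81 };	/* made up */

static const char *decode(unsigned int cmd)
{
	switch (cmd) {
	IOCTL_NAME(DEMO_ATTACH);
	IOCTL_NAME(DEMO_DETACH);
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", decode(DEMO_ATTACH));	/* prints DEMO_ATTACH */
	printf("%s\n", decode(0));		/* prints UNKNOWN */
	return 0;
}]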
- * - * Return: 0 on success, -errno on failure - */ -static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct afu *afu = cfg->afu; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct dk_cxlflash_release rel = { { 0 }, 0 }; - - struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg; - - u64 ctxid = DECODE_CTXID(pphys->context_id), - rctxid = pphys->context_id; - u64 lun_size = 0; - u64 last_lba = 0; - u64 rsrc_handle = -1; - u32 port = CHAN2PORTMASK(sdev->channel); - - int rc = 0; - - struct ctx_info *ctxi = NULL; - struct sisl_rht_entry *rhte = NULL; - - dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size); - - rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false); - if (unlikely(rc)) { - dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__); - goto out; - } - - ctxi = get_context(cfg, rctxid, lli, 0); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); - rc = -EINVAL; - goto err1; - } - - rhte = rhte_checkout(ctxi, lli); - if (unlikely(!rhte)) { - dev_dbg(dev, "%s: Too many opens ctxid=%lld\n", - __func__, ctxid); - rc = -EMFILE; /* too many opens */ - goto err1; - } - - rsrc_handle = (rhte - ctxi->rht_start); - - rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port); - - last_lba = gli->max_lba; - pphys->hdr.return_flags = 0; - pphys->last_lba = last_lba; - pphys->rsrc_handle = rsrc_handle; - - rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC); - if (unlikely(rc)) { - dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc); - goto err2; - } - -out: - if (likely(ctxi)) - put_context(ctxi); - dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n", - __func__, rsrc_handle, rc, last_lba); - return rc; - -err2: - marshal_udir_to_rele(pphys, &rel); - _cxlflash_disk_release(sdev, ctxi, &rel); - goto out; -err1: - cxlflash_lun_detach(gli); - goto out; -} - -/** - * ioctl_common() - common IOCTL handler for driver - * @sdev: SCSI device associated with LUN. - * @cmd: IOCTL command. - * - * Handles common fencing operations that are valid for multiple ioctls. Always - * allow through ioctls that are cleanup oriented in nature, even when operating - * in a failed/terminating state. - * - * Return: 0 on success, -errno on failure - */ -static int ioctl_common(struct scsi_device *sdev, unsigned int cmd) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - int rc = 0; - - if (unlikely(!lli)) { - dev_dbg(dev, "%s: Unknown LUN\n", __func__); - rc = -EINVAL; - goto out; - } - - rc = check_state(cfg); - if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) { - switch (cmd) { - case DK_CXLFLASH_VLUN_RESIZE: - case DK_CXLFLASH_RELEASE: - case DK_CXLFLASH_DETACH: - dev_dbg(dev, "%s: Command override rc=%d\n", - __func__, rc); - rc = 0; - break; - } - } -out: - return rc; -} - -/** - * cxlflash_ioctl() - IOCTL handler for driver - * @sdev: SCSI device associated with LUN. - * @cmd: IOCTL command. - * @arg: Userspace ioctl data structure. - * - * A read/write semaphore is used to implement a 'drain' of currently - * running ioctls. The read semaphore is taken at the beginning of each - * ioctl thread and released upon concluding execution. 
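[The prologue here describes the driver's ioctl "drain": every ioctl holds the read side of a semaphore, and a resetting thread quiesces them by taking the write side. A minimal userspace analogue using a POSIX rwlock, offered only as a sketch of the pattern (link with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ioctl_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static void do_ioctl_work(void)
{
	pthread_rwlock_rdlock(&ioctl_rwsem);
	/* ioctl body; long waits must drop and re-take the read lock,
	 * as the driver comment requires, or the drain deadlocks. */
	pthread_rwlock_unlock(&ioctl_rwsem);
}

static void drain_ioctls(void)
{
	pthread_rwlock_wrlock(&ioctl_rwsem);	/* blocks until no readers */
	/* reset runs with no ioctl in flight */
	pthread_rwlock_unlock(&ioctl_rwsem);
}

int main(void)
{
	do_ioctl_work();
	drain_ioctls();
	puts("drained");
	return 0;
}]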
Additionally the - * semaphore should be released and then reacquired in any ioctl execution - * path which will wait for an event to occur that is outside the scope of - * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, - * a thread simply needs to acquire the write semaphore. - * - * Return: 0 on success, -errno on failure - */ -int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) -{ - typedef int (*sioctl) (struct scsi_device *, void *); - - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct afu *afu = cfg->afu; - struct dk_cxlflash_hdr *hdr; - char buf[sizeof(union cxlflash_ioctls)]; - size_t size = 0; - bool known_ioctl = false; - int idx; - int rc = 0; - struct Scsi_Host *shost = sdev->host; - sioctl do_ioctl = NULL; - - static const struct { - size_t size; - sioctl ioctl; - } ioctl_tbl[] = { /* NOTE: order matters here */ - {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach}, - {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open}, - {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release}, - {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach}, - {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify}, - {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover}, - {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun}, - {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open}, - {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize}, - {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone}, - }; - - /* Hold read semaphore so we can drain if needed */ - down_read(&cfg->ioctl_rwsem); - - /* Restrict command set to physical support only for internal LUN */ - if (afu->internal_lun) - switch (cmd) { - case DK_CXLFLASH_RELEASE: - case DK_CXLFLASH_USER_VIRTUAL: - case DK_CXLFLASH_VLUN_RESIZE: - case DK_CXLFLASH_VLUN_CLONE: - dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n", - __func__, decode_ioctl(cmd), afu->internal_lun); - rc = -EINVAL; - goto cxlflash_ioctl_exit; - } - - switch (cmd) { - case DK_CXLFLASH_ATTACH: - case DK_CXLFLASH_USER_DIRECT: - case DK_CXLFLASH_RELEASE: - case DK_CXLFLASH_DETACH: - case DK_CXLFLASH_VERIFY: - case DK_CXLFLASH_RECOVER_AFU: - case DK_CXLFLASH_USER_VIRTUAL: - case DK_CXLFLASH_VLUN_RESIZE: - case DK_CXLFLASH_VLUN_CLONE: - dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n", - __func__, decode_ioctl(cmd), cmd, shost->host_no, - sdev->channel, sdev->id, sdev->lun); - rc = ioctl_common(sdev, cmd); - if (unlikely(rc)) - goto cxlflash_ioctl_exit; - - fallthrough; - - case DK_CXLFLASH_MANAGE_LUN: - known_ioctl = true; - idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH); - size = ioctl_tbl[idx].size; - do_ioctl = ioctl_tbl[idx].ioctl; - - if (likely(do_ioctl)) - break; - - fallthrough; - default: - rc = -EINVAL; - goto cxlflash_ioctl_exit; - } - - if (unlikely(copy_from_user(&buf, arg, size))) { - dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n", - __func__, size, cmd, decode_ioctl(cmd), arg); - rc = -EFAULT; - goto cxlflash_ioctl_exit; - } - - hdr = (struct dk_cxlflash_hdr *)&buf; - if (hdr->version != DK_CXLFLASH_VERSION_0) { - dev_dbg(dev, "%s: Version %u not supported for %s\n", - __func__, hdr->version, decode_ioctl(cmd)); - rc = -EINVAL; - goto cxlflash_ioctl_exit; - } - - if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) { - dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__); - rc = 
-EINVAL; - goto cxlflash_ioctl_exit; - } - - rc = do_ioctl(sdev, (void *)&buf); - if (likely(!rc)) - if (unlikely(copy_to_user(arg, &buf, size))) { - dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n", - __func__, size, cmd, decode_ioctl(cmd), arg); - rc = -EFAULT; - } - - /* fall through to exit */ - -cxlflash_ioctl_exit: - up_read(&cfg->ioctl_rwsem); - if (unlikely(rc && known_ioctl)) - dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " - "returned rc %d\n", __func__, - decode_ioctl(cmd), cmd, shost->host_no, - sdev->channel, sdev->id, sdev->lun, rc); - else - dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " - "returned rc %d\n", __func__, decode_ioctl(cmd), - cmd, shost->host_no, sdev->channel, sdev->id, - sdev->lun, rc); - return rc; -} diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h deleted file mode 100644 index 0e3b45964066..000000000000 --- a/drivers/scsi/cxlflash/superpipe.h +++ /dev/null @@ -1,153 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#ifndef _CXLFLASH_SUPERPIPE_H -#define _CXLFLASH_SUPERPIPE_H - -extern struct cxlflash_global global; - -/* - * Terminology: use afu (and not adapter) to refer to the HW. - * Adapter is the entire slot and includes PSL out of which - * only the AFU is visible to user space. - */ - -/* Chunk size parms: note sislite minimum chunk size is - * 0x10000 LBAs corresponding to a NMASK or 16. - */ -#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */ - -#define CMD_TIMEOUT 30 /* 30 secs */ -#define CMD_RETRIES 5 /* 5 retries for scsi_execute */ - -#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */ - -enum lun_mode { - MODE_NONE = 0, - MODE_VIRTUAL, - MODE_PHYSICAL -}; - -/* Global (entire driver, spans adapters) lun_info structure */ -struct glun_info { - u64 max_lba; /* from read cap(16) */ - u32 blk_len; /* from read cap(16) */ - enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */ - int users; /* Number of users w/ references to LUN */ - - u8 wwid[16]; - - struct mutex mutex; - - struct blka blka; - struct list_head list; -}; - -/* Local (per-adapter) lun_info structure */ -struct llun_info { - u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */ - u32 lun_index; /* Index in the LUN table */ - u32 host_no; /* host_no from Scsi_host */ - u32 port_sel; /* What port to use for this LUN */ - bool in_table; /* Whether a LUN table entry was created */ - - u8 wwid[16]; /* Keep a duplicate copy here? 
*/ - - struct glun_info *parent; /* Pointer to entry in global LUN structure */ - struct scsi_device *sdev; - struct list_head list; -}; - -struct lun_access { - struct llun_info *lli; - struct scsi_device *sdev; - struct list_head list; -}; - -enum ctx_ctrl { - CTX_CTRL_CLONE = (1 << 1), - CTX_CTRL_ERR = (1 << 2), - CTX_CTRL_ERR_FALLBACK = (1 << 3), - CTX_CTRL_NOPID = (1 << 4), - CTX_CTRL_FILE = (1 << 5) -}; - -#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id) -#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF) - -struct ctx_info { - struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */ - struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment), - * alloc/free on attach/detach - */ - u32 rht_out; /* Number of checked out RHT entries */ - u32 rht_perms; /* User-defined permissions for RHT entries */ - struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */ - u8 *rht_needs_ws; /* User-desired write-same function per RHTE */ - - u64 ctxid; - u64 irqs; /* Number of interrupts requested for context */ - pid_t pid; - bool initialized; - bool unavail; - bool err_recovery_active; - struct mutex mutex; /* Context protection */ - struct kref kref; - void *ctx; - struct cxlflash_cfg *cfg; - struct list_head luns; /* LUNs attached to this context */ - const struct vm_operations_struct *cxl_mmap_vmops; - struct file *file; - struct list_head list; /* Link contexts in error recovery */ -}; - -struct cxlflash_global { - struct mutex mutex; - struct list_head gluns;/* list of glun_info structs */ - struct page *err_page; /* One page of all 0xF for error notification */ -}; - -int cxlflash_vlun_resize(struct scsi_device *sdev, - struct dk_cxlflash_resize *resize); -int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi, - struct dk_cxlflash_resize *resize); - -int cxlflash_disk_release(struct scsi_device *sdev, - struct dk_cxlflash_release *release); -int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi, - struct dk_cxlflash_release *release); - -int cxlflash_disk_clone(struct scsi_device *sdev, - struct dk_cxlflash_clone *clone); - -int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg); - -int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked); -void cxlflash_lun_detach(struct glun_info *gli); - -struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxit, void *arg, - enum ctx_ctrl ctrl); -void put_context(struct ctx_info *ctxi); - -struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, - struct llun_info *lli); - -struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, - struct llun_info *lli); -void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte); - -void cxlflash_ba_terminate(struct ba_lun *ba_lun); - -int cxlflash_manage_lun(struct scsi_device *sdev, - struct dk_cxlflash_manage_lun *manage); - -int check_state(struct cxlflash_cfg *cfg); - -#endif /* ifndef _CXLFLASH_SUPERPIPE_H */ diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c deleted file mode 100644 index cbd5a648a131..000000000000 --- a/drivers/scsi/cxlflash/vlun.c +++ /dev/null @@ -1,1337 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. 
Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/syscalls.h> -#include <asm/unaligned.h> -#include <asm/bitsperlong.h> - -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_host.h> -#include <uapi/scsi/cxlflash_ioctl.h> - -#include "sislite.h" -#include "common.h" -#include "vlun.h" -#include "superpipe.h" - -/** - * marshal_virt_to_resize() - translate uvirtual to resize structure - * @virt: Source structure from which to translate/copy. - * @resize: Destination structure for the translate/copy. - */ -static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt, - struct dk_cxlflash_resize *resize) -{ - resize->hdr = virt->hdr; - resize->context_id = virt->context_id; - resize->rsrc_handle = virt->rsrc_handle; - resize->req_size = virt->lun_size; - resize->last_lba = virt->last_lba; -} - -/** - * marshal_clone_to_rele() - translate clone to release structure - * @clone: Source structure from which to translate/copy. - * @release: Destination structure for the translate/copy. - */ -static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone, - struct dk_cxlflash_release *release) -{ - release->hdr = clone->hdr; - release->context_id = clone->context_id_dst; -} - -/** - * ba_init() - initializes a block allocator - * @ba_lun: Block allocator to initialize. - * - * Return: 0 on success, -errno on failure - */ -static int ba_init(struct ba_lun *ba_lun) -{ - struct ba_lun_info *bali = NULL; - int lun_size_au = 0, i = 0; - int last_word_underflow = 0; - u64 *lam; - - pr_debug("%s: Initializing LUN: lun_id=%016llx " - "ba_lun->lsize=%lx ba_lun->au_size=%lX\n", - __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size); - - /* Calculate bit map size */ - lun_size_au = ba_lun->lsize / ba_lun->au_size; - if (lun_size_au == 0) { - pr_debug("%s: Requested LUN size of 0!\n", __func__); - return -EINVAL; - } - - /* Allocate lun information container */ - bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL); - if (unlikely(!bali)) { - pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n", - __func__, ba_lun->lun_id); - return -ENOMEM; - } - - bali->total_aus = lun_size_au; - bali->lun_bmap_size = lun_size_au / BITS_PER_LONG; - - if (lun_size_au % BITS_PER_LONG) - bali->lun_bmap_size++; - - /* Allocate bitmap space */ - bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)), - GFP_KERNEL); - if (unlikely(!bali->lun_alloc_map)) { - pr_err("%s: Failed to allocate lun allocation map: " - "lun_id=%016llx\n", __func__, ba_lun->lun_id); - kfree(bali); - return -ENOMEM; - } - - /* Initialize the bit map size and set all bits to '1' */ - bali->free_aun_cnt = lun_size_au; - - for (i = 0; i < bali->lun_bmap_size; i++) - bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL; - - /* If the last word not fully utilized, mark extra bits as allocated */ - last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG); - last_word_underflow -= bali->free_aun_cnt; - if (last_word_underflow > 0) { - lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1]; - for (i = (HIBIT - last_word_underflow + 1); - i < BITS_PER_LONG; - i++) - clear_bit(i, (ulong *)lam); - } - - /* Initialize high elevator index, low/curr already at 0 from kzalloc */ - bali->free_high_idx = bali->lun_bmap_size; - - /* Allocate clone map */ - bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)), - GFP_KERNEL); - if (unlikely(!bali->aun_clone_map)) { - pr_err("%s: Failed to allocate clone map: 
lun_id=%016llx\n", - __func__, ba_lun->lun_id); - kfree(bali->lun_alloc_map); - kfree(bali); - return -ENOMEM; - } - - /* Pass the allocated LUN info as a handle to the user */ - ba_lun->ba_lun_handle = bali; - - pr_debug("%s: Successfully initialized the LUN: " - "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n", - __func__, ba_lun->lun_id, bali->lun_bmap_size, - bali->free_aun_cnt); - return 0; -} - -/** - * find_free_range() - locates a free bit within the block allocator - * @low: First word in block allocator to start search. - * @high: Last word in block allocator to search. - * @bali: LUN information structure owning the block allocator to search. - * @bit_word: Passes back the word in the block allocator owning the free bit. - * - * Return: The bit position within the passed back word, -1 on failure - */ -static int find_free_range(u32 low, - u32 high, - struct ba_lun_info *bali, int *bit_word) -{ - int i; - u64 bit_pos = -1; - ulong *lam, num_bits; - - for (i = low; i < high; i++) - if (bali->lun_alloc_map[i] != 0) { - lam = (ulong *)&bali->lun_alloc_map[i]; - num_bits = (sizeof(*lam) * BITS_PER_BYTE); - bit_pos = find_first_bit(lam, num_bits); - - pr_devel("%s: Found free bit %llu in LUN " - "map entry %016llx at bitmap index = %d\n", - __func__, bit_pos, bali->lun_alloc_map[i], i); - - *bit_word = i; - bali->free_aun_cnt--; - clear_bit(bit_pos, lam); - break; - } - - return bit_pos; -} - -/** - * ba_alloc() - allocates a block from the block allocator - * @ba_lun: Block allocator from which to allocate a block. - * - * Return: The allocated block, -1 on failure - */ -static u64 ba_alloc(struct ba_lun *ba_lun) -{ - u64 bit_pos = -1; - int bit_word = 0; - struct ba_lun_info *bali = NULL; - - bali = ba_lun->ba_lun_handle; - - pr_debug("%s: Received block allocation request: " - "lun_id=%016llx free_aun_cnt=%llx\n", - __func__, ba_lun->lun_id, bali->free_aun_cnt); - - if (bali->free_aun_cnt == 0) { - pr_debug("%s: No space left on LUN: lun_id=%016llx\n", - __func__, ba_lun->lun_id); - return -1ULL; - } - - /* Search to find a free entry, curr->high then low->curr */ - bit_pos = find_free_range(bali->free_curr_idx, - bali->free_high_idx, bali, &bit_word); - if (bit_pos == -1) { - bit_pos = find_free_range(bali->free_low_idx, - bali->free_curr_idx, - bali, &bit_word); - if (bit_pos == -1) { - pr_debug("%s: Could not find an allocation unit on LUN:" - " lun_id=%016llx\n", __func__, ba_lun->lun_id); - return -1ULL; - } - } - - /* Update the free_curr_idx */ - if (bit_pos == HIBIT) - bali->free_curr_idx = bit_word + 1; - else - bali->free_curr_idx = bit_word; - - pr_debug("%s: Allocating AU number=%llx lun_id=%016llx " - "free_aun_cnt=%llx\n", __func__, - ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id, - bali->free_aun_cnt); - - return (u64) ((bit_word * BITS_PER_LONG) + bit_pos); -} - -/** - * validate_alloc() - validates the specified block has been allocated - * @bali: LUN info owning the block allocator. - * @aun: Block to validate. - * - * Return: 0 on success, -1 on failure - */ -static int validate_alloc(struct ba_lun_info *bali, u64 aun) -{ - int idx = 0, bit_pos = 0; - - idx = aun / BITS_PER_LONG; - bit_pos = aun % BITS_PER_LONG; - - if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx])) - return -1; - - return 0; -} - -/** - * ba_free() - frees a block from the block allocator - * @ba_lun: Block allocator from which to allocate a block. - * @to_free: Block to free. 
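[ba_alloc()/find_free_range() above implement an elevator-style scan over a bitmap where a set bit means a free allocation unit: search from the current index to the top, then wrap to the bottom. A runnable toy version (sizes and map contents are examples; __builtin_ctzll stands in for find_first_bit):

#include <stdint.h>
#include <stdio.h>

#define NWORDS 4

static uint64_t map[NWORDS] = { 0, 0, 0x30, 0 };   /* AUs 132,133 free */

static int scan(int lo, int hi, int *word)
{
	for (int i = lo; i < hi; i++) {
		if (!map[i])
			continue;
		int bit = __builtin_ctzll(map[i]);	/* lowest set bit */
		map[i] &= ~(1ULL << bit);		/* mark allocated */
		*word = i;
		return bit;
	}
	return -1;
}

int main(void)
{
	int word, curr = 1;
	int bit = scan(curr, NWORDS, &word);	/* curr..high first */

	if (bit < 0)
		bit = scan(0, curr, &word);	/* then low..curr */
	if (bit >= 0)
		printf("allocated AU %d\n", word * 64 + bit);
	return 0;
}

As in the driver, the caller advances free_curr_idx past a fully drained word so repeated allocations sweep forward instead of rescanning from zero.]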
- * - * Return: 0 on success, -1 on failure - */ -static int ba_free(struct ba_lun *ba_lun, u64 to_free) -{ - int idx = 0, bit_pos = 0; - struct ba_lun_info *bali = NULL; - - bali = ba_lun->ba_lun_handle; - - if (validate_alloc(bali, to_free)) { - pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n", - __func__, to_free, ba_lun->lun_id); - return -1; - } - - pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx " - "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id, - bali->free_aun_cnt); - - if (bali->aun_clone_map[to_free] > 0) { - pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n", - __func__, to_free, ba_lun->lun_id, - bali->aun_clone_map[to_free]); - bali->aun_clone_map[to_free]--; - return 0; - } - - idx = to_free / BITS_PER_LONG; - bit_pos = to_free % BITS_PER_LONG; - - set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]); - bali->free_aun_cnt++; - - if (idx < bali->free_low_idx) - bali->free_low_idx = idx; - else if (idx > bali->free_high_idx) - bali->free_high_idx = idx; - - pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x " - "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx, - ba_lun->lun_id, bali->free_aun_cnt); - - return 0; -} - -/** - * ba_clone() - Clone a chunk of the block allocation table - * @ba_lun: Block allocator from which to allocate a block. - * @to_clone: Block to clone. - * - * Return: 0 on success, -1 on failure - */ -static int ba_clone(struct ba_lun *ba_lun, u64 to_clone) -{ - struct ba_lun_info *bali = ba_lun->ba_lun_handle; - - if (validate_alloc(bali, to_clone)) { - pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n", - __func__, to_clone, ba_lun->lun_id); - return -1; - } - - pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n", - __func__, to_clone, ba_lun->lun_id); - - if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) { - pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n", - __func__, to_clone, ba_lun->lun_id); - return -1; - } - - bali->aun_clone_map[to_clone]++; - - return 0; -} - -/** - * ba_space() - returns the amount of free space left in the block allocator - * @ba_lun: Block allocator. - * - * Return: Amount of free space in block allocator - */ -static u64 ba_space(struct ba_lun *ba_lun) -{ - struct ba_lun_info *bali = ba_lun->ba_lun_handle; - - return bali->free_aun_cnt; -} - -/** - * cxlflash_ba_terminate() - frees resources associated with the block allocator - * @ba_lun: Block allocator. - * - * Safe to call in a partially allocated state. - */ -void cxlflash_ba_terminate(struct ba_lun *ba_lun) -{ - struct ba_lun_info *bali = ba_lun->ba_lun_handle; - - if (bali) { - kfree(bali->aun_clone_map); - kfree(bali->lun_alloc_map); - kfree(bali); - ba_lun->ba_lun_handle = NULL; - } -} - -/** - * init_vlun() - initializes a LUN for virtual use - * @lli: LUN information structure that owns the block allocator. 
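[ba_free() just above encodes the clone rule: an AU with a nonzero clone count is "freed" by dropping one reference, and only the final owner returns the bit to the allocation map. A small sketch of that rule with toy state (names and sizes are illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint8_t clone_map[8];
static uint64_t alloc_map;		/* set bit == free AU */

static void ba_free_au(unsigned au)
{
	if (clone_map[au]) {		/* still shared with a clone */
		clone_map[au]--;
		return;
	}
	alloc_map |= 1ULL << au;	/* last reference: truly free */
}

int main(void)
{
	unsigned au = 3;		/* allocated, then cloned once */

	clone_map[au] = 1;
	ba_free_au(au);			/* clone's release: refcount drop */
	ba_free_au(au);			/* owner's release: bit returns */
	printf("free mask=%#llx clones=%u\n",
	       (unsigned long long)alloc_map, clone_map[au]);
	return 0;
}]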
- * - * Return: 0 on success, -errno on failure - */ -static int init_vlun(struct llun_info *lli) -{ - int rc = 0; - struct glun_info *gli = lli->parent; - struct blka *blka = &gli->blka; - - memset(blka, 0, sizeof(*blka)); - mutex_init(&blka->mutex); - - /* LUN IDs are unique per port, save the index instead */ - blka->ba_lun.lun_id = lli->lun_index; - blka->ba_lun.lsize = gli->max_lba + 1; - blka->ba_lun.lba_size = gli->blk_len; - - blka->ba_lun.au_size = MC_CHUNK_SIZE; - blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE; - - rc = ba_init(&blka->ba_lun); - if (unlikely(rc)) - pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc); - - pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli); - return rc; -} - -/** - * write_same16() - sends a SCSI WRITE_SAME16 (0) command to specified LUN - * @sdev: SCSI device associated with LUN. - * @lba: Logical block address to start write same. - * @nblks: Number of logical blocks to write same. - * - * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur - * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As - * part of the recovery, the handler drains all currently running ioctls, - * waiting until they have completed before proceeding with a reset. As this - * routine is used on the ioctl path, this can create a condition where the - * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To - * avoid this behavior, temporarily unmark this thread as an ioctl thread by - * releasing the ioctl read semaphore. This will allow the EEH handler to - * proceed with a recovery while this thread is still running. Once the - * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the - * adapter state in case it changed while inside of scsi_execute_cmd(). The - * state check will wait if the adapter is still being recovered or return a - * failure if the recovery failed. In the event that the adapter reset failed, - * simply return the failure as the ioctl would be unable to continue. - * - * Note that the above puts a requirement on this routine to only be called on - * an ioctl thread. - * - * Return: 0 on success, -errno on failure - */ -static int write_same16(struct scsi_device *sdev, - u64 lba, - u32 nblks) -{ - u8 *cmd_buf = NULL; - u8 *scsi_cmd = NULL; - int rc = 0; - int result = 0; - u64 offset = lba; - int left = nblks; - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - const u32 s = ilog2(sdev->sector_size) - 9; - const u32 to = sdev->request_queue->rq_timeout; - const u32 ws_limit = - sdev->request_queue->limits.max_write_zeroes_sectors >> s; - - cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); - scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); - if (unlikely(!cmd_buf || !scsi_cmd)) { - rc = -ENOMEM; - goto out; - } - - while (left > 0) { - - scsi_cmd[0] = WRITE_SAME_16; - scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0; - put_unaligned_be64(offset, &scsi_cmd[2]); - put_unaligned_be32(ws_limit < left ? 
ws_limit : left, - &scsi_cmd[10]); - - /* Drop the ioctl read semaphore across lengthy call */ - up_read(&cfg->ioctl_rwsem); - result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT, - cmd_buf, CMD_BUFSIZE, to, - CMD_RETRIES, NULL); - down_read(&cfg->ioctl_rwsem); - rc = check_state(cfg); - if (rc) { - dev_err(dev, "%s: Failed state result=%08x\n", - __func__, result); - rc = -ENODEV; - goto out; - } - - if (result) { - dev_err_ratelimited(dev, "%s: command failed for " - "offset=%lld result=%08x\n", - __func__, offset, result); - rc = -EIO; - goto out; - } - left -= ws_limit; - offset += ws_limit; - } - -out: - kfree(cmd_buf); - kfree(scsi_cmd); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * grow_lxt() - expands the translation table associated with the specified RHTE - * @afu: AFU associated with the host. - * @sdev: SCSI device associated with LUN. - * @ctxid: Context ID of context owning the RHTE. - * @rhndl: Resource handle associated with the RHTE. - * @rhte: Resource handle entry (RHTE). - * @new_size: Number of translation entries associated with RHTE. - * - * By design, this routine employs a 'best attempt' allocation and will - * truncate the requested size down if there is not sufficient space in - * the block allocator to satisfy the request but there does exist some - * amount of space. The user is made aware of this by returning the size - * allocated. - * - * Return: 0 on success, -errno on failure - */ -static int grow_lxt(struct afu *afu, - struct scsi_device *sdev, - ctx_hndl_t ctxid, - res_hndl_t rhndl, - struct sisl_rht_entry *rhte, - u64 *new_size) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct blka *blka = &gli->blka; - u32 av_size; - u32 ngrps, ngrps_old; - u64 aun; /* chunk# allocated by block allocator */ - u64 delta = *new_size - rhte->lxt_cnt; - u64 my_new_size; - int i, rc = 0; - - /* - * Check what is available in the block allocator before re-allocating - * LXT array. This is done up front under the mutex which must not be - * released until after allocation is complete. - */ - mutex_lock(&blka->mutex); - av_size = ba_space(&blka->ba_lun); - if (unlikely(av_size <= 0)) { - dev_dbg(dev, "%s: ba_space error av_size=%d\n", - __func__, av_size); - mutex_unlock(&blka->mutex); - rc = -ENOSPC; - goto out; - } - - if (av_size < delta) - delta = av_size; - - lxt_old = rhte->lxt_start; - ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt); - ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta); - - if (ngrps != ngrps_old) { - /* reallocate to fit new size */ - lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps), - GFP_KERNEL); - if (unlikely(!lxt)) { - mutex_unlock(&blka->mutex); - rc = -ENOMEM; - goto out; - } - - /* copy over all old entries */ - memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt)); - } else - lxt = lxt_old; - - /* nothing can fail from now on */ - my_new_size = rhte->lxt_cnt + delta; - - /* add new entries to the end */ - for (i = rhte->lxt_cnt; i < my_new_size; i++) { - /* - * Due to the earlier check of available space, ba_alloc - * cannot fail here. If it did due to internal error, - * leave a rlba_base of -1u which will likely be a - * invalid LUN (too large). 
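[The CDB built by write_same16() above follows the SBC WRITE SAME(16) layout: opcode 0x93, the UNMAP bit in byte 1, an 8-byte big-endian starting LBA at bytes 2..9, and a 4-byte big-endian block count at bytes 10..13. A standalone sketch with open-coded big-endian stores in place of put_unaligned_be64/be32 (LBA and count values are arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (56 - 8 * i);
}

static void put_be32(uint32_t v, uint8_t *p)
{
	for (int i = 0; i < 4; i++)
		p[i] = v >> (24 - 8 * i);
}

int main(void)
{
	uint8_t cdb[16];
	int unmap = 1;

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = 0x93;			/* WRITE SAME(16) opcode */
	cdb[1] = unmap ? 0x08 : 0;	/* UNMAP bit, as cfg->ws_unmap */
	put_be64(0x1000, &cdb[2]);	/* starting LBA */
	put_be32(256, &cdb[10]);	/* number of blocks */

	for (int i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}]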
- */ - aun = ba_alloc(&blka->ba_lun); - if ((aun == -1ULL) || (aun >= blka->nchunk)) - dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu " - "max=%llu\n", __func__, aun, blka->nchunk - 1); - - /* select both ports, use r/w perms from RHT */ - lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) | - (lli->lun_index << LXT_LUNIDX_SHIFT) | - (RHT_PERM_RW << LXT_PERM_SHIFT | - lli->port_sel)); - } - - mutex_unlock(&blka->mutex); - - /* - * The following sequence is prescribed in the SISlite spec - * for syncing up with the AFU when adding LXT entries. - */ - dma_wmb(); /* Make LXT updates are visible */ - - rhte->lxt_start = lxt; - dma_wmb(); /* Make RHT entry's LXT table update visible */ - - rhte->lxt_cnt = my_new_size; - dma_wmb(); /* Make RHT entry's LXT table size update visible */ - - rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); - if (unlikely(rc)) - rc = -EAGAIN; - - /* free old lxt if reallocated */ - if (lxt != lxt_old) - kfree(lxt_old); - *new_size = my_new_size; -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * shrink_lxt() - reduces translation table associated with the specified RHTE - * @afu: AFU associated with the host. - * @sdev: SCSI device associated with LUN. - * @rhndl: Resource handle associated with the RHTE. - * @rhte: Resource handle entry (RHTE). - * @ctxi: Context owning resources. - * @new_size: Number of translation entries associated with RHTE. - * - * Return: 0 on success, -errno on failure - */ -static int shrink_lxt(struct afu *afu, - struct scsi_device *sdev, - res_hndl_t rhndl, - struct sisl_rht_entry *rhte, - struct ctx_info *ctxi, - u64 *new_size) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct sisl_lxt_entry *lxt, *lxt_old; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct blka *blka = &gli->blka; - ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid); - bool needs_ws = ctxi->rht_needs_ws[rhndl]; - bool needs_sync = !ctxi->err_recovery_active; - u32 ngrps, ngrps_old; - u64 aun; /* chunk# allocated by block allocator */ - u64 delta = rhte->lxt_cnt - *new_size; - u64 my_new_size; - int i, rc = 0; - - lxt_old = rhte->lxt_start; - ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt); - ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta); - - if (ngrps != ngrps_old) { - /* Reallocate to fit new size unless new size is 0 */ - if (ngrps) { - lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps), - GFP_KERNEL); - if (unlikely(!lxt)) { - rc = -ENOMEM; - goto out; - } - - /* Copy over old entries that will remain */ - memcpy(lxt, lxt_old, - (sizeof(*lxt) * (rhte->lxt_cnt - delta))); - } else - lxt = NULL; - } else - lxt = lxt_old; - - /* Nothing can fail from now on */ - my_new_size = rhte->lxt_cnt - delta; - - /* - * The following sequence is prescribed in the SISlite spec - * for syncing up with the AFU when removing LXT entries. - */ - rhte->lxt_cnt = my_new_size; - dma_wmb(); /* Make RHT entry's LXT table size update visible */ - - rhte->lxt_start = lxt; - dma_wmb(); /* Make RHT entry's LXT table update visible */ - - if (needs_sync) { - rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); - if (unlikely(rc)) - rc = -EAGAIN; - } - - if (needs_ws) { - /* - * Mark the context as unavailable, so that we can release - * the mutex safely. 
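[The rlba_base assignment in grow_lxt() packs one LXT entry: the allocated chunk number above the chunk shift, the LUN-table index, the permission bits, and the port-select mask in the low bits. A sketch using the shift values from vlun.h; the perms value 3 standing for RHT_PERM_RW is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define MC_CHUNK_SHIFT		16	/* LBA -> chunk# */
#define LXT_LUNIDX_SHIFT	8
#define LXT_PERM_SHIFT		4

int main(void)
{
	uint64_t aun = 5;		/* chunk# from block allocator */
	uint64_t lun_index = 2;
	uint64_t perms = 3;		/* assumed RW encoding */
	uint64_t port_sel = 0x3;	/* both ports */

	uint64_t rlba_base = (aun << MC_CHUNK_SHIFT) |
			     (lun_index << LXT_LUNIDX_SHIFT) |
			     (perms << LXT_PERM_SHIFT) | port_sel;

	/* the AFU ORs the within-chunk offset into the low bits and
	 * recovers the chunk by shifting back down */
	printf("entry=%#llx chunk=%llu\n",
	       (unsigned long long)rlba_base,
	       (unsigned long long)(rlba_base >> MC_CHUNK_SHIFT));
	return 0;
}]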
- */ - ctxi->unavail = true; - mutex_unlock(&ctxi->mutex); - } - - /* Free LBAs allocated to freed chunks */ - mutex_lock(&blka->mutex); - for (i = delta - 1; i >= 0; i--) { - aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT; - if (needs_ws) - write_same16(sdev, aun, MC_CHUNK_SIZE); - ba_free(&blka->ba_lun, aun); - } - mutex_unlock(&blka->mutex); - - if (needs_ws) { - /* Make the context visible again */ - mutex_lock(&ctxi->mutex); - ctxi->unavail = false; - } - - /* Free old lxt if reallocated */ - if (lxt != lxt_old) - kfree(lxt_old); - *new_size = my_new_size; -out: - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * _cxlflash_vlun_resize() - changes the size of a virtual LUN - * @sdev: SCSI device associated with LUN owning virtual LUN. - * @ctxi: Context owning resources. - * @resize: Resize ioctl data structure. - * - * On successful return, the user is informed of the new size (in blocks) - * of the virtual LUN in last LBA format. When the size of the virtual - * LUN is zero, the last LBA is reflected as -1. See comment in the - * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts - * on the error recovery list. - * - * Return: 0 on success, -errno on failure - */ -int _cxlflash_vlun_resize(struct scsi_device *sdev, - struct ctx_info *ctxi, - struct dk_cxlflash_resize *resize) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct afu *afu = cfg->afu; - bool put_ctx = false; - - res_hndl_t rhndl = resize->rsrc_handle; - u64 new_size; - u64 nsectors; - u64 ctxid = DECODE_CTXID(resize->context_id), - rctxid = resize->context_id; - - struct sisl_rht_entry *rhte; - - int rc = 0; - - /* - * The requested size (req_size) is always assumed to be in 4k blocks, - * so we have to convert it here from 4k to chunk size. - */ - nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len; - new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE); - - dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n", - __func__, ctxid, resize->rsrc_handle, resize->req_size, - new_size); - - if (unlikely(gli->mode != MODE_VIRTUAL)) { - dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n", - __func__, gli->mode); - rc = -EINVAL; - goto out; - - } - - if (!ctxi) { - ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); - if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context ctxid=%llu\n", - __func__, ctxid); - rc = -EINVAL; - goto out; - } - - put_ctx = true; - } - - rhte = get_rhte(ctxi, rhndl, lli); - if (unlikely(!rhte)) { - dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n", - __func__, rhndl); - rc = -EINVAL; - goto out; - } - - if (new_size > rhte->lxt_cnt) - rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size); - else if (new_size < rhte->lxt_cnt) - rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size); - else { - /* - * Rare case where there is already sufficient space, just - * need to perform a translation sync with the AFU. This - * scenario likely follows a previous sync failure during - * a resize operation. Accordingly, perform the heavyweight - * form of translation sync as it is unknown which type of - * resize failed previously. 
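[The size conversion in _cxlflash_vlun_resize() above is worth making concrete: the requested size arrives in 4K blocks, is converted to device sectors, rounded up to whole chunks, and the result is reported back as a last LBA in 4K units. A worked standalone version (the input values are examples only):

#include <stdint.h>
#include <stdio.h>

#define CXLFLASH_BLOCK_SIZE	4096
#define MC_CHUNK_SIZE		(1 << 16)	/* LBAs per chunk */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t blk_len = 512;		/* device sector size */
	uint64_t req_size = 100000;	/* requested size, in 4K blocks */

	uint64_t nsectors = req_size * CXLFLASH_BLOCK_SIZE / blk_len;
	uint64_t new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);

	uint64_t last_lba = new_size * MC_CHUNK_SIZE * blk_len
				/ CXLFLASH_BLOCK_SIZE - 1;

	printf("chunks=%llu last_lba=%llu\n",
	       (unsigned long long)new_size,
	       (unsigned long long)last_lba);	/* 13 chunks, 106495 */
	return 0;
}

The round-up means the user can receive slightly more than requested; a request of zero yields zero chunks and a last LBA of -1, as the prologue notes.]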
- */ - rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); - if (unlikely(rc)) { - rc = -EAGAIN; - goto out; - } - } - - resize->hdr.return_flags = 0; - resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len); - resize->last_lba /= CXLFLASH_BLOCK_SIZE; - resize->last_lba--; - -out: - if (put_ctx) - put_context(ctxi); - dev_dbg(dev, "%s: resized to %llu returning rc=%d\n", - __func__, resize->last_lba, rc); - return rc; -} - -int cxlflash_vlun_resize(struct scsi_device *sdev, - struct dk_cxlflash_resize *resize) -{ - return _cxlflash_vlun_resize(sdev, NULL, resize); -} - -/** - * cxlflash_restore_luntable() - Restore LUN table to prior state - * @cfg: Internal structure associated with the host. - */ -void cxlflash_restore_luntable(struct cxlflash_cfg *cfg) -{ - struct llun_info *lli, *temp; - u32 lind; - int k; - struct device *dev = &cfg->dev->dev; - __be64 __iomem *fc_port_luns; - - mutex_lock(&global.mutex); - - list_for_each_entry_safe(lli, temp, &cfg->lluns, list) { - if (!lli->in_table) - continue; - - lind = lli->lun_index; - dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind); - - for (k = 0; k < cfg->num_fc_ports; k++) - if (lli->port_sel & (1 << k)) { - fc_port_luns = get_fc_port_luns(cfg, k); - writeq_be(lli->lun_id[k], &fc_port_luns[lind]); - dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); - } - } - - mutex_unlock(&global.mutex); -} - -/** - * get_num_ports() - compute number of ports from port selection mask - * @psm: Port selection mask. - * - * Return: Population count of port selection mask - */ -static inline u8 get_num_ports(u32 psm) -{ - static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3, - 1, 2, 2, 3, 2, 3, 3, 4 }; - - return bits[psm & 0xf]; -} - -/** - * init_luntable() - write an entry in the LUN table - * @cfg: Internal structure associated with the host. - * @lli: Per adapter LUN information structure. - * - * On successful return, a LUN table entry is created: - * - at the top for LUNs visible on multiple ports. - * - at the bottom for LUNs visible only on one port. - * - * Return: 0 on success, -errno on failure - */ -static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli) -{ - u32 chan; - u32 lind; - u32 nports; - int rc = 0; - int k; - struct device *dev = &cfg->dev->dev; - __be64 __iomem *fc_port_luns; - - mutex_lock(&global.mutex); - - if (lli->in_table) - goto out; - - nports = get_num_ports(lli->port_sel); - if (nports == 0 || nports > cfg->num_fc_ports) { - WARN(1, "Unsupported port configuration nports=%u", nports); - rc = -EIO; - goto out; - } - - if (nports > 1) { - /* - * When LUN is visible from multiple ports, we will put - * it in the top half of the LUN table. - */ - for (k = 0; k < cfg->num_fc_ports; k++) { - if (!(lli->port_sel & (1 << k))) - continue; - - if (cfg->promote_lun_index == cfg->last_lun_index[k]) { - rc = -ENOSPC; - goto out; - } - } - - lind = lli->lun_index = cfg->promote_lun_index; - dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind); - - for (k = 0; k < cfg->num_fc_ports; k++) { - if (!(lli->port_sel & (1 << k))) - continue; - - fc_port_luns = get_fc_port_luns(cfg, k); - writeq_be(lli->lun_id[k], &fc_port_luns[lind]); - dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); - } - - cfg->promote_lun_index++; - } else { - /* - * When LUN is visible only from one port, we will put - * it in the bottom half of the LUN table. 
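[get_num_ports() above is a 4-bit population count done with a 16-entry lookup table; the same result comes from a popcount builtin. A quick self-check of the table:

#include <stdio.h>

static const unsigned char bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
					1, 2, 2, 3, 2, 3, 3, 4 };

int main(void)
{
	for (unsigned psm = 0; psm < 16; psm++)
		if (bits[psm] != (unsigned)__builtin_popcount(psm))
			printf("mismatch at %u\n", psm);
	printf("table matches popcount for all 4-bit masks\n");
	return 0;
}]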
- */ - chan = PORTMASK2CHAN(lli->port_sel); - if (cfg->promote_lun_index == cfg->last_lun_index[chan]) { - rc = -ENOSPC; - goto out; - } - - lind = lli->lun_index = cfg->last_lun_index[chan]; - fc_port_luns = get_fc_port_luns(cfg, chan); - writeq_be(lli->lun_id[chan], &fc_port_luns[lind]); - cfg->last_lun_index[chan]--; - dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n", - __func__, lind, chan, lli->lun_id[chan]); - } - - lli->in_table = true; -out: - mutex_unlock(&global.mutex); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -} - -/** - * cxlflash_disk_virtual_open() - open a virtual disk of specified size - * @sdev: SCSI device associated with LUN owning virtual LUN. - * @arg: UVirtual ioctl data structure. - * - * On successful return, the user is informed of the resource handle - * to be used to identify the virtual LUN and the size (in blocks) of - * the virtual LUN in last LBA format. When the size of the virtual LUN - * is zero, the last LBA is reflected as -1. - * - * Return: 0 on success, -errno on failure - */ -int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - - struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg; - struct dk_cxlflash_resize resize; - - u64 ctxid = DECODE_CTXID(virt->context_id), - rctxid = virt->context_id; - u64 lun_size = virt->lun_size; - u64 last_lba = 0; - u64 rsrc_handle = -1; - - int rc = 0; - - struct ctx_info *ctxi = NULL; - struct sisl_rht_entry *rhte = NULL; - - dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size); - - /* Setup the LUNs block allocator on first call */ - mutex_lock(&gli->mutex); - if (gli->mode == MODE_NONE) { - rc = init_vlun(lli); - if (rc) { - dev_err(dev, "%s: init_vlun failed rc=%d\n", - __func__, rc); - rc = -ENOMEM; - goto err0; - } - } - - rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true); - if (unlikely(rc)) { - dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__); - goto err0; - } - mutex_unlock(&gli->mutex); - - rc = init_luntable(cfg, lli); - if (rc) { - dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc); - goto err1; - } - - ctxi = get_context(cfg, rctxid, lli, 0); - if (unlikely(!ctxi)) { - dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); - rc = -EINVAL; - goto err1; - } - - rhte = rhte_checkout(ctxi, lli); - if (unlikely(!rhte)) { - dev_err(dev, "%s: too many opens ctxid=%llu\n", - __func__, ctxid); - rc = -EMFILE; /* too many opens */ - goto err1; - } - - rsrc_handle = (rhte - ctxi->rht_start); - - /* Populate RHT format 0 */ - rhte->nmask = MC_RHT_NMASK; - rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms); - - /* Resize even if requested size is 0 */ - marshal_virt_to_resize(virt, &resize); - resize.rsrc_handle = rsrc_handle; - rc = _cxlflash_vlun_resize(sdev, ctxi, &resize); - if (rc) { - dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc); - goto err2; - } - last_lba = resize.last_lba; - - if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME) - ctxi->rht_needs_ws[rsrc_handle] = true; - - virt->hdr.return_flags = 0; - virt->last_lba = last_lba; - virt->rsrc_handle = rsrc_handle; - - if (get_num_ports(lli->port_sel) > 1) - virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE; -out: - if (likely(ctxi)) - put_context(ctxi); - dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n", - __func__, rsrc_handle, rc, last_lba); 
- return rc; - -err2: - rhte_checkin(ctxi, rhte); -err1: - cxlflash_lun_detach(gli); - goto out; -err0: - /* Special common cleanup prior to successful LUN attach */ - cxlflash_ba_terminate(&gli->blka.ba_lun); - mutex_unlock(&gli->mutex); - goto out; -} - -/** - * clone_lxt() - copies translation tables from source to destination RHTE - * @afu: AFU associated with the host. - * @blka: Block allocator associated with LUN. - * @ctxid: Context ID of context owning the RHTE. - * @rhndl: Resource handle associated with the RHTE. - * @rhte: Destination resource handle entry (RHTE). - * @rhte_src: Source resource handle entry (RHTE). - * - * Return: 0 on success, -errno on failure - */ -static int clone_lxt(struct afu *afu, - struct blka *blka, - ctx_hndl_t ctxid, - res_hndl_t rhndl, - struct sisl_rht_entry *rhte, - struct sisl_rht_entry *rhte_src) -{ - struct cxlflash_cfg *cfg = afu->parent; - struct device *dev = &cfg->dev->dev; - struct sisl_lxt_entry *lxt = NULL; - bool locked = false; - u32 ngrps; - u64 aun; /* chunk# allocated by block allocator */ - int j; - int i = 0; - int rc = 0; - - ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt); - - if (ngrps) { - /* allocate new LXTs for clone */ - lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps), - GFP_KERNEL); - if (unlikely(!lxt)) { - rc = -ENOMEM; - goto out; - } - - /* copy over */ - memcpy(lxt, rhte_src->lxt_start, - (sizeof(*lxt) * rhte_src->lxt_cnt)); - - /* clone the LBAs in block allocator via ref_cnt, note that the - * block allocator mutex must be held until it is established - * that this routine will complete without the need for a - * cleanup. - */ - mutex_lock(&blka->mutex); - locked = true; - for (i = 0; i < rhte_src->lxt_cnt; i++) { - aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT); - if (ba_clone(&blka->ba_lun, aun) == -1ULL) { - rc = -EIO; - goto err; - } - } - } - - /* - * The following sequence is prescribed in the SISlite spec - * for syncing up with the AFU when adding LXT entries. - */ - dma_wmb(); /* Make LXT updates are visible */ - - rhte->lxt_start = lxt; - dma_wmb(); /* Make RHT entry's LXT table update visible */ - - rhte->lxt_cnt = rhte_src->lxt_cnt; - dma_wmb(); /* Make RHT entry's LXT table size update visible */ - - rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); - if (unlikely(rc)) { - rc = -EAGAIN; - goto err2; - } - -out: - if (locked) - mutex_unlock(&blka->mutex); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; -err2: - /* Reset the RHTE */ - rhte->lxt_cnt = 0; - dma_wmb(); - rhte->lxt_start = NULL; - dma_wmb(); -err: - /* free the clones already made */ - for (j = 0; j < i; j++) { - aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT); - ba_free(&blka->ba_lun, aun); - } - kfree(lxt); - goto out; -} - -/** - * cxlflash_disk_clone() - clone a context by making snapshot of another - * @sdev: SCSI device associated with LUN owning virtual LUN. - * @clone: Clone ioctl data structure. - * - * This routine effectively performs cxlflash_disk_open operation for each - * in-use virtual resource in the source context. Note that the destination - * context must be in pristine state and cannot have any resource handles - * open at the time of the clone. 
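[clone_lxt() above, like grow_lxt(), publishes a table with the SISlite-prescribed ordering: install the pointer, barrier, then the count, so a consumer observing a nonzero count also sees a valid pointer; the err2 path retracts in the reverse order. A loose userspace analogue with C11 atomics, where a release fence stands in for dma_wmb() (this approximates the intent, not the MMIO semantics):

#include <stdatomic.h>
#include <stdio.h>

struct rhte {
	_Atomic(long *) lxt_start;
	atomic_uint lxt_cnt;
};

static void publish(struct rhte *r, long *lxt, unsigned cnt)
{
	atomic_store_explicit(&r->lxt_start, lxt, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~dma_wmb() */
	atomic_store_explicit(&r->lxt_cnt, cnt, memory_order_relaxed);
}

int main(void)
{
	static long table[8];
	struct rhte r = { NULL, 0 };

	publish(&r, table, 8);
	printf("cnt=%u start=%p\n", atomic_load(&r.lxt_cnt),
	       (void *)atomic_load(&r.lxt_start));
	return 0;
}]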
- * - * Return: 0 on success, -errno on failure - */ -int cxlflash_disk_clone(struct scsi_device *sdev, - struct dk_cxlflash_clone *clone) -{ - struct cxlflash_cfg *cfg = shost_priv(sdev->host); - struct device *dev = &cfg->dev->dev; - struct llun_info *lli = sdev->hostdata; - struct glun_info *gli = lli->parent; - struct blka *blka = &gli->blka; - struct afu *afu = cfg->afu; - struct dk_cxlflash_release release = { { 0 }, 0 }; - - struct ctx_info *ctxi_src = NULL, - *ctxi_dst = NULL; - struct lun_access *lun_access_src, *lun_access_dst; - u32 perms; - u64 ctxid_src = DECODE_CTXID(clone->context_id_src), - ctxid_dst = DECODE_CTXID(clone->context_id_dst), - rctxid_src = clone->context_id_src, - rctxid_dst = clone->context_id_dst; - int i, j; - int rc = 0; - bool found; - LIST_HEAD(sidecar); - - dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n", - __func__, ctxid_src, ctxid_dst); - - /* Do not clone yourself */ - if (unlikely(rctxid_src == rctxid_dst)) { - rc = -EINVAL; - goto out; - } - - if (unlikely(gli->mode != MODE_VIRTUAL)) { - rc = -EINVAL; - dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n", - __func__, gli->mode); - goto out; - } - - ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE); - ctxi_dst = get_context(cfg, rctxid_dst, lli, 0); - if (unlikely(!ctxi_src || !ctxi_dst)) { - dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n", - __func__, ctxid_src, ctxid_dst); - rc = -EINVAL; - goto out; - } - - /* Verify there is no open resource handle in the destination context */ - for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) - if (ctxi_dst->rht_start[i].nmask != 0) { - rc = -EINVAL; - goto out; - } - - /* Clone LUN access list */ - list_for_each_entry(lun_access_src, &ctxi_src->luns, list) { - found = false; - list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list) - if (lun_access_dst->sdev == lun_access_src->sdev) { - found = true; - break; - } - - if (!found) { - lun_access_dst = kzalloc(sizeof(*lun_access_dst), - GFP_KERNEL); - if (unlikely(!lun_access_dst)) { - dev_err(dev, "%s: lun_access allocation fail\n", - __func__); - rc = -ENOMEM; - goto out; - } - - *lun_access_dst = *lun_access_src; - list_add(&lun_access_dst->list, &sidecar); - } - } - - if (unlikely(!ctxi_src->rht_out)) { - dev_dbg(dev, "%s: Nothing to clone\n", __func__); - goto out_success; - } - - /* User specified permission on attach */ - perms = ctxi_dst->rht_perms; - - /* - * Copy over checked-out RHT (and their associated LXT) entries by - * hand, stopping after we've copied all outstanding entries and - * cleaning up if the clone fails. - * - * Note: This loop is equivalent to performing cxlflash_disk_open and - * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into - * account by attaching after each successful RHT entry clone. In the - * event that a clone failure is experienced, the LUN detach is handled - * via the cleanup performed by _cxlflash_disk_release. 
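[cxlflash_disk_clone() builds its copied LUN-access entries on a private "sidecar" list and splices them into the destination context only after every step has succeeded; on failure the private list is freed wholesale and the destination is untouched. A sketch of that commit-or-discard idiom with a plain singly linked list (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int payload; };

static void splice(struct node **dst, struct node *side)
{
	while (side) {
		struct node *n = side;

		side = side->next;
		n->next = *dst;		/* adopt into destination */
		*dst = n;
	}
}

static void free_list(struct node *side)
{
	while (side) {
		struct node *n = side;

		side = side->next;
		free(n);
	}
}

int main(void)
{
	struct node *dst = NULL, *sidecar = NULL;
	int ok = 1;			/* flip to 0 for the error path */

	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n) { ok = 0; break; }
		n->payload = i;
		n->next = sidecar;
		sidecar = n;
	}

	if (ok)
		splice(&dst, sidecar);	/* success: commit all at once */
	else
		free_list(sidecar);	/* failure: nothing leaked in */

	for (struct node *n = dst; n; n = n->next)
		printf("%d ", n->payload);
	printf("\n");
	free_list(dst);
	return 0;
}]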
- */ - for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { - if (ctxi_src->rht_out == ctxi_dst->rht_out) - break; - if (ctxi_src->rht_start[i].nmask == 0) - continue; - - /* Consume a destination RHT entry */ - ctxi_dst->rht_out++; - ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask; - ctxi_dst->rht_start[i].fp = - SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms); - ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i]; - - rc = clone_lxt(afu, blka, ctxid_dst, i, - &ctxi_dst->rht_start[i], - &ctxi_src->rht_start[i]); - if (rc) { - marshal_clone_to_rele(clone, &release); - for (j = 0; j < i; j++) { - release.rsrc_handle = j; - _cxlflash_disk_release(sdev, ctxi_dst, - &release); - } - - /* Put back the one we failed on */ - rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]); - goto err; - } - - cxlflash_lun_attach(gli, gli->mode, false); - } - -out_success: - list_splice(&sidecar, &ctxi_dst->luns); - - /* fall through */ -out: - if (ctxi_src) - put_context(ctxi_src); - if (ctxi_dst) - put_context(ctxi_dst); - dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); - return rc; - -err: - list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list) - kfree(lun_access_src); - goto out; -} diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h deleted file mode 100644 index 68e3ea52fe80..000000000000 --- a/drivers/scsi/cxlflash/vlun.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * CXL Flash Device Driver - * - * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation - * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation - * - * Copyright (C) 2015 IBM Corporation - */ - -#ifndef _CXLFLASH_VLUN_H -#define _CXLFLASH_VLUN_H - -/* RHT - Resource Handle Table */ -#define MC_RHT_NMASK 16 /* in bits */ -#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */ - -#define HIBIT (BITS_PER_LONG - 1) - -#define MAX_AUN_CLONE_CNT 0xFF - -/* - * LXT - LBA Translation Table - * - * +-------+-------+-------+-------+-------+-------+-------+---+---+ - * | RLBA_BASE |LUN_IDX| P |SEL| - * +-------+-------+-------+-------+-------+-------+-------+---+---+ - * - * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE). - * AFU ORes the low order bits from the virtual LBA (offset into the chunk) - * with RLBA_BASE. The result is the physical LBA to be sent to storage. - * The LXT Entry also contains an index to a LUN TBL and a bitmask of which - * outgoing (FC) * ports can be selected. The port select bit-mask is ANDed - * with a global port select bit-mask maintained by the driver. - * In addition, it has permission bits that are ANDed with the - * RHT permissions to arrive at the final permissions for the chunk. - * - * LXT tables are allocated dynamically in groups. This is done to avoid - * a malloc/free overhead each time the LXT has to grow or shrink. - * - * Based on the current lxt_cnt (used), it is always possible to know - * how many are allocated (used+free). The number of allocated entries is - * not stored anywhere. - * - * The LXT table is re-allocated whenever it needs to cross into another group. 
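[The vlun.h comment above explains that LXT tables are sized in groups of 8 entries, so a grow or shrink only reallocates when lxt_cnt crosses a group boundary, and the allocated capacity is always derivable as LXT_NUM_GROUPS(cnt) * 8 without being stored. A tiny demonstration of that arithmetic:

#include <stdio.h>

#define LXT_GROUP_SIZE		8
#define LXT_NUM_GROUPS(cnt)	(((cnt) + 7) / 8)

int main(void)
{
	for (unsigned cnt = 0; cnt <= 17; cnt++)
		printf("lxt_cnt=%2u groups=%u capacity=%2u\n", cnt,
		       LXT_NUM_GROUPS(cnt),
		       LXT_NUM_GROUPS(cnt) * LXT_GROUP_SIZE);
	return 0;
}

Counts 1..8 all report one group; capacity jumps to 16 only at cnt=9, which is exactly the ngrps != ngrps_old test in grow_lxt()/shrink_lxt().]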
- */ -#define LXT_GROUP_SIZE 8 -#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */ -#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */ -#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */ - -struct ba_lun_info { - u64 *lun_alloc_map; - u32 lun_bmap_size; - u32 total_aus; - u64 free_aun_cnt; - - /* indices to be used for elevator lookup of free map */ - u32 free_low_idx; - u32 free_curr_idx; - u32 free_high_idx; - - u8 *aun_clone_map; -}; - -struct ba_lun { - u64 lun_id; - u64 wwpn; - size_t lsize; /* LUN size in number of LBAs */ - size_t lba_size; /* LBA size in number of bytes */ - size_t au_size; /* Allocation Unit size in number of LBAs */ - struct ba_lun_info *ba_lun_handle; -}; - -/* Block Allocator */ -struct blka { - struct ba_lun ba_lun; - u64 nchunk; /* number of chunks */ - struct mutex mutex; -}; - -#endif /* ifndef _CXLFLASH_SUPERPIPE_H */ diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index d108a86e196e..96b335c92603 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -83,65 +83,6 @@ /*#define DC395x_NO_SYNC*/ /*#define DC395x_NO_WIDE*/ -/*--------------------------------------------------------------------------- - Debugging - ---------------------------------------------------------------------------*/ -/* - * Types of debugging that can be enabled and disabled - */ -#define DBG_KG 0x0001 -#define DBG_0 0x0002 -#define DBG_1 0x0004 -#define DBG_SG 0x0020 -#define DBG_FIFO 0x0040 -#define DBG_PIO 0x0080 - - -/* - * Set set of things to output debugging for. - * Undefine to remove all debugging - */ -/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/ -/*#define DEBUG_MASK DBG_0*/ - - -/* - * Output a kernel mesage at the specified level and append the - * driver name and a ": " to the start of the message - */ -#define dprintkl(level, format, arg...) \ - printk(level DC395X_NAME ": " format , ## arg) - - -#ifdef DEBUG_MASK -/* - * print a debug message - this is formated with KERN_DEBUG, then the - * driver name followed by a ": " and then the message is output. - * This also checks that the specified debug level is enabled before - * outputing the message - */ -#define dprintkdbg(type, format, arg...) \ - do { \ - if ((type) & (DEBUG_MASK)) \ - dprintkl(KERN_DEBUG , format , ## arg); \ - } while (0) - -/* - * Check if the specified type of debugging is enabled - */ -#define debug_enabled(type) ((DEBUG_MASK) & (type)) - -#else -/* - * No debugging. Do nothing - */ -#define dprintkdbg(type, format, arg...) 
\ - do {} while (0) -#define debug_enabled(type) (0) - -#endif - - #ifndef PCI_VENDOR_ID_TEKRAM #define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */ #endif @@ -432,7 +373,6 @@ static void *dc395x_scsi_phase1[] = { /* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */ static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 }; -static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 }; /*--------------------------------------------------------------------------- @@ -564,7 +504,6 @@ static void set_safe_settings(void) { int i; - dprintkl(KERN_INFO, "Using safe settings.\n"); for (i = 0; i < CFG_NUM; i++) { cfg_data[i].value = cfg_data[i].safe; @@ -581,15 +520,6 @@ static void fix_settings(void) { int i; - dprintkdbg(DBG_1, - "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x " - "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n", - cfg_data[CFG_ADAPTER_ID].value, - cfg_data[CFG_MAX_SPEED].value, - cfg_data[CFG_DEV_MODE].value, - cfg_data[CFG_ADAPTER_MODE].value, - cfg_data[CFG_TAGS].value, - cfg_data[CFG_RESET_DELAY].value); for (i = 0; i < CFG_NUM; i++) { if (cfg_data[i].value < cfg_data[i].min @@ -765,7 +695,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb) return; if (timer_pending(&acb->waiting_timer)) - del_timer(&acb->waiting_timer); + timer_delete(&acb->waiting_timer); if (list_empty(dcb_list_head)) return; @@ -822,8 +752,6 @@ static void waiting_timeout(struct timer_list *t) { unsigned long flags; struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer); - dprintkdbg(DBG_1, - "waiting_timeout: Queue woken up by timer. acb=%p\n", acb); DC395x_LOCK_IO(acb->scsi_host, flags); waiting_process_next(acb); DC395x_UNLOCK_IO(acb->scsi_host, flags); @@ -864,8 +792,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, { int nseg; enum dma_data_direction dir = cmd->sc_data_direction; - dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n", - cmd, dcb->target_id, dcb->target_lun); srb->dcb = dcb; srb->cmd = cmd; @@ -887,12 +813,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); - if (dir == DMA_NONE || !nseg) { - dprintkdbg(DBG_0, - "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n", - cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd), - srb->segment_x[0].address); - } else { + if (!(dir == DMA_NONE || !nseg)) { int i; u32 reqlen = scsi_bufflen(cmd); struct scatterlist *sg; @@ -900,11 +821,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, srb->sg_count = nseg; - dprintkdbg(DBG_0, - "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n", - reqlen, scsi_sglist(cmd), scsi_sg_count(cmd), - srb->sg_count); - scsi_for_each_sg(cmd, sg, srb->sg_count, i) { u32 busaddr = (u32)sg_dma_address(sg); u32 seglen = (u32)sg->length; @@ -933,8 +849,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev, srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE); - dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n", - srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN); } srb->request_length = srb->total_xfer_length; @@ -966,8 +880,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) struct ScsiReqBlk *srb; struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; - dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n", - cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]); /* Assume BAD_TARGET; will be cleared later */ set_host_byte(cmd, DID_BAD_TARGET); 
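The del_timer() to timer_delete() conversions in this hunk and throughout the rest of the patch are mechanical renames: timer_delete() is the current name for the same operation and is equally safe on an inactive timer, so the timer_pending() guards the driver keeps are optional rather than required. A minimal sketch of the converted idiom; the helper name is illustrative, not the driver's:

#include <linux/timer.h>

/* Stop the adapter's wait timer if it is armed. timer_delete() is a
 * no-op on an idle timer, so the timer_pending() check merely skips
 * a redundant call. */
static void stop_waiting_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->waiting_timer))
		timer_delete(&acb->waiting_timer);
}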
@@ -975,37 +887,26 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) /* ignore invalid targets */ if (cmd->device->id >= acb->scsi_host->max_id || cmd->device->lun >= acb->scsi_host->max_lun || - cmd->device->lun >31) { + cmd->device->lun > 31) goto complete; - } /* does the specified lun on the specified device exist */ - if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) { - dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n", - cmd->device->id, (u8)cmd->device->lun); + if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) goto complete; - } /* do we have a DCB for the device */ dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); - if (!dcb) { - /* should never happen */ - dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>", - cmd->device->id, (u8)cmd->device->lun); + if (!dcb) goto complete; - } set_host_byte(cmd, DID_OK); set_status_byte(cmd, SAM_STAT_GOOD); srb = list_first_entry_or_null(&acb->srb_free_list, - struct ScsiReqBlk, list); + struct ScsiReqBlk, list); + if (!srb) { - /* - * Return 1 since we are unable to queue this command at this - * point in time. - */ - dprintkdbg(DBG_0, "queue_command: No free srb's\n"); + /* should never happen */ return 1; } list_del(&srb->list); @@ -1020,7 +921,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) /* process immediately */ send_srb(acb, srb); } - dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd); return 0; complete: @@ -1036,82 +936,8 @@ complete: static DEF_SCSI_QCMD(dc395x_queue_command) -static void dump_register_info(struct AdapterCtlBlk *acb, - struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) -{ - u16 pstat; - struct pci_dev *dev = acb->dev; - pci_read_config_word(dev, PCI_STATUS, &pstat); - if (!dcb) - dcb = acb->active_dcb; - if (!srb && dcb) - srb = dcb->active_srb; - if (srb) { - if (!srb->cmd) - dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n", - srb, srb->cmd); - else - dprintkl(KERN_INFO, "dump: srb=%p cmd=%p " - "cmnd=0x%02x <%02i-%i>\n", - srb, srb->cmd, - srb->cmd->cmnd[0], srb->cmd->device->id, - (u8)srb->cmd->device->lun); - printk(" sglist=%p cnt=%i idx=%i len=%zu\n", - srb->segment_x, srb->sg_count, srb->sg_index, - srb->total_xfer_length); - printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n", - srb->state, srb->status, srb->scsi_phase, - (acb->active_dcb) ? 
"" : "not"); - } - dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x " - "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x " - "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x " - "config2=0x%02x cmd=0x%02x selto=0x%02x}\n", - DC395x_read16(acb, TRM_S1040_SCSI_STATUS), - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL), - DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS), - DC395x_read8(acb, TRM_S1040_SCSI_SYNC), - DC395x_read8(acb, TRM_S1040_SCSI_TARGETID), - DC395x_read8(acb, TRM_S1040_SCSI_IDMSG), - DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), - DC395x_read8(acb, TRM_S1040_SCSI_INTEN), - DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0), - DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2), - DC395x_read8(acb, TRM_S1040_SCSI_COMMAND), - DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT)); - dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x " - "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x " - "ctctr=0x%08x addr=0x%08x:0x%08x}\n", - DC395x_read16(acb, TRM_S1040_DMA_COMMAND), - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read8(acb, TRM_S1040_DMA_STATUS), - DC395x_read8(acb, TRM_S1040_DMA_INTEN), - DC395x_read16(acb, TRM_S1040_DMA_CONFIG), - DC395x_read32(acb, TRM_S1040_DMA_XCNT), - DC395x_read32(acb, TRM_S1040_DMA_CXCNT), - DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR), - DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR)); - dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} " - "pci{status=0x%04x}\n", - DC395x_read8(acb, TRM_S1040_GEN_CONTROL), - DC395x_read8(acb, TRM_S1040_GEN_STATUS), - DC395x_read8(acb, TRM_S1040_GEN_TIMER), - pstat); -} - - static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt) { -#if debug_enabled(DBG_FIFO) - u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); - u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); - if (!(fifocnt & 0x40)) - dprintkdbg(DBG_FIFO, - "clear_fifo: (%i bytes) on phase %02x in %s\n", - fifocnt & 0x3f, lines, txt); -#endif DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO); } @@ -1120,7 +946,6 @@ static void reset_dev_param(struct AdapterCtlBlk *acb) { struct DeviceCtlBlk *dcb; struct NvRamType *eeprom = &acb->eeprom; - dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb); list_for_each_entry(dcb, &acb->dcb_list, list) { u8 period_index; @@ -1148,12 +973,9 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; - dprintkl(KERN_INFO, - "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n", - cmd, cmd->device->id, (u8)cmd->device->lun, cmd); if (timer_pending(&acb->waiting_timer)) - del_timer(&acb->waiting_timer); + timer_delete(&acb->waiting_timer); /* * disable interrupt @@ -1216,14 +1038,10 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd) (struct AdapterCtlBlk *)cmd->device->host->hostdata; struct DeviceCtlBlk *dcb; struct ScsiReqBlk *srb; - dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n", - cmd, cmd->device->id, (u8)cmd->device->lun, cmd); dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); - if (!dcb) { - dprintkl(KERN_DEBUG, "eh_abort: No such device\n"); + if (!dcb) return FAILED; - } srb = find_cmd(cmd, &dcb->srb_waiting_list); if (srb) { @@ -1232,16 +1050,12 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd) pci_unmap_srb(acb, srb); free_tag(dcb, srb); list_add_tail(&srb->list, &acb->srb_free_list); - dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n"); set_host_byte(cmd, DID_ABORT); return 
SUCCESS; } srb = find_cmd(cmd, &dcb->srb_going_list); if (srb) { - dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n"); /* XXX: Should abort the command here */ - } else { - dprintkl(KERN_DEBUG, "eh_abort: Command not found\n"); } return FAILED; } @@ -1253,10 +1067,6 @@ static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, { u8 *ptr = srb->msgout_buf + srb->msg_count; if (srb->msg_count > 1) { - dprintkl(KERN_INFO, - "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n", - srb->msg_count, srb->msgout_buf[0], - srb->msgout_buf[1]); return; } if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) { @@ -1278,13 +1088,9 @@ static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) & (acb->config & HCC_WIDE_CARD)) ? 1 : 0; u8 *ptr = srb->msgout_buf + srb->msg_count; - if (srb->msg_count > 1) { - dprintkl(KERN_INFO, - "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n", - srb->msg_count, srb->msgout_buf[0], - srb->msgout_buf[1]); + if (srb->msg_count > 1) return; - } + srb->msg_count += spi_populate_width_msg(ptr, wide); srb->state |= SRB_DO_WIDE_NEGO; } @@ -1316,11 +1122,9 @@ void selection_timeout_missed(unsigned long ptr) unsigned long flags; struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr; struct ScsiReqBlk *srb; - dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n"); - if (!acb->active_dcb || !acb->active_dcb->active_srb) { - dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n"); + if (!acb->active_dcb || !acb->active_dcb->active_srb) return; - } + DC395x_LOCK_IO(acb->scsi_host, flags); srb = acb->active_dcb->active_srb; disconnect(acb); @@ -1335,8 +1139,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, u16 __maybe_unused s_stat2, return_code; u8 s_stat, scsicommand, i, identify_message; u8 *ptr; - dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n", - dcb->target_id, dcb->target_lun, srb); srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */ @@ -1345,8 +1147,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); #if 1 if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) { - dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n", - s_stat, s_stat2); /* * Try anyway? * @@ -1361,24 +1161,16 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, return 1; } #endif - if (acb->active_dcb) { - dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a" - "command while another command (0x%p) is active.", - srb->cmd, - acb->active_dcb->active_srb ? 
- acb->active_dcb->active_srb->cmd : NULL); + if (acb->active_dcb) return 1; - } - if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { - dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd); + + if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) return 1; - } + /* Allow starting of SCSI commands half a second before we allow the mid-level * to queue them again after a reset */ - if (time_before(jiffies, acb->last_reset - HZ / 2)) { - dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); + if (time_before(jiffies, acb->last_reset - HZ / 2)) return 1; - } /* Flush FIFO */ clear_fifo(acb, "start_scsi"); @@ -1442,10 +1234,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, tag_number++; } if (tag_number >= dcb->max_command) { - dprintkl(KERN_WARNING, "start_scsi: (0x%p) " - "Out of tags target=<%02i-%i>)\n", - srb->cmd, srb->cmd->device->id, - (u8)srb->cmd->device->lun); srb->state = SRB_READY; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); @@ -1462,9 +1250,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, #endif /*polling:*/ /* Send CDB ..command block ......... */ - dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, - srb->cmd->cmnd[0], srb->tag_number); if (srb->flag & AUTO_REQSENSE) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); @@ -1486,8 +1271,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, * we caught an interrupt (must be reset or reselection ... ) * : Let's process it first! */ - dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n", - srb->cmd, dcb->target_id, dcb->target_lun); srb->state = SRB_READY; free_tag(dcb, srb); srb->msg_count = 0; @@ -1551,17 +1334,9 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb, /* This acknowledges the IRQ */ scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); - if ((scsi_status & 0x2007) == 0x2002) - dprintkl(KERN_DEBUG, - "COP after COP completed? 
%04x\n", scsi_status); - if (debug_enabled(DBG_KG)) { - if (scsi_intstatus & INT_SELTIMEOUT) - dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n"); - } - /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */ if (timer_pending(&acb->selto_timer)) - del_timer(&acb->selto_timer); + timer_delete(&acb->selto_timer); if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) { disconnect(acb); /* bus free interrupt */ @@ -1571,27 +1346,21 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb, reselect(acb); goto out_unlock; } - if (scsi_intstatus & INT_SELECT) { - dprintkl(KERN_INFO, "Host does not support target mode!\n"); + if (scsi_intstatus & INT_SELECT) goto out_unlock; - } + if (scsi_intstatus & INT_SCSIRESET) { scsi_reset_detect(acb); goto out_unlock; } if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) { dcb = acb->active_dcb; - if (!dcb) { - dprintkl(KERN_DEBUG, - "Oops: BusService (%04x %02x) w/o ActiveDCB!\n", - scsi_status, scsi_intstatus); + if (!dcb) goto out_unlock; - } + srb = dcb->active_srb; - if (dcb->flag & ABORT_DEV_) { - dprintkdbg(DBG_0, "MsgOut Abort Device.....\n"); + if (dcb->flag & ABORT_DEV_) enable_msgout_abort(acb, srb); - } /* software sequential machine */ phase = (u16)srb->scsi_phase; @@ -1659,9 +1428,7 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id) } else if (dma_status & 0x20) { /* Error from the DMA engine */ - dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status); #if 0 - dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n"); if (acb->active_dcb) { acb->active_dcb-> flag |= ABORT_DEV_; if (acb->active_dcb->active_srb) @@ -1669,7 +1436,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id) } DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO); #else - dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n"); acb = NULL; #endif handled = IRQ_HANDLED; @@ -1682,7 +1448,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id) static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd); if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) *pscsi_status = PH_BUS_FREE; /*.. initial phase */ @@ -1696,18 +1461,12 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, { u16 i; u8 *ptr; - dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgout_phase1"); - if (!(srb->state & SRB_MSGOUT)) { + if (!(srb->state & SRB_MSGOUT)) srb->state |= SRB_MSGOUT; - dprintkl(KERN_DEBUG, - "msgout_phase1: (0x%p) Phase unexpected\n", - srb->cmd); /* So what ? 
*/ - } + if (!srb->msg_count) { - dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n", - srb->cmd); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ @@ -1728,7 +1487,6 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); } @@ -1739,7 +1497,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, struct DeviceCtlBlk *dcb; u8 *ptr; u16 i; - dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "command_phase1"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN); @@ -1768,26 +1525,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, /* - * Verify that the remaining space in the hw sg lists is the same as - * the count of remaining bytes in srb->total_xfer_length - */ -static void sg_verify_length(struct ScsiReqBlk *srb) -{ - if (debug_enabled(DBG_SG)) { - unsigned len = 0; - unsigned idx = srb->sg_index; - struct SGentry *psge = srb->segment_x + idx; - for (; idx < srb->sg_count; psge++, idx++) - len += psge->length; - if (len != srb->total_xfer_length) - dprintkdbg(DBG_SG, - "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n", - srb->total_xfer_length, len); - } -} - - -/* * Compute the next Scatter Gather list index and adjust its length * and address if necessary */ @@ -1797,15 +1534,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) u32 xferred = srb->total_xfer_length - left; /* bytes transferred */ struct SGentry *psge = srb->segment_x + srb->sg_index; - dprintkdbg(DBG_0, - "sg_update_list: Transferred %i of %i bytes, %i remain\n", - xferred, srb->total_xfer_length, left); if (xferred == 0) { /* nothing to update since we did not transfer any data */ return; } - sg_verify_length(srb); srb->total_xfer_length = left; /* update remaining count */ for (idx = srb->sg_index; idx < srb->sg_count; idx++) { if (xferred >= psge->length) { @@ -1826,7 +1559,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) } psge++; } - sg_verify_length(srb); } @@ -1882,8 +1614,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, struct DeviceCtlBlk *dcb = srb->dcb; u16 scsi_status = *pscsi_status; u32 d_left_counter = 0; - dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); /* * KG: We need to drain the buffers before we draw any conclusions! 
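sg_verify_length() compiled to a no-op unless DBG_SG was enabled, so it leaves with the rest of the debug scaffolding; sg_update_list() is unchanged apart from losing the two calls. The invariant it policed still holds and is cheap to state. A sketch against the driver's own types, with an illustrative helper name:

/* The bytes left in the hardware S/G list must equal the remaining
 * transfer count tracked in the request block. */
static bool srb_sg_len_consistent(const struct ScsiReqBlk *srb)
{
	const struct SGentry *psge = srb->segment_x + srb->sg_index;
	size_t len = 0;
	unsigned int idx;

	for (idx = srb->sg_index; idx < srb->sg_count; psge++, idx++)
		len += psge->length;

	return len == srb->total_xfer_length;
}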
@@ -1897,14 +1627,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, * KG: Stop DMA engine pushing more data into the SCSI FIFO * If we need more data, the DMA SG list will be freshly set up, anyway */ - dprintkdbg(DBG_PIO, "data_out_phase0: " - "DMA{fifocnt=0x%02x fifostat=0x%02x} " - "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n", - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status, - srb->total_xfer_length); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO); if (!(srb->state & SRB_XFERPAD)) { @@ -1928,16 +1650,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, if (dcb->sync_period & WIDE_SYNC) d_left_counter <<= 1; - dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n" - "SCSI{fifocnt=0x%02x cnt=0x%08x} " - "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n", - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read32(acb, TRM_S1040_DMA_CXCNT)); } /* * calculate all the residue data that not yet tranfered @@ -1958,9 +1670,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC && scsi_bufflen(srb->cmd) % 2) { d_left_counter = 0; - dprintkl(KERN_INFO, - "data_out_phase0: Discard 1 byte (0x%02x)\n", - scsi_status); } /* * KG: Oops again. Same thinko as above: The SCSI might have been @@ -1991,8 +1700,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, || ((oldxferred & ~PAGE_MASK) == (PAGE_SIZE - diff)) ) { - dprintkl(KERN_INFO, "data_out_phase0: " - "Work around chip bug (%i)?\n", diff); d_left_counter = srb->total_xfer_length - diff; sg_update_list(srb, d_left_counter); @@ -2003,17 +1710,14 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, } } } - if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) { + if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) cleanup_after_transfer(acb, srb); - } } static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); clear_fifo(acb, "data_out_phase1"); /* do prepare before transfer when data out phase */ data_io_transfer(acb, srb, XFERDATAOUT); @@ -2024,8 +1728,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, { u16 scsi_status = *pscsi_status; - dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); /* * KG: DataIn is much more tricky than DataOut. 
When the device is finished @@ -2045,8 +1747,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, unsigned int sc, fc; if (scsi_status & PARITYERROR) { - dprintkl(KERN_INFO, "data_in_phase0: (0x%p) " - "Parity Error\n", srb->cmd); srb->status |= PARITY_ERROR; } /* @@ -2058,26 +1758,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) { #if 0 int ctr = 6000000; - dprintkl(KERN_DEBUG, - "DIP0: Wait for DMA FIFO to flush ...\n"); /*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */ /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */ /*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */ while (! (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80) && --ctr); - if (ctr < 6000000 - 1) - dprintkl(KERN_DEBUG - "DIP0: Had to wait for DMA ...\n"); - if (!ctr) - dprintkl(KERN_ERR, - "Deadlock in DIP0 waiting for DMA FIFO empty!!\n"); /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */ #endif - dprintkdbg(DBG_KG, "data_in_phase0: " - "DMA{fifocnt=0x%02x fifostat=0x%02x}\n", - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT)); } /* Now: Check remainig data: The SCSI counters should tell us ... */ sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER); @@ -2085,17 +1773,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, d_left_counter = sc + ((fc & 0x1f) << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 : 0)); - dprintkdbg(DBG_KG, "data_in_phase0: " - "SCSI{fifocnt=0x%02x%s ctr=0x%08x} " - "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} " - "Remain{totxfer=%i scsi_fifo+ctr=%i}\n", - fc, - (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", - sc, - fc, - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read32(acb, TRM_S1040_DMA_CXCNT), - srb->total_xfer_length, d_left_counter); #if DC395x_LASTPIO /* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */ if (d_left_counter @@ -2104,12 +1781,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, /*u32 addr = (srb->segment_x[srb->sg_index].address); */ /*sg_update_list (srb, d_left_counter); */ - dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) " - "for remaining %i bytes:", - fc & 0x1f, - (srb->dcb->sync_period & WIDE_SYNC) ? 
- "words" : "bytes", - srb->total_xfer_length); if (srb->dcb->sync_period & WIDE_SYNC) DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, CFG2_WIDEFIFO); @@ -2133,9 +1804,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); *virt++ = byte; - if (debug_enabled(DBG_PIO)) - printk(" %02x", byte); - d_left_counter--; sg_subtract_one(srb); @@ -2158,8 +1826,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, *virt++ = byte; srb->total_xfer_length--; - if (debug_enabled(DBG_PIO)) - printk(" %02x", byte); } DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); @@ -2168,10 +1834,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, scsi_kunmap_atomic_sg(base); local_irq_restore(flags); } - /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */ /*srb->total_xfer_length = 0; */ - if (debug_enabled(DBG_PIO)) - printk("\n"); } #endif /* DC395x_LASTPIO */ @@ -2207,9 +1870,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, TempDMAstatus = DC395x_read8(acb, TRM_S1040_DMA_STATUS); } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr); - if (!ctr) - dprintkl(KERN_ERR, - "Deadlock in DataInPhase0 waiting for DMA!!\n"); srb->total_xfer_length = 0; #endif srb->total_xfer_length = d_left_counter; @@ -2226,17 +1886,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, } } /* KG: The target may decide to disconnect: Empty FIFO before! */ - if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) { + if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) cleanup_after_transfer(acb, srb); - } } static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); data_io_transfer(acb, srb, XFERDATAIN); } @@ -2246,13 +1903,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, { struct DeviceCtlBlk *dcb = srb->dcb; u8 bval; - dprintkdbg(DBG_0, - "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, - ((io_dir & DMACMD_DIR) ? 'r' : 'w'), - srb->total_xfer_length, srb->sg_index, srb->sg_count); - if (srb == acb->tmp_srb) - dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n"); + if (srb->sg_index >= srb->sg_count) { /* can't happen? out of bounds error */ return; @@ -2265,9 +1916,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, * Maybe, even ABORTXFER would be appropriate */ if (dma_status & XFERPENDING) { - dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! 
" - "Expect trouble!\n"); - dump_register_info(acb, dcb, srb); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); } /* clear_fifo(acb, "IO"); */ @@ -2346,9 +1994,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, left_io -= len; while (len--) { - if (debug_enabled(DBG_PIO)) - printk(" %02x", *virt); - DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++); sg_subtract_one(srb); @@ -2360,14 +2005,10 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, if (srb->dcb->sync_period & WIDE_SYNC) { if (ln % 2) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); - if (debug_enabled(DBG_PIO)) - printk(" |00"); } DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); } /*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */ - if (debug_enabled(DBG_PIO)) - printk("\n"); DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); } @@ -2419,8 +2060,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ srb->state = SRB_COMPLETED; @@ -2433,8 +2072,6 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); srb->state = SRB_STATUS; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); @@ -2464,9 +2101,6 @@ static inline void msgin_reject(struct AdapterCtlBlk *acb, DC395x_ENABLE_MSGOUT; srb->state &= ~SRB_MSGIN; srb->state |= SRB_MSGOUT; - dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n", - srb->msgin_buf[0], - srb->dcb->target_id, srb->dcb->target_lun); } @@ -2475,13 +2109,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, { struct ScsiReqBlk *srb = NULL; struct ScsiReqBlk *i; - dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n", - srb->cmd, tag, srb); - - if (!(dcb->tag_mask & (1 << tag))) - dprintkl(KERN_DEBUG, - "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n", - dcb->tag_mask, tag); if (list_empty(&dcb->srb_going_list)) goto mingx0; @@ -2494,8 +2121,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, if (!srb) goto mingx0; - dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n", - srb->cmd, srb->dcb->target_id, srb->dcb->target_lun); if (dcb->flag & ABORT_DEV_) { /*srb->state = SRB_ABORT_SENT; */ enable_msgout_abort(acb, srb); @@ -2518,7 +2143,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, srb->msgout_buf[0] = ABORT_TASK; srb->msg_count = 1; DC395x_ENABLE_MSGOUT; - dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag); return srb; } @@ -2537,8 +2161,6 @@ static inline void reprogram_regs(struct AdapterCtlBlk *acb, static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; - dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n", - dcb->target_id, dcb->target_lun); dcb->sync_mode &= ~(SYNC_NEGO_ENABLE); dcb->sync_mode |= SYNC_NEGO_DONE; @@ -2551,7 +2173,6 @@ static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) && !(dcb->sync_mode & 
WIDE_NEGO_DONE)) { build_wdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n"); } } @@ -2562,12 +2183,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) struct DeviceCtlBlk *dcb = srb->dcb; u8 bval; int fact; - dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins " - "(%02i.%01i MHz) Offset %i\n", - dcb->target_id, srb->msgin_buf[3] << 2, - (250 / srb->msgin_buf[3]), - ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3], - srb->msgin_buf[4]); if (srb->msgin_buf[4] > 15) srb->msgin_buf[4] = 15; @@ -2584,10 +2199,7 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) || dcb->min_nego_period > clock_period[bval])) bval++; - if (srb->msgin_buf[3] < clock_period[bval]) - dprintkl(KERN_INFO, - "msgin_set_sync: Increase sync nego period to %ins\n", - clock_period[bval] << 2); + srb->msgin_buf[3] = clock_period[bval]; dcb->sync_period &= 0xf0; dcb->sync_period |= ALT_SYNC | bval; @@ -2598,18 +2210,8 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) else fact = 250; - dprintkl(KERN_INFO, - "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n", - dcb->target_id, (fact == 500) ? "Wide16" : "", - dcb->min_nego_period << 2, dcb->sync_offset, - (fact / dcb->min_nego_period), - ((fact % dcb->min_nego_period) * 10 + - dcb->min_nego_period / 2) / dcb->min_nego_period); - if (!(srb->state & SRB_DO_SYNC_NEGO)) { /* Reply with corrected SDTR Message */ - dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n", - srb->msgin_buf[3] << 2, srb->msgin_buf[4]); memcpy(srb->msgout_buf, srb->msgin_buf, 5); srb->msg_count = 5; @@ -2620,7 +2222,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) && !(dcb->sync_mode & WIDE_NEGO_DONE)) { build_wdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n"); } } srb->state &= ~SRB_DO_SYNC_NEGO; @@ -2634,7 +2235,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; - dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id); dcb->sync_period &= ~WIDE_SYNC; dcb->sync_mode &= ~(WIDE_NEGO_ENABLE); @@ -2645,7 +2245,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb, && !(dcb->sync_mode & SYNC_NEGO_DONE)) { build_sdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n"); } } @@ -2654,15 +2253,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) struct DeviceCtlBlk *dcb = srb->dcb; u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO && acb->config & HCC_WIDE_CARD) ? 
1 : 0; - dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id); if (srb->msgin_buf[3] > wide) srb->msgin_buf[3] = wide; /* Completed */ if (!(srb->state & SRB_DO_WIDE_NEGO)) { - dprintkl(KERN_DEBUG, - "msgin_set_wide: Wide nego initiated <%02i>\n", - dcb->target_id); memcpy(srb->msgout_buf, srb->msgin_buf, 4); srb->msg_count = 4; srb->state |= SRB_DO_WIDE_NEGO; @@ -2676,15 +2271,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) dcb->sync_period &= ~WIDE_SYNC; srb->state &= ~SRB_DO_WIDE_NEGO; /*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */ - dprintkdbg(DBG_1, - "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n", - (8 << srb->msgin_buf[3]), dcb->target_id); reprogram_regs(acb, dcb); if ((dcb->sync_mode & SYNC_NEGO_ENABLE) && !(dcb->sync_mode & SYNC_NEGO_DONE)) { build_sdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n"); } } @@ -2705,7 +2296,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { struct DeviceCtlBlk *dcb = acb->active_dcb; - dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd); srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); if (msgin_completed(srb->msgin_buf, acb->msg_len)) { @@ -2759,7 +2349,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, case IGNORE_WIDE_RESIDUE: /* Discard wide residual */ - dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n"); break; case COMMAND_COMPLETE: @@ -2771,20 +2360,12 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, * SAVE POINTER may be ignored as we have the struct * ScsiReqBlk* associated with the scsi command. */ - dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " - "SAVE POINTER rem=%i Ignore\n", - srb->cmd, srb->total_xfer_length); break; case RESTORE_POINTERS: - dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. 
Ignore\n"); break; case ABORT: - dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " - "<%02i-%i> ABORT msg\n", - srb->cmd, dcb->target_id, - dcb->target_lun); dcb->flag |= ABORT_DEV_; enable_msgout_abort(acb, srb); break; @@ -2792,7 +2373,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, default: /* reject unknown messages */ if (srb->msgin_buf[0] & IDENTIFY_BASE) { - dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n"); srb->msg_count = 1; srb->msgout_buf[0] = dcb->identify_msg; DC395x_ENABLE_MSGOUT; @@ -2815,7 +2395,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgin_phase1"); DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); if (!(srb->state & SRB_MSGIN)) { @@ -2869,7 +2448,6 @@ static void disconnect(struct AdapterCtlBlk *acb) struct ScsiReqBlk *srb; if (!dcb) { - dprintkl(KERN_ERR, "disconnect: No such device\n"); udelay(500); /* Suspend queue for a while */ acb->last_reset = @@ -2881,21 +2459,16 @@ static void disconnect(struct AdapterCtlBlk *acb) } srb = dcb->active_srb; acb->active_dcb = NULL; - dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd); srb->scsi_phase = PH_BUS_FREE; /* initial phase */ clear_fifo(acb, "disconnect"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); if (srb->state & SRB_UNEXPECT_RESEL) { - dprintkl(KERN_ERR, - "disconnect: Unexpected reselection <%02i-%i>\n", - dcb->target_id, dcb->target_lun); srb->state = 0; waiting_process_next(acb); } else if (srb->state & SRB_ABORT_SENT) { dcb->flag &= ~ABORT_DEV_; acb->last_reset = jiffies + HZ / 2 + 1; - dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); doing_srb_done(acb, DID_ABORT, srb->cmd, 1); waiting_process_next(acb); } else { @@ -2910,16 +2483,10 @@ static void disconnect(struct AdapterCtlBlk *acb) if (srb->state != SRB_START_ && srb->state != SRB_MSGOUT) { srb->state = SRB_READY; - dprintkl(KERN_DEBUG, - "disconnect: (0x%p) Unexpected\n", - srb->cmd); srb->target_status = SCSI_STAT_SEL_TIMEOUT; goto disc1; } else { /* Normal selection timeout */ - dprintkdbg(DBG_KG, "disconnect: (0x%p) " - "<%02i-%i> SelTO\n", srb->cmd, - dcb->target_id, dcb->target_lun); if (srb->retry_count++ > DC395x_MAX_RETRIES || acb->scan_devices) { srb->target_status = @@ -2928,9 +2495,6 @@ static void disconnect(struct AdapterCtlBlk *acb) } free_tag(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list); - dprintkdbg(DBG_KG, - "disconnect: (0x%p) Retry\n", - srb->cmd); waiting_set_timer(acb, HZ / 20); } } else if (srb->state & SRB_DISCONNECT) { @@ -2939,9 +2503,6 @@ static void disconnect(struct AdapterCtlBlk *acb) * SRB_DISCONNECT (This is what we expect!) */ if (bval & 0x40) { - dprintkdbg(DBG_0, "disconnect: SCSI bus stat " - " 0x%02x: ACK set! Other controllers?\n", - bval); /* It could come from another initiator, therefore don't do much ! 
*/ } else waiting_process_next(acb); @@ -2965,7 +2526,6 @@ static void reselect(struct AdapterCtlBlk *acb) struct ScsiReqBlk *srb = NULL; u16 rsel_tar_lun_id; u8 id, lun; - dprintkdbg(DBG_0, "reselect: acb=%p\n", acb); clear_fifo(acb, "reselect"); /*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */ @@ -2974,18 +2534,11 @@ static void reselect(struct AdapterCtlBlk *acb) if (dcb) { /* Arbitration lost but Reselection win */ srb = dcb->active_srb; if (!srb) { - dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, " - "but active_srb == NULL\n"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ return; } /* Why the if ? */ if (!acb->scan_devices) { - dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> " - "Arb lost but Resel win rsel=%i stat=0x%04x\n", - srb->cmd, dcb->target_id, - dcb->target_lun, rsel_tar_lun_id, - DC395x_read16(acb, TRM_S1040_SCSI_STATUS)); /*srb->state |= SRB_DISCONNECT; */ srb->state = SRB_READY; @@ -2997,25 +2550,15 @@ static void reselect(struct AdapterCtlBlk *acb) } } /* Read Reselected Target Id and LUN */ - if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8))) - dprintkl(KERN_DEBUG, "reselect: Expects identify msg. " - "Got %i!\n", rsel_tar_lun_id); id = rsel_tar_lun_id & 0xff; lun = (rsel_tar_lun_id >> 8) & 7; dcb = find_dcb(acb, id, lun); if (!dcb) { - dprintkl(KERN_ERR, "reselect: From non existent device " - "<%02i-%i>\n", id, lun); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ return; } acb->active_dcb = dcb; - if (!(dcb->dev_mode & NTC_DO_DISCONNECT)) - dprintkl(KERN_DEBUG, "reselect: in spite of forbidden " - "disconnection? <%02i-%i>\n", - dcb->target_id, dcb->target_lun); - if (dcb->sync_mode & EN_TAG_QUEUEING) { srb = acb->tmp_srb; dcb->active_srb = srb; @@ -3026,9 +2569,6 @@ static void reselect(struct AdapterCtlBlk *acb) /* * abort command */ - dprintkl(KERN_DEBUG, - "reselect: w/o disconnected cmds <%02i-%i>\n", - dcb->target_id, dcb->target_lun); srb = acb->tmp_srb; srb->state = SRB_UNEXPECT_RESEL; dcb->active_srb = srb; @@ -3045,7 +2585,6 @@ static void reselect(struct AdapterCtlBlk *acb) srb->scsi_phase = PH_BUS_FREE; /* initial phase */ /* Program HA ID, target ID, period and offset */ - dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id); DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* host ID */ DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */ @@ -3111,12 +2650,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) if (scsi_sg_count(cmd) && dir != DMA_NONE) { /* unmap DC395x SG list */ - dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n", - srb->sg_bus_addr, SEGMENTX_LEN); dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN, DMA_TO_DEVICE); - dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n", - scsi_sg_count(cmd), scsi_bufflen(cmd)); /* unmap the sg segments */ scsi_dma_unmap(cmd); } @@ -3130,8 +2665,6 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb, if (!(srb->flag & AUTO_REQSENSE)) return; /* Unmap sense buffer */ - dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n", - srb->segment_x[0].address); dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address, srb->segment_x[0].length, DMA_FROM_DEVICE); /* Restore SG stuff */ @@ -3155,16 +2688,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, enum dma_data_direction dir = 
cmd->sc_data_direction; int ckc_only = 1; - dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd, - srb->cmd->device->id, (u8)srb->cmd->device->lun); - dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", - srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, - scsi_sgtalbe(cmd)); status = srb->target_status; set_host_byte(cmd, DID_OK); set_status_byte(cmd, SAM_STAT_GOOD); if (srb->flag & AUTO_REQSENSE) { - dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n"); pci_unmap_srb_sense(acb, srb); /* ** target status.......................... @@ -3172,57 +2699,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, srb->flag &= ~AUTO_REQSENSE; srb->adapter_status = 0; srb->target_status = SAM_STAT_CHECK_CONDITION; - if (debug_enabled(DBG_1)) { - switch (cmd->sense_buffer[2] & 0x0f) { - case NOT_READY: - dprintkl(KERN_DEBUG, - "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case UNIT_ATTENTION: - dprintkl(KERN_DEBUG, - "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case ILLEGAL_REQUEST: - dprintkl(KERN_DEBUG, - "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case MEDIUM_ERROR: - dprintkl(KERN_DEBUG, - "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case HARDWARE_ERROR: - dprintkl(KERN_DEBUG, - "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - } - if (cmd->sense_buffer[7] >= 6) - printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x " - "(0x%08x 0x%08x)\n", - cmd->sense_buffer[2], cmd->sense_buffer[12], - cmd->sense_buffer[13], - *((unsigned int *)(cmd->sense_buffer + 3)), - *((unsigned int *)(cmd->sense_buffer + 8))); - else - printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n", - cmd->sense_buffer[2], - *((unsigned int *)(cmd->sense_buffer + 3))); - } if (status == SAM_STAT_CHECK_CONDITION) { set_host_byte(cmd, DID_BAD_TARGET); goto ckc_e; } - dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n"); set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); @@ -3239,8 +2720,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, return; } else if (status == SAM_STAT_TASK_SET_FULL) { tempcnt = (u8)list_size(&dcb->srb_going_list); - dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n", - dcb->target_id, dcb->target_lun, tempcnt); if (tempcnt > 1) tempcnt--; dcb->max_command = tempcnt; @@ -3314,21 +2793,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, /* Here is the info for Doug Gilbert's sg3 ... */ scsi_set_resid(cmd, srb->total_xfer_length); - if (debug_enabled(DBG_KG)) { - if (srb->total_xfer_length) - dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> " - "cmnd=0x%02x Missed %i bytes\n", - cmd, cmd->device->id, (u8)cmd->device->lun, - cmd->cmnd[0], srb->total_xfer_length); - } if (srb != acb->tmp_srb) { /* Add to free list */ - dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n", - cmd, cmd->result); list_move_tail(&srb->list, &acb->srb_free_list); - } else { - dprintkl(KERN_ERR, "srb_done: ERROR! 
Completed cmd with tmp_srb\n"); } scsi_done(cmd); @@ -3341,7 +2809,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, struct scsi_cmnd *cmd, u8 force) { struct DeviceCtlBlk *dcb; - dprintkl(KERN_INFO, "doing_srb_done: pids "); list_for_each_entry(dcb, &acb->dcb_list, list) { struct ScsiReqBlk *srb; @@ -3365,15 +2832,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, scsi_done(p); } } - if (!list_empty(&dcb->srb_going_list)) - dprintkl(KERN_DEBUG, - "How could the ML send cmnds to the Going queue? <%02i-%i>\n", - dcb->target_id, dcb->target_lun); - if (dcb->tag_mask) - dprintkl(KERN_DEBUG, - "tag_mask for <%02i-%i> should be empty, is %08x!\n", - dcb->target_id, dcb->target_lun, - dcb->tag_mask); /* Waiting queue */ list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) { @@ -3392,19 +2850,13 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, scsi_done(cmd); } } - if (!list_empty(&dcb->srb_waiting_list)) - dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n", - list_size(&dcb->srb_waiting_list), dcb->target_id, - dcb->target_lun); dcb->flag &= ~ABORT_DEV_; } - printk("\n"); } static void reset_scsi_bus(struct AdapterCtlBlk *acb) { - dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb); acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); @@ -3451,10 +2903,9 @@ static void set_basic_config(struct AdapterCtlBlk *acb) static void scsi_reset_detect(struct AdapterCtlBlk *acb) { - dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb); /* delay half a second */ if (timer_pending(&acb->waiting_timer)) - del_timer(&acb->waiting_timer); + timer_delete(&acb->waiting_timer); DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); @@ -3488,8 +2939,6 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { struct scsi_cmnd *cmd = srb->cmd; - dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n", - cmd, cmd->device->id, (u8)cmd->device->lun); srb->flag |= AUTO_REQSENSE; srb->adapter_status = 0; @@ -3511,16 +2960,10 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, srb->segment_x[0].address = dma_map_single(&acb->dev->dev, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); - dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n", - cmd->sense_buffer, srb->segment_x[0].address, - SCSI_SENSE_BUFFERSIZE); srb->sg_count = 1; srb->sg_index = 0; if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. 
else grabs the bus */ - dprintkl(KERN_DEBUG, - "request_sense: (0x%p) failed <%02i-%i>\n", - srb->cmd, dcb->target_id, dcb->target_lun); list_move(&srb->list, &dcb->srb_waiting_list); waiting_set_timer(acb, HZ / 100); } @@ -3548,7 +2991,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb; dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC); - dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun); if (!dcb) return NULL; dcb->acb = NULL; @@ -3598,10 +3040,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, return NULL; } - dprintkdbg(DBG_1, - "device_alloc: <%02i-%i> copy from <%02i-%i>\n", - dcb->target_id, dcb->target_lun, - p->target_id, p->target_lun); dcb->sync_mode = p->sync_mode; dcb->sync_period = p->sync_period; dcb->min_nego_period = p->min_nego_period; @@ -3651,8 +3089,6 @@ static void adapter_remove_device(struct AdapterCtlBlk *acb, { struct DeviceCtlBlk *i; struct DeviceCtlBlk *tmp; - dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n", - dcb->target_id, dcb->target_lun); /* fix up any pointers to this device that we have in the adapter */ if (acb->active_dcb == dcb) @@ -3685,10 +3121,6 @@ static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) { if (list_size(&dcb->srb_going_list) > 1) { - dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> " - "Won't remove because of %i active requests.\n", - dcb->target_id, dcb->target_lun, - list_size(&dcb->srb_going_list)); return; } adapter_remove_device(acb, dcb); @@ -3706,8 +3138,6 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb) { struct DeviceCtlBlk *dcb; struct DeviceCtlBlk *tmp; - dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n", - list_size(&acb->dcb_list)); list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list) adapter_remove_and_free_device(acb, dcb); @@ -3715,13 +3145,13 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb) /** - * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new + * dc395x_sdev_init - Called by the scsi mid layer to tell us about a new * scsi device that we need to deal with. We allocate a new device and then * insert that device into the adapters device list. * * @scsi_device: The new scsi device that we need to handle. **/ -static int dc395x_slave_alloc(struct scsi_device *scsi_device) +static int dc395x_sdev_init(struct scsi_device *scsi_device) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata; struct DeviceCtlBlk *dcb; @@ -3736,12 +3166,12 @@ static int dc395x_slave_alloc(struct scsi_device *scsi_device) /** - * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a + * dc395x_sdev_destroy - Called by the scsi mid layer to tell us about a * device that is going away. * * @scsi_device: The new scsi device that we need to handle. **/ -static void dc395x_slave_destroy(struct scsi_device *scsi_device) +static void dc395x_sdev_destroy(struct scsi_device *scsi_device) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata; struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun); @@ -4002,8 +3432,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port) * Checksum is wrong. 
* Load a set of defaults into the eeprom buffer */ - dprintkl(KERN_WARNING, - "EEProm checksum error: using default values and options.\n"); eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM; eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8); eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040; @@ -4055,15 +3483,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port) **/ static void print_eeprom_settings(struct NvRamType *eeprom) { - dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n", - eeprom->scsi_id, - eeprom->target[0].period, - clock_speed[eeprom->target[0].period] / 10, - clock_speed[eeprom->target[0].period] % 10, - eeprom->target[0].cfg0); - dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n", - eeprom->channel_cfg, eeprom->max_tag, - 1 << eeprom->max_tag, eeprom->delay_time); } @@ -4094,15 +3513,12 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) for (i = 0; i < DC395x_MAX_SRB_CNT; i++) acb->srb_array[i].segment_x = NULL; - dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages); while (pages--) { ptr = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!ptr) { adapter_sg_tables_free(acb); return 1; } - dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n", - PAGE_SIZE, ptr, srb_idx); i = 0; while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT) acb->srb_array[srb_idx++].segment_x = @@ -4111,8 +3527,6 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) if (i < srbs_per_page) acb->srb.segment_x = ptr + (i * DC395x_MAX_SG_LISTENTRY); - else - dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n"); return 0; } @@ -4132,8 +3546,6 @@ static void adapter_print_config(struct AdapterCtlBlk *acb) u8 bval; bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS); - dprintkl(KERN_INFO, "%sConnectors: ", - ((bval & WIDESCSI) ? "(Wide) " : "")); if (!(bval & CON5068)) printk("ext%s ", !(bval & EXT68HIGH) ? 
"68" : "50"); if (!(bval & CON68)) @@ -4293,7 +3705,6 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb) acb->config |= HCC_SCSI_RESET; if (acb->config & HCC_SCSI_RESET) { - dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n"); DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); /*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */ @@ -4327,7 +3738,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, u32 io_port_len, unsigned int irq) { if (!request_region(io_port, io_port_len, DC395X_NAME)) { - dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port); goto failed; } /* store port base to indicate we have registered it */ @@ -4336,7 +3746,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) { /* release the region we just claimed */ - dprintkl(KERN_INFO, "Failed to register IRQ\n"); goto failed; } /* store irq to indicate we have registered it */ @@ -4353,18 +3762,12 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, adapter_print_config(acb); if (adapter_sg_tables_alloc(acb)) { - dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n"); goto failed; } adapter_init_scsi_host(acb->scsi_host); adapter_init_chip(acb); set_basic_config(acb); - dprintkdbg(DBG_0, - "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p " - "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n", - acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk), - sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk)); return 0; failed: @@ -4415,9 +3818,9 @@ static void adapter_uninit(struct AdapterCtlBlk *acb) /* remove timers */ if (timer_pending(&acb->waiting_timer)) - del_timer(&acb->waiting_timer); + timer_delete(&acb->waiting_timer); if (timer_pending(&acb->selto_timer)) - del_timer(&acb->selto_timer); + timer_delete(&acb->selto_timer); adapter_uninit_chip(acb); adapter_remove_and_free_all_devices(acb); @@ -4528,14 +3931,6 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) seq_putc(m, '\n'); } - if (debug_enabled(DBG_1)) { - seq_printf(m, "DCB list for ACB %p:\n", acb); - list_for_each_entry(dcb, &acb->dcb_list, list) { - seq_printf(m, "%p -> ", dcb); - } - seq_puts(m, "END\n"); - } - DC395x_UNLOCK_IO(acb->scsi_host, flags); return 0; } @@ -4547,8 +3942,8 @@ static const struct scsi_host_template dc395x_driver_template = { .show_info = dc395x_show_info, .name = DC395X_BANNER " " DC395X_VERSION, .queuecommand = dc395x_queue_command, - .slave_alloc = dc395x_slave_alloc, - .slave_destroy = dc395x_slave_destroy, + .sdev_init = dc395x_sdev_init, + .sdev_destroy = dc395x_sdev_destroy, .can_queue = DC395x_MAX_CAN_QUEUE, .this_id = 7, .sg_tablesize = DC395x_MAX_SG_TABLESIZE, @@ -4560,21 +3955,6 @@ static const struct scsi_host_template dc395x_driver_template = { /** - * banner_display - Display banner on first instance of driver - * initialized. - **/ -static void banner_display(void) -{ - static int banner_done = 0; - if (!banner_done) - { - dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION); - banner_done = 1; - } -} - - -/** * dc395x_init_one - Initialise a single instance of the adapter. 
* * The PCI layer will call this once for each instance of the adapter @@ -4595,33 +3975,25 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id) unsigned int io_port_len; unsigned int irq; - dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev)); - banner_display(); - if (pci_enable_device(dev)) - { - dprintkl(KERN_INFO, "PCI Enable device failed.\n"); return -ENODEV; - } + io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK; io_port_len = pci_resource_len(dev, 0); irq = dev->irq; - dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq); /* allocate scsi host information (includes out adapter) */ scsi_host = scsi_host_alloc(&dc395x_driver_template, sizeof(struct AdapterCtlBlk)); - if (!scsi_host) { - dprintkl(KERN_INFO, "scsi_host_alloc failed\n"); + if (!scsi_host) goto fail; - } + acb = (struct AdapterCtlBlk*)scsi_host->hostdata; acb->scsi_host = scsi_host; acb->dev = dev; /* initialise the adapter and everything we need */ if (adapter_init(acb, io_port_base, io_port_len, irq)) { - dprintkl(KERN_INFO, "adapter init failed\n"); acb = NULL; goto fail; } @@ -4629,10 +4001,9 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_set_master(dev); /* get the scsi mid level to scan for new devices on the bus */ - if (scsi_add_host(scsi_host, &dev->dev)) { - dprintkl(KERN_ERR, "scsi_add_host failed\n"); + if (scsi_add_host(scsi_host, &dev->dev)) goto fail; - } + pci_set_drvdata(dev, scsi_host); scsi_scan_host(scsi_host); @@ -4659,8 +4030,6 @@ static void dc395x_remove_one(struct pci_dev *dev) struct Scsi_Host *scsi_host = pci_get_drvdata(dev); struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata); - dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb); - scsi_remove_host(scsi_host); adapter_uninit(acb); pci_disable_device(dev); @@ -4668,7 +4037,7 @@ static void dc395x_remove_one(struct pci_dev *dev) } -static struct pci_device_id dc395x_pci_table[] = { +static const struct pci_device_id dc395x_pci_table[] = { { .vendor = PCI_VENDOR_ID_TEKRAM, .device = PCI_DEVICE_ID_TEKRAM_TRMS1040, diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index a226dc1b65d7..1bf5948d1188 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -8,7 +8,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_proto.h> #include <scsi/scsi_dbg.h> @@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state) } } -static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, - struct scsi_sense_hdr *sense_hdr) +static void alua_handle_state_transition(struct scsi_device *sdev) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg; + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + rcu_read_unlock(); + alua_check(sdev, false); +} + +static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition */ - rcu_read_lock(); - pg = rcu_dereference(h->pg); - if (pg) - pg->state = SCSI_ACCESS_STATE_TRANSITIONING; - rcu_read_unlock(); - alua_check(sdev, false); + alua_handle_state_transition(sdev); 
return NEEDS_RETRY; } break; case UNIT_ATTENTION: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { + /* + * LUN Not Accessible - ALUA state transition + */ + alua_handle_state_transition(sdev); + return NEEDS_RETRY; + } if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { /* * Power On, Reset, or Bus Device Reset. @@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev) retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, &sense_hdr); - if (sense_hdr.sense_key == NOT_READY && + if ((sense_hdr.sense_key == NOT_READY || + sense_hdr.sense_key == UNIT_ATTENTION) && sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) return SCSI_DH_RETRY; else if (retval) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index f8a09e3eba58..6e1b252cea0e 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -822,7 +822,8 @@ static int __init rdac_init(void) /* * Create workqueue to handle mode selects for rdac */ - kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); + kmpath_rdacd = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "kmpath_rdacd"); if (!kmpath_rdacd) { scsi_unregister_device_handler(&rdac_dh); printk(KERN_ERR "kmpath_rdacd creation failed.\n"); diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index dfb091d34363..d6d091b2f3c7 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c @@ -127,7 +127,7 @@ static void dmx3191d_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -static struct pci_device_id dmx3191d_pci_tbl[] = { +static const struct pci_device_id dmx3191d_pci_tbl[] = { {PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, { } diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c index 49fd2cfed70c..1bd42f7db177 100644 --- a/drivers/scsi/elx/efct/efct_driver.c +++ b/drivers/scsi/elx/efct/efct_driver.c @@ -310,7 +310,7 @@ efct_fw_reset(struct efct *efct) * during attach. 
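The scsi_dh_rdac hunk above is one instance of a pattern repeated through this diff (efct_lio, esas2r_init, and fcoe_sysfs below): create_singlethread_workqueue() is replaced by alloc_ordered_workqueue(), which keeps the strict one-at-a-time execution order, makes the WQ_MEM_RECLAIM guarantee explicit, and accepts a printf-style name, so callers no longer need to snprintf into a name buffer first. A minimal sketch, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;	/* hypothetical */

static int example_wq_init(int instance)
{
	/* old: snprintf(name, ...); create_singlethread_workqueue(name); */
	example_wq = alloc_ordered_workqueue("example_wq/%d",
					     WQ_MEM_RECLAIM, instance);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}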
*/ if (timer_pending(&efct->xport->stats_timer)) - del_timer(&efct->xport->stats_timer); + timer_delete(&efct->xport->stats_timer); if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) { efc_log_info(efct, "failed to reset firmware\n"); @@ -470,7 +470,7 @@ out: return rc; } -static struct pci_device_id efct_pci_table[] = { +static const struct pci_device_id efct_pci_table[] = { {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0}, {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0}, {} /* terminate list */ @@ -735,7 +735,7 @@ efct_pci_io_resume(struct pci_dev *pdev) MODULE_DEVICE_TABLE(pci, efct_pci_table); -static struct pci_error_handlers efct_pci_err_handler = { +static const struct pci_error_handlers efct_pci_err_handler = { .error_detected = efct_pci_io_error_detected, .slot_reset = efct_pci_io_slot_reset, .resume = efct_pci_io_resume, @@ -778,5 +778,6 @@ static void __exit efct_exit(void) module_init(efct_init); module_exit(efct_exit); MODULE_VERSION(EFCT_DRIVER_VERSION); +MODULE_DESCRIPTION("Emulex Fibre Channel Target driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Broadcom"); diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c index 6a6ec32c46bd..9ac69356b13e 100644 --- a/drivers/scsi/elx/efct/efct_lio.c +++ b/drivers/scsi/elx/efct/efct_lio.c @@ -1114,7 +1114,8 @@ int efct_scsi_tgt_new_device(struct efct *efct) atomic_set(&efct->tgt_efct.watermark_hit, 0); atomic_set(&efct->tgt_efct.initiator_count, 0); - lio_wq = create_singlethread_workqueue("efct_lio_worker"); + lio_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + "efct_lio_worker"); if (!lio_wq) { efc_log_err(efct, "workqueue create failed\n"); return -EIO; diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c index cf4dced20b8b..2aca60f6428e 100644 --- a/drivers/scsi/elx/efct/efct_xport.c +++ b/drivers/scsi/elx/efct/efct_xport.c @@ -508,7 +508,7 @@ efct_xport_detach(struct efct_xport *xport) /*Shutdown FC Statistics timer*/ if (timer_pending(&xport->stats_timer)) - del_timer(&xport->stats_timer); + timer_delete(&xport->stats_timer); efct_hw_teardown(&efct->hw); diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c index 9661eea93aa1..cf7e738c4edc 100644 --- a/drivers/scsi/elx/libefc/efc_fabric.c +++ b/drivers/scsi/elx/libefc/efc_fabric.c @@ -888,7 +888,7 @@ gidpt_delay_timer_cb(struct timer_list *t) { struct efc_node *node = from_timer(node, t, gidpt_delay_timer); - del_timer(&node->gidpt_delay_timer); + timer_delete(&node->gidpt_delay_timer); efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL); } diff --git a/drivers/scsi/elx/libefc/efc_node.c b/drivers/scsi/elx/libefc/efc_node.c index a1b4ce6a27b4..f17e052fe537 100644 --- a/drivers/scsi/elx/libefc/efc_node.c +++ b/drivers/scsi/elx/libefc/efc_node.c @@ -149,7 +149,7 @@ efc_node_free(struct efc_node *node) /* if the gidpt_delay_timer is still running, then delete it */ if (timer_pending(&node->gidpt_delay_timer)) - del_timer(&node->gidpt_delay_timer); + timer_delete(&node->gidpt_delay_timer); xa_erase(&nport->lookup, node->rnode.fc_id); diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c index 2e83a667901f..1a7437f4328e 100644 --- a/drivers/scsi/elx/libefc/efc_nport.c +++ b/drivers/scsi/elx/libefc/efc_nport.c @@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain, spin_lock_irqsave(&efc->lock, flags); list_for_each_entry(nport, &domain->nport_list, list_entry) { if (nport->wwpn == wwpn && nport->wwnn 
== wwnn) { - kref_put(&nport->ref, nport->release); /* Shutdown this NPORT */ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); + kref_put(&nport->ref, nport->release); break; } } diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c index 5e7fb110bc3f..d9a231fc0e0d 100644 --- a/drivers/scsi/elx/libefc_sli/sli4.c +++ b/drivers/scsi/elx/libefc_sli/sli4.c @@ -3804,7 +3804,7 @@ sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc, wr_obj->desired_write_len_dword = cpu_to_le32(dwflags); wr_obj->write_offset = cpu_to_le32(offset); - strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1); + strscpy(wr_obj->object_name, obj_name); wr_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor; @@ -3833,7 +3833,7 @@ sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name) SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_delete_object)); - strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1); + strscpy(req->object_name, obj_name); return 0; } @@ -3856,7 +3856,7 @@ sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len, cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN); rd_obj->read_offset = cpu_to_le32(offset); - strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1); + strscpy(rd_obj->object_name, obj_name); rd_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor; diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index ed63f7a9ea54..c48275d53aef 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -929,7 +929,6 @@ struct esas2r_adapter { struct list_head fw_event_list; spinlock_t fw_event_lock; u8 fw_events_off; /* if '1', then ignore events */ - char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN]; /* * intr_mode stores the interrupt mode currently being used by this * adapter. 
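The sli4.c hunks above swap strncpy() for the two-argument form of strscpy(), which sizes the copy from sizeof() of the destination array, always NUL-terminates, and returns the copied length or -E2BIG on truncation, instead of strncpy()'s possibly unterminated result. A sketch of the same conversion on a hypothetical structure:

#include <linux/string.h>

struct example_req {
	char object_name[64];		/* hypothetical fixed-width field */
};

static void example_set_name(struct example_req *req, const char *name)
{
	/* old: strncpy(req->object_name, name, sizeof(req->object_name) - 1); */
	strscpy(req->object_name, name);	/* size taken from the array, always terminated */
}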
it is based on the interrupt_mode module parameter, but @@ -1046,10 +1045,6 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a, u32 length, void *data); void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); -void esas2r_build_cli_req(struct esas2r_adapter *a, - struct esas2r_request *rq, - u32 length, - u32 cmd_rsp_len); void esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, @@ -1416,11 +1411,11 @@ static inline void esas2r_comp_list_drain(struct esas2r_adapter *a, } /* sysfs handlers */ -extern struct bin_attribute bin_attr_fw; -extern struct bin_attribute bin_attr_fs; -extern struct bin_attribute bin_attr_vda; -extern struct bin_attribute bin_attr_hw; -extern struct bin_attribute bin_attr_live_nvram; -extern struct bin_attribute bin_attr_default_nvram; +extern const struct bin_attribute bin_attr_fw; +extern const struct bin_attribute bin_attr_fs; +extern const struct bin_attribute bin_attr_vda; +extern const struct bin_attribute bin_attr_hw; +extern const struct bin_attribute bin_attr_live_nvram; +extern const struct bin_attribute bin_attr_default_nvram; #endif /* ESAS2R_H */ diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index c1a5ab662dc8..04a07fe57be2 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -311,9 +311,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, sema_init(&a->nvram_semaphore, 1); esas2r_fw_event_off(a); - snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", - a->index); - a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); + a->fw_event_q = + alloc_ordered_workqueue("esas2r/%d", WQ_MEM_RECLAIM, a->index); init_waitqueue_head(&a->buffered_ioctl_waiter); init_waitqueue_head(&a->nvram_waiter); @@ -440,7 +439,7 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a, if ((test_bit(AF2_INIT_DONE, &a->flags2)) && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { if (!power_management) { - del_timer_sync(&a->timer); + timer_delete_sync(&a->timer); tasklet_kill(&a->tasklet); } esas2r_power_down(a); diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index f700a16cd885..44871746944a 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -66,7 +66,7 @@ static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj) } static ssize_t read_fw(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -75,7 +75,7 @@ static ssize_t read_fw(struct file *file, struct kobject *kobj, } static ssize_t write_fw(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -84,7 +84,7 @@ static ssize_t write_fw(struct file *file, struct kobject *kobj, } static ssize_t read_fs(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -93,7 +93,7 @@ static ssize_t read_fs(struct file *file, struct kobject *kobj, } static ssize_t write_fs(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { 
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -109,7 +109,7 @@ static ssize_t write_fs(struct file *file, struct kobject *kobj, } static ssize_t read_vda(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -118,7 +118,7 @@ static ssize_t read_vda(struct file *file, struct kobject *kobj, } static ssize_t write_vda(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -127,7 +127,7 @@ static ssize_t write_vda(struct file *file, struct kobject *kobj, } static ssize_t read_live_nvram(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -138,7 +138,7 @@ static ssize_t read_live_nvram(struct file *file, struct kobject *kobj, } static ssize_t write_live_nvram(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -158,7 +158,7 @@ static ssize_t write_live_nvram(struct file *file, struct kobject *kobj, } static ssize_t read_default_nvram(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -169,7 +169,7 @@ static ssize_t read_default_nvram(struct file *file, struct kobject *kobj, } static ssize_t read_hw(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -187,7 +187,7 @@ static ssize_t read_hw(struct file *file, struct kobject *kobj, } static ssize_t write_hw(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); @@ -211,12 +211,12 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj, } #define ESAS2R_RW_BIN_ATTR(_name) \ - struct bin_attribute bin_attr_ ## _name = { \ + const struct bin_attribute bin_attr_ ## _name = { \ .attr = \ { .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \ .size = 0, \ - .read = read_ ## _name, \ - .write = write_ ## _name } + .read_new = read_ ## _name, \ + .write_new = write_ ## _name } ESAS2R_RW_BIN_ATTR(fw); ESAS2R_RW_BIN_ATTR(fs); @@ -224,10 +224,10 @@ ESAS2R_RW_BIN_ATTR(vda); ESAS2R_RW_BIN_ATTR(hw); ESAS2R_RW_BIN_ATTR(live_nvram); -struct bin_attribute bin_attr_default_nvram = { +const struct bin_attribute bin_attr_default_nvram = { .attr = { .name = "default_nvram", .mode = S_IRUGO }, .size = 0, - .read = read_default_nvram, + .read_new = read_default_nvram, .write = NULL }; diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c index 30028e56df63..5aa728704dfc 100644 --- a/drivers/scsi/esas2r/esas2r_vda.c +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -444,23 +444,6 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) } } -/* Build a VDA CLI request. 
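The esas2r sysfs handlers above follow the same constification seen in 3w-sas earlier in this diff: struct bin_attribute instances become const, and the handlers move from .read/.write to .read_new/.write_new, whose prototypes take a const struct bin_attribute *. A trimmed sketch of the resulting shape, with hypothetical names:

#include <linux/sysfs.h>
#include <linux/stat.h>

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *attr,
			    char *buf, loff_t off, size_t count)
{
	return 0;	/* fill buf with up to count bytes at offset off */
}

static const struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = S_IRUSR },
	.size = 0,
	.read_new = example_read,	/* callback takes a const attribute pointer */
};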
*/ -void esas2r_build_cli_req(struct esas2r_adapter *a, - struct esas2r_request *rq, - u32 length, - u32 cmd_rsp_len) -{ - struct atto_vda_cli_req *vrq = &rq->vrq->cli; - - clear_vda_request(rq); - - rq->vrq->scsi.function = VDA_FUNC_CLI; - - vrq->length = cpu_to_le32(length); - vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len); - vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge); -} - /* Build a VDA IOCTL request. */ void esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 0175d2282b45..802718ffad84 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -2261,7 +2261,7 @@ static void esp_init_swstate(struct esp *esp) INIT_LIST_HEAD(&esp->active_cmds); INIT_LIST_HEAD(&esp->esp_cmd_pool); - /* Start with a clear state, domain validation (via ->slave_configure, + /* Start with a clear state, domain validation (via ->sdev_configure, * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged * commands. */ @@ -2441,7 +2441,7 @@ static void esp_target_destroy(struct scsi_target *starget) tp->starget = NULL; } -static int esp_slave_alloc(struct scsi_device *dev) +static int esp_sdev_init(struct scsi_device *dev) { struct esp *esp = shost_priv(dev->host); struct esp_target_data *tp = &esp->target[dev->id]; @@ -2463,7 +2463,7 @@ static int esp_slave_alloc(struct scsi_device *dev) return 0; } -static int esp_slave_configure(struct scsi_device *dev) +static int esp_sdev_configure(struct scsi_device *dev, struct queue_limits *lim) { struct esp *esp = shost_priv(dev->host); struct esp_target_data *tp = &esp->target[dev->id]; @@ -2479,7 +2479,7 @@ static int esp_slave_configure(struct scsi_device *dev) return 0; } -static void esp_slave_destroy(struct scsi_device *dev) +static void esp_sdev_destroy(struct scsi_device *dev) { struct esp_lun_data *lp = dev->hostdata; @@ -2667,9 +2667,9 @@ const struct scsi_host_template scsi_esp_template = { .queuecommand = esp_queuecommand, .target_alloc = esp_target_alloc, .target_destroy = esp_target_destroy, - .slave_alloc = esp_slave_alloc, - .slave_configure = esp_slave_configure, - .slave_destroy = esp_slave_destroy, + .sdev_init = esp_sdev_init, + .sdev_configure = esp_sdev_configure, + .sdev_destroy = esp_sdev_destroy, .eh_abort_handler = esp_eh_abort_handler, .eh_bus_reset_handler = esp_eh_bus_reset_handler, .eh_host_reset_handler = esp_eh_host_reset_handler, diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 00cd7c0ccc76..7bb0b69bff24 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -80,7 +80,7 @@ /* ESP config register 4 read-write */ #define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */ -#define ESP_CONGIG4_TEST 0x02 /* Transfer counter test mode (fsc) */ +#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode (fsc) */ #define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */ #define ESP_CONFIG4_RAE 0x08 /* Act. 
negation REQ/ACK (am53c974) */ #define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */ diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index f1429f270170..b911fdb387f3 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -269,7 +269,7 @@ static const struct scsi_host_template fcoe_shost_template = { .eh_abort_handler = fc_eh_abort, .eh_device_reset_handler = fc_eh_device_reset, .eh_host_reset_handler = fc_eh_host_reset, - .slave_alloc = fc_slave_alloc, + .sdev_init = fc_sdev_init, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, @@ -722,7 +722,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) * will return 0, so do this first. */ mfs = netdev->mtu; - if (netdev->features & NETIF_F_FCOE_MTU) { + if (netdev->fcoe_mtu) { mfs = FCOE_MTU; FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); } @@ -1013,7 +1013,7 @@ static void fcoe_if_destroy(struct fc_lport *lport) fc_lport_destroy(lport); /* Stop the transmit retry timer */ - del_timer_sync(&port->timer); + timer_delete_sync(&port->timer); /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport); @@ -1863,7 +1863,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, case NETDEV_CHANGE: break; case NETDEV_CHANGEMTU: - if (netdev->features & NETIF_F_FCOE_MTU) + if (netdev->fcoe_mtu) break; mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 5c8d1ba3f8f3..56d270526c9c 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -302,7 +302,7 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) fcoe_ctlr_set_state(fip, FIP_ST_DISABLED); fcoe_ctlr_reset_fcfs(fip); mutex_unlock(&fip->ctlr_mutex); - del_timer_sync(&fip->timer); + timer_delete_sync(&fip->timer); cancel_work_sync(&fip->timer_work); } EXPORT_SYMBOL(fcoe_ctlr_destroy); @@ -478,7 +478,7 @@ EXPORT_SYMBOL(fcoe_ctlr_link_up); static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) { fcoe_ctlr_reset_fcfs(fip); - del_timer(&fip->timer); + timer_delete(&fip->timer); fip->ctlr_ka_time = 0; fip->port_ka_time = 0; fip->sol_time = 0; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 453665ac6020..0609ca6b9353 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -45,12 +45,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo, */ #define fcoe_ctlr_id(x) \ ((x)->id) -#define fcoe_ctlr_work_q_name(x) \ - ((x)->work_q_name) #define fcoe_ctlr_work_q(x) \ ((x)->work_q) -#define fcoe_ctlr_devloss_work_q_name(x) \ - ((x)->devloss_work_q_name) #define fcoe_ctlr_devloss_work_q(x) \ ((x)->devloss_work_q) #define fcoe_ctlr_mode(x) \ @@ -600,7 +596,7 @@ static const struct attribute_group *fcoe_fcf_attr_groups[] = { static const struct bus_type fcoe_bus_type; static int fcoe_bus_match(struct device *dev, - struct device_driver *drv) + const struct device_driver *drv) { if (dev->bus == &fcoe_bus_type) return 1; @@ -797,18 +793,14 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo; - snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name), - "ctlr_wq_%d", ctlr->id); - ctlr->work_q = create_singlethread_workqueue( - ctlr->work_q_name); + ctlr->work_q = alloc_ordered_workqueue("ctlr_wq_%d", WQ_MEM_RECLAIM, + ctlr->id); if (!ctlr->work_q) goto out_del; - snprintf(ctlr->devloss_work_q_name, - sizeof(ctlr->devloss_work_q_name), - 
"ctlr_dl_wq_%d", ctlr->id); - ctlr->devloss_work_q = create_singlethread_workqueue( - ctlr->devloss_work_q_name); + ctlr->devloss_work_q = alloc_ordered_workqueue("ctlr_dl_wq_%d", + WQ_MEM_RECLAIM, + ctlr->id); if (!ctlr->devloss_work_q) goto out_del_q; diff --git a/drivers/scsi/fdomain_pci.c b/drivers/scsi/fdomain_pci.c index 3e05ce7b89e5..c15b2ce76e9f 100644 --- a/drivers/scsi/fdomain_pci.c +++ b/drivers/scsi/fdomain_pci.c @@ -47,7 +47,7 @@ static void fdomain_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static struct pci_device_id fdomain_pci_table[] = { +static const struct pci_device_id fdomain_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) }, {} }; diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile index 6214a6b2e96d..c025e875009e 100644 --- a/drivers/scsi/fnic/Makefile +++ b/drivers/scsi/fnic/Makefile @@ -2,11 +2,13 @@ obj-$(CONFIG_FCOE_FNIC) += fnic.o fnic-y := \ + fip.o\ fnic_attrs.o \ fnic_isr.o \ fnic_main.o \ fnic_res.o \ fnic_fcs.o \ + fdls_disc.o \ fnic_scsi.o \ fnic_trace.o \ fnic_debugfs.o \ @@ -15,4 +17,5 @@ fnic-y := \ vnic_intr.o \ vnic_rq.o \ vnic_wq_copy.o \ - vnic_wq.o + vnic_wq.o \ + fnic_pci_subsys_devid.o diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c new file mode 100644 index 000000000000..c2b6f4eb338e --- /dev/null +++ b/drivers/scsi/fnic/fdls_disc.c @@ -0,0 +1,4980 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include <linux/workqueue.h> +#include "fnic.h" +#include "fdls_fc.h" +#include "fnic_fdls.h" +#include <scsi/fc/fc_fcp.h> +#include <scsi/scsi_transport_fc.h> +#include <linux/utsname.h> + +#define FC_FC4_TYPE_SCSI 0x08 +#define PORT_SPEED_BIT_8 8 +#define PORT_SPEED_BIT_9 9 +#define PORT_SPEED_BIT_14 14 +#define PORT_SPEED_BIT_15 15 + +/* FNIC FDMI Register HBA Macros */ +#define FNIC_FDMI_NUM_PORTS 1 +#define FNIC_FDMI_NUM_HBA_ATTRS 9 +#define FNIC_FDMI_TYPE_NODE_NAME 0X1 +#define FNIC_FDMI_TYPE_MANUFACTURER 0X2 +#define FNIC_FDMI_MANUFACTURER "Cisco Systems" +#define FNIC_FDMI_TYPE_SERIAL_NUMBER 0X3 +#define FNIC_FDMI_TYPE_MODEL 0X4 +#define FNIC_FDMI_TYPE_MODEL_DES 0X5 +#define FNIC_FDMI_MODEL_DESCRIPTION "Cisco Virtual Interface Card" +#define FNIC_FDMI_TYPE_HARDWARE_VERSION 0X6 +#define FNIC_FDMI_TYPE_DRIVER_VERSION 0X7 +#define FNIC_FDMI_TYPE_ROM_VERSION 0X8 +#define FNIC_FDMI_TYPE_FIRMWARE_VERSION 0X9 +#define FNIC_FDMI_NN_LEN 8 +#define FNIC_FDMI_MANU_LEN 20 +#define FNIC_FDMI_SERIAL_LEN 16 +#define FNIC_FDMI_MODEL_LEN 12 +#define FNIC_FDMI_MODEL_DES_LEN 56 +#define FNIC_FDMI_HW_VER_LEN 16 +#define FNIC_FDMI_DR_VER_LEN 28 +#define FNIC_FDMI_ROM_VER_LEN 8 +#define FNIC_FDMI_FW_VER_LEN 16 + +/* FNIC FDMI Register PA Macros */ +#define FNIC_FDMI_TYPE_FC4_TYPES 0X1 +#define FNIC_FDMI_TYPE_SUPPORTED_SPEEDS 0X2 +#define FNIC_FDMI_TYPE_CURRENT_SPEED 0X3 +#define FNIC_FDMI_TYPE_MAX_FRAME_SIZE 0X4 +#define FNIC_FDMI_TYPE_OS_NAME 0X5 +#define FNIC_FDMI_TYPE_HOST_NAME 0X6 +#define FNIC_FDMI_NUM_PORT_ATTRS 6 +#define FNIC_FDMI_FC4_LEN 32 +#define FNIC_FDMI_SUPP_SPEED_LEN 4 +#define FNIC_FDMI_CUR_SPEED_LEN 4 +#define FNIC_FDMI_MFS_LEN 4 +#define FNIC_FDMI_MFS 0x800 +#define FNIC_FDMI_OS_NAME_LEN 16 +#define FNIC_FDMI_HN_LEN 24 + +#define FDLS_FDMI_PLOGI_PENDING 0x1 +#define FDLS_FDMI_REG_HBA_PENDING 0x2 +#define FDLS_FDMI_RPA_PENDING 0x4 +#define FDLS_FDMI_ABORT_PENDING 0x8 +#define FDLS_FDMI_MAX_RETRY 3 + +#define RETRIES_EXHAUSTED(iport) \ + 
(iport->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY) + +#define FNIC_TPORT_MAX_NEXUS_RESTART (8) + +#define SCHEDULE_OXID_FREE_RETRY_TIME (300) + +/* Private Functions */ +static void fdls_fdmi_register_hba(struct fnic_iport_s *iport); +static void fdls_fdmi_register_pa(struct fnic_iport_s *iport); +static void fdls_send_rpn_id(struct fnic_iport_s *iport); +static void fdls_process_flogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, + void *rx_frame); +static void fnic_fdls_start_plogi(struct fnic_iport_s *iport); +static void fnic_fdls_start_flogi(struct fnic_iport_s *iport); +static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport, + uint32_t fcid, + uint64_t wwpn); +static void fdls_target_restart_nexus(struct fnic_tport_s *tport); +static void fdls_start_tport_timer(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, int timeout); +static void fdls_tport_timer_callback(struct timer_list *t); +static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport); +static void fdls_start_fabric_timer(struct fnic_iport_s *iport, + int timeout); +static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_els_acc_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_els_rjt_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_fabric_abts_frame(uint8_t *frame, + struct fnic_iport_s *iport); + +uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + uint8_t *frame = NULL; + + frame = mempool_alloc(fnic->frame_pool, GFP_ATOMIC); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame"); + return NULL; + } + + memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ); + return frame; +} + +/** + * fdls_alloc_oxid - Allocate an oxid from the bitmap based oxid pool + * @iport: Handle to iport instance + * @oxid_frame_type: Type of frame to allocate + * @active_oxid: the oxid which is in use + * + * Called with fnic lock held + */ +uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type, + uint16_t *active_oxid) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + int idx; + uint16_t oxid; + + lockdep_assert_held(&fnic->fnic_lock); + + /* + * Allocate next available oxid from bitmap + */ + idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ, oxid_pool->next_idx); + if (idx == FNIC_OXID_POOL_SZ) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Alloc oxid: all oxid slots are busy iport state:%d\n", + iport->state); + return FNIC_UNASSIGNED_OXID; + } + + WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap)); + oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */ + + oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type); + *active_oxid = oxid; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "alloc oxid: 0x%x, iport state: %d\n", + oxid, iport->state); + return oxid; +} + +/** + * fdls_free_oxid_idx - Free the oxid using the idx + * @iport: Handle to iport instance + * @oxid_idx: The index to free + * + * Free the oxid immediately and make it available for new requests + * Called with fnic lock held + */ +static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + + lockdep_assert_held(&fnic->fnic_lock); + + 
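fdls_alloc_oxid() above is a cyclic bitmap allocator: it searches for a zero bit starting at next_idx, claims it, and advances next_idx past the claimed slot, so exchange IDs are handed out round-robin and a freed ID is not immediately reused. As in the driver, the scan does not wrap below next_idx within a single call; a full upper range reports busy until next_idx cycles back around. A standalone sketch of the same idea (FNIC_OXID_ENCODE and the frame-type tagging are fnic-specific and omitted):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_POOL_SZ 256		/* hypothetical pool size */

struct example_pool {
	DECLARE_BITMAP(bitmap, EXAMPLE_POOL_SZ);
	int next_idx;
};

static int example_alloc_id(struct example_pool *p)
{
	int idx = find_next_zero_bit(p->bitmap, EXAMPLE_POOL_SZ, p->next_idx);

	if (idx == EXAMPLE_POOL_SZ)
		return -1;		/* every slot from next_idx upward is busy */
	set_bit(idx, p->bitmap);
	p->next_idx = (idx + 1) % EXAMPLE_POOL_SZ;	/* resume scan past this slot */
	return idx;
}

static void example_free_id(struct example_pool *p, int idx)
{
	clear_bit(idx, p->bitmap);
}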
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "free oxid idx: 0x%x\n", oxid_idx); + + WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap)); +} + +/** + * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work + * @work: Handle to work_struct + * + * Scheduled when an oxid is to be freed later + * After freeing expired oxid(s), the handler schedules + * another callback with the remaining time + * of next unexpired entry in the reclaim list. + */ +void fdls_reclaim_oxid_handler(struct work_struct *work) +{ + struct fnic_oxid_pool_s *oxid_pool = container_of(work, + struct fnic_oxid_pool_s, oxid_reclaim_work.work); + struct fnic_iport_s *iport = container_of(oxid_pool, + struct fnic_iport_s, oxid_pool); + struct fnic *fnic = iport->fnic; + struct reclaim_entry_s *reclaim_entry, *next; + unsigned long delay_j, cur_jiffies; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reclaim oxid callback\n"); + + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + /* Though the work was scheduled for one entry, + * walk through and free the expired entries which might have been scheduled + * at around the same time as the first entry + */ + list_for_each_entry_safe(reclaim_entry, next, + &(oxid_pool->oxid_reclaim_list), links) { + + /* The list is always maintained in the order of expiry time */ + cur_jiffies = jiffies; + if (time_before(cur_jiffies, reclaim_entry->expires)) + break; + + list_del(&reclaim_entry->links); + fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx); + kfree(reclaim_entry); + } + + /* schedule to free up the next entry */ + if (!list_empty(&oxid_pool->oxid_reclaim_list)) { + reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list, + struct reclaim_entry_s, links); + + delay_j = reclaim_entry->expires - cur_jiffies; + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Scheduling next callback at:%ld jiffies\n", delay_j); + } + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); +} + +/** + * fdls_free_oxid - Helper function to free the oxid + * @iport: Handle to iport instance + * @oxid: oxid to free + * @active_oxid: the oxid which is in use + * + * Called with fnic lock held + */ +void fdls_free_oxid(struct fnic_iport_s *iport, + uint16_t oxid, uint16_t *active_oxid) +{ + fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid)); + *active_oxid = FNIC_UNASSIGNED_OXID; +} + +/** + * fdls_schedule_oxid_free - Schedule oxid to be freed later + * @iport: Handle to iport instance + * @active_oxid: the oxid which is in use + * + * Gets called in a rare case scenario when both a command + * (fdls or target discovery) timed out and the following ABTS + * timed out as well, without a link change. + * + * Called with fnic lock held + */ +void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + struct reclaim_entry_s *reclaim_entry; + unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); + int oxid_idx = FNIC_OXID_IDX(*active_oxid); + + lockdep_assert_held(&fnic->fnic_lock); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Schedule oxid free. 
oxid: 0x%x\n", *active_oxid); + + *active_oxid = FNIC_UNASSIGNED_OXID; + + reclaim_entry = (struct reclaim_entry_s *) + kzalloc(sizeof(struct reclaim_entry_s), GFP_ATOMIC); + + if (!reclaim_entry) { + FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, + "Failed to allocate memory for reclaim struct for oxid idx: %d\n", + oxid_idx); + + /* Retry the scheduling */ + WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free)); + schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0); + return; + } + + reclaim_entry->oxid_idx = oxid_idx; + reclaim_entry->expires = round_jiffies(jiffies + delay_j); + + list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); + + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); +} + +/** + * fdls_schedule_oxid_free_retry_work - Thread to schedule the + * oxid to be freed later + * + * @work: Handle to the work struct + */ +void fdls_schedule_oxid_free_retry_work(struct work_struct *work) +{ + struct fnic_oxid_pool_s *oxid_pool = container_of(work, + struct fnic_oxid_pool_s, schedule_oxid_free_retry.work); + struct fnic_iport_s *iport = container_of(oxid_pool, + struct fnic_iport_s, oxid_pool); + struct fnic *fnic = iport->fnic; + struct reclaim_entry_s *reclaim_entry; + unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); + unsigned long flags; + int idx; + + for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) { + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Schedule oxid free. oxid idx: %d\n", idx); + + reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL); + if (!reclaim_entry) { + schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, + msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME)); + return; + } + + clear_bit(idx, oxid_pool->pending_schedule_free); + reclaim_entry->oxid_idx = idx; + reclaim_entry->expires = round_jiffies(jiffies + delay_j); + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); + } +} + +static bool fdls_is_oxid_fabric_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + case FNIC_FRAME_TYPE_FABRIC_RPN: + case FNIC_FRAME_TYPE_FABRIC_RFT: + case FNIC_FRAME_TYPE_FABRIC_RFF: + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + case FNIC_FRAME_TYPE_FABRIC_LOGO: + break; + default: + return false; + } + return true; +} + +static bool fdls_is_oxid_fdmi_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FDMI_PLOGI: + case FNIC_FRAME_TYPE_FDMI_RHBA: + case FNIC_FRAME_TYPE_FDMI_RPA: + break; + default: + return false; + } + return true; +} + +static bool fdls_is_oxid_tgt_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_TGT_PLOGI: + case FNIC_FRAME_TYPE_TGT_PRLI: + case FNIC_FRAME_TYPE_TGT_ADISC: + case FNIC_FRAME_TYPE_TGT_LOGO: + break; + default: + return false; + } + return true; +} + +static void fdls_reset_oxid_pool(struct fnic_iport_s *iport) +{ + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + + oxid_pool->next_idx = 0; +} + +void fnic_del_fabric_timer_sync(struct fnic *fnic) +{ + fnic->iport.fabric.del_timer_inprogress = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + 
timer_delete_sync(&fnic->iport.fabric.retry_timer); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic->iport.fabric.del_timer_inprogress = 0; +} + +void fnic_del_tport_timer_sync(struct fnic *fnic, + struct fnic_tport_s *tport) +{ + tport->del_timer_inprogress = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + timer_delete_sync(&tport->retry_timer); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + tport->del_timer_inprogress = 0; +} + +static void +fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout) +{ + u64 fabric_tov; + struct fnic *fnic = iport->fnic; + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + iport->fabric.timer_pending = 0; + } + + if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + iport->fabric.retry_counter++; + + fabric_tov = jiffies + msecs_to_jiffies(timeout); + mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov)); + iport->fabric.timer_pending = 1; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fabric timer is %d ", timeout); +} + +static void +fdls_start_tport_timer(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, int timeout) +{ + u64 fabric_tov; + struct fnic *fnic = iport->fnic; + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) + tport->retry_counter++; + + fabric_tov = jiffies + msecs_to_jiffies(timeout); + mod_timer(&tport->retry_timer, round_jiffies(fabric_tov)); + tport->timer_pending = 1; +} + +void fdls_init_plogi_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_flogi *pplogi; + uint8_t s_id[3]; + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pplogi = (struct fc_std_flogi) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC}, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els = { + .fl_cmd = ELS_PLOGI, + .fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, + .sp_lo_ver = FNIC_FC_PH_VER_LO, + .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), + .sp_features = cpu_to_be16(FC_SP_FT_CIRO), + .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ), + .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS), + .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO), + .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)}, + .fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ), + .fl_cssp[2].cp_rdfs = cpu_to_be16(0x800), + .fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF), + .fl_cssp[2].cp_open_seq = 1} + }; + + FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn); + FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size); + + hton24(s_id, iport->fcid); + FNIC_STD_SET_S_ID(pplogi->fchdr, s_id); +} + +static void fdls_init_els_acc_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_els_acc_rsp *pels_acc; + uint8_t s_id[3]; + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pels_acc = (struct fc_std_els_acc_rsp) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}}, + .acc.la_cmd = ELS_LS_ACC, + }; + + hton24(s_id, iport->fcid); + 
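fdls_init_plogi_frame() above, like the frame helpers that follow, builds its FC header by assigning a C99 compound literal with designated initializers over the payload; every member not named in the literal is implicitly zeroed, which replaces a memset() followed by a run of field assignments. A minimal sketch with a hypothetical on-wire header:

#include <linux/types.h>

struct example_hdr {			/* hypothetical on-wire header */
	u8 r_ctl;
	u8 d_id[3];
	u8 type;
};

static void example_init_hdr(struct example_hdr *hdr)
{
	/* members not named in the literal (d_id here) are zeroed */
	*hdr = (struct example_hdr) {
		.r_ctl = 0x22,
		.type = 0x01,
	};
}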
FNIC_STD_SET_S_ID(pels_acc->fchdr, s_id); + FNIC_STD_SET_RX_ID(pels_acc->fchdr, FNIC_UNASSIGNED_RXID); +} + +static void fdls_init_els_rjt_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_els_rjt_rsp *pels_rjt; + + pels_rjt = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pels_rjt = (struct fc_std_els_rjt_rsp) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}}, + .rej.er_cmd = ELS_LS_RJT, + }; + + FNIC_STD_SET_RX_ID(pels_rjt->fchdr, FNIC_UNASSIGNED_RXID); +} + +static void fdls_init_logo_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_logo *plogo; + uint8_t s_id[3]; + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *plogo = (struct fc_std_logo) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}}, + .els.fl_cmd = ELS_LOGO, + }; + + hton24(s_id, iport->fcid); + FNIC_STD_SET_S_ID(plogo->fchdr, s_id); + memcpy(plogo->els.fl_n_port_id, s_id, 3); + + FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn, + iport->wwpn); +} + +static void fdls_init_fabric_abts_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_frame_header *pfabric_abts; + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pfabric_abts = (struct fc_frame_header) { + .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */ + .fh_s_id = {0x00, 0x00, 0x00}, + .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS, + .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00, + .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID), + .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort a exchange */ + }; +} + +static void +fdls_send_rscn_resp(struct fnic_iport_s *iport, + struct fc_frame_header *rscn_fchdr) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *pels_acc; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_acc_rsp); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RSCN response"); + return; + } + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(pels_acc->fchdr, rscn_fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(rscn_fchdr); + FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RSCN response with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_send_logo_resp(struct fnic_iport_s *iport, + struct fc_frame_header *req_fchdr) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *plogo_resp; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_acc_rsp); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send LOGO response"); + return; + } + + plogo_resp = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(plogo_resp->fchdr, req_fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(req_fchdr); + FNIC_STD_SET_OX_ID(plogo_resp->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send LOGO response with oxid: 0x%x", + 
iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +void +fdls_send_tport_abts(struct fnic_iport_s *iport, + struct fnic_tport_s *tport) +{ + uint8_t *frame; + uint8_t s_id[3]; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *ptport_abts; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send tport ABTS"); + return; + } + + ptport_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *ptport_abts = (struct fc_frame_header) { + .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */ + .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS, + .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00, + .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID), + .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort a exchange */ + }; + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + FNIC_STD_SET_S_ID(*ptport_abts, s_id); + FNIC_STD_SET_D_ID(*ptport_abts, d_id); + tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED; + + FNIC_STD_SET_OX_ID(*ptport_abts, tport->active_oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tport abts: tport->state: %d ", + iport->fcid, tport->state); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov); +} +static void fdls_send_fabric_abts(struct fnic_iport_s *iport) +{ + uint8_t *frame; + uint8_t s_id[3]; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *pfabric_abts; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric ABTS"); + return; + } + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_fabric_abts_frame(frame, iport); + + hton24(s_id, iport->fcid); + + switch (iport->fabric.state) { + case FDLS_STATE_FABRIC_LOGO: + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_FABRIC_FLOGI: + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_FABRIC_PLOGI: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_RPN_ID: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_SCR: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_FCTRL); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_REGISTER_FC4_TYPES: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_REGISTER_FC4_FEATURES: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_GPN_FT: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + default: + return; + } + + oxid = iport->active_oxid_fabric_req; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + + 
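fdls_send_tport_abts() above arms the tport retry timer for twice E_D_TOV whether or not frame allocation and transmission succeeded, a pattern every fdls_send_*() routine in this file repeats (note the comment after each send): recovery is always timer-driven rather than dependent on the send path reporting an error. A sketch of the control flow, with all names hypothetical:

struct example_port {
	int e_d_tov;			/* error-detect timeout, ms */
};

static void *example_alloc_frame(struct example_port *port);
static void example_send_frame(struct example_port *port, void *frame);
static void example_start_retry_timer(struct example_port *port, int ms);

static void example_send_request(struct example_port *port)
{
	void *frame = example_alloc_frame(port);

	if (frame)
		example_send_frame(port, frame);
	/* arm the timer even on failure: the retry path runs off expiry */
	example_start_retry_timer(port, 2 * port->e_d_tov);
}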
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric abts. iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, oxid); + + iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED; + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); + iport->fabric.timer_pending = 1; +} + +static void fdls_send_fdmi_abts(struct fnic_iport_s *iport) +{ + uint8_t *frame; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *pfabric_abts; + unsigned long fdmi_tov; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI ABTS"); + return; + } + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_fabric_abts_frame(frame, iport); + + hton24(d_id, FC_FID_MGMT_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + + if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) { + oxid = iport->active_oxid_fdmi_plogi; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + fnic_send_fcoe_frame(iport, frame, frame_size); + } else { + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) { + oxid = iport->active_oxid_fdmi_rhba; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + fnic_send_fcoe_frame(iport, frame, frame_size); + } + if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) { + oxid = iport->active_oxid_fdmi_rpa; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + fnic_send_fcoe_frame(iport, frame, frame_size); + } + } + + fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov); + mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov)); + iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING; +} + +static void fdls_send_fabric_flogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pflogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FLOGI"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pflogi = (struct fc_std_flogi) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE}, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els.fl_cmd = ELS_FLOGI, + .els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, + .sp_lo_ver = FNIC_FC_PH_VER_LO, + .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), + .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)}, + .els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) + }; + + FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn); + FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size); + FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov); + FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FLOGI", + iport->fcid); + 
mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_flogi_sent); +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_fabric_plogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send PLOGI"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send fabric PLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_plogi_sent); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + uint8_t d_id[3]; + u64 fdmi_tov; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI PLOGI"); + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_PLOGI, + &iport->active_oxid_fdmi_plogi); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI PLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + hton24(d_id, FC_FID_MGMT_SERV); + FNIC_STD_SET_D_ID(pplogi->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI PLOGI with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov); + mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov)); + iport->fabric.fdmi_pending = FDLS_FDMI_PLOGI_PENDING; +} + +static void fdls_send_rpn_id(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rpn_id *prpn_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct 
fc_std_rpn_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RPN_ID"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prpn_id = (struct fc_std_rpn_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid); + + FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid); + FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RPN_ID", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_scr(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_scr *pscr; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_scr); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send SCR"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pscr = (struct fc_std_scr) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, + .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .scr = {.scr_cmd = ELS_SCR, + .scr_reg_func = ELS_SCRF_FULL} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(pscr->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send SCR", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pscr->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_scr_sent); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state) +{ + uint8_t *frame; + struct fc_std_gpn_ft *pgpn_ft; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct 
fc_std_gpn_ft); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send GPN FT"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pgpn_ft = (struct fc_std_gpn_ft) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)}, + .gpn_ft.fn_fc4_type = 0x08 + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send GPN FT", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); + fdls_set_state((&iport->fabric), fdls_state); +} + +static void +fdls_send_tgt_adisc(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_els_adisc *padisc; + uint8_t s_id[3]; + uint8_t d_id[3]; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_adisc); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT ADISC"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + padisc = (struct fc_std_els_adisc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + memcpy(padisc->els.adisc_port_id, s_id, 3); + FNIC_STD_SET_S_ID(padisc->fchdr, s_id); + FNIC_STD_SET_D_ID(padisc->fchdr, d_id); + + FNIC_STD_SET_F_CTL(padisc->fchdr, FNIC_ELS_REQ_FCTL << 16); + FNIC_STD_SET_R_CTL(padisc->fchdr, FC_RCTL_ELS_REQ); + FNIC_STD_SET_TYPE(padisc->fchdr, FC_TYPE_ELS); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_ADISC, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send TGT ADISC", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(padisc->fchdr, oxid); + FNIC_STD_SET_RX_ID(padisc->fchdr, FNIC_UNASSIGNED_RXID); + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + FNIC_STD_SET_NPORT_NAME(&padisc->els.adisc_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&padisc->els.adisc_wwnn, + iport->wwnn); + + padisc->els.adisc_cmd = ELS_ADISC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send ADISC to tgt fcid: 0x%x", + iport->fcid, tport->fcid); + + atomic64_inc(&iport->iport_stats.tport_adisc_sent); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to 
retry after timeout */ + fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov); +} + +bool fdls_delete_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + struct fnic_tport_event_s *tport_del_evt; + struct fnic *fnic = iport->fnic; + + if ((tport->state == FDLS_TGT_STATE_OFFLINING) + || (tport->state == FDLS_TGT_STATE_OFFLINE)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: tport state is offlining/offline\n", + tport->fcid); + return false; + } + + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); + /* + * By setting this flag, the tport will not be seen in a look-up + * in an RSCN. Even if we move to multithreaded model, this tport + * will be destroyed and a new RSCN will have to create a new one + */ + tport->flags |= FNIC_FDLS_TPORT_TERMINATING; + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_rport_exch_reset(iport->fnic, tport->fcid); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + if (tport->flags & FNIC_FDLS_SCSI_REGISTERED) { + tport_del_evt = + kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_del_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for tport fcid: 0x%0x\n", + tport->fcid); + return false; + } + tport_del_evt->event = TGT_EV_RPORT_DEL; + tport_del_evt->arg1 = (void *) tport; + list_add_tail(&tport_del_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport 0x%x not reg with scsi_transport. 
Freeing locally", + tport->fcid); + list_del(&tport->links); + kfree(tport); + } + return true; +} + +static void +fdls_send_tgt_plogi(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + uint8_t d_id[3]; + uint32_t timeout; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT PLOGI"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PLOGI, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate oxid to send PLOGI to fcid: 0x%x", + iport->fcid, tport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + hton24(d_id, tport->fcid); + FNIC_STD_SET_D_ID(pplogi->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tgt PLOGI to tgt: 0x%x with oxid: 0x%x", + iport->fcid, tport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.tport_plogi_sent); + +err_out: + timeout = max(2 * iport->e_d_tov, iport->plogi_timeout); + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, timeout); +} + +static uint16_t +fnic_fc_plogi_rsp_rdf(struct fnic_iport_s *iport, + struct fc_std_flogi *plogi_rsp) +{ + uint16_t b2b_rdf_size = + be16_to_cpu(FNIC_LOGI_RDF_SIZE(plogi_rsp->els)); + uint16_t spc3_rdf_size = + be16_to_cpu(plogi_rsp->els.fl_cssp[2].cp_rdfs) & FNIC_FC_C3_RDF; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFS: b2b_rdf_size: 0x%x spc3_rdf_size: 0x%x", + b2b_rdf_size, spc3_rdf_size); + + return min(b2b_rdf_size, spc3_rdf_size); +} + +static void fdls_send_register_fc4_types(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rft_id *prft_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rft_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RFT"); + return; + } + + prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prft_id = (struct fc_std_rft_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prft_id->fchdr, fcid); + FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RFT", + iport->fcid); + mempool_free(frame, 
fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid, + oxid); + + prft_id->rft_id.fr_fts.ff_type_map[0] = + cpu_to_be32(1 << FC_TYPE_FCP); + + prft_id->rft_id.fr_fts.ff_type_map[1] = + cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW)); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_register_fc4_features(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rff_id *prff_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rff_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RFF"); + return; + } + + prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prff_id = (struct fc_std_rff_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)}, + .rff_id.fr_feat = 0x2, + .rff_id.fr_type = FC_TYPE_FCP + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prff_id->fchdr, fcid); + FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RFF", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid, + oxid); + + prff_id->rff_id.fr_type = FC_TYPE_FCP; + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void +fdls_send_tgt_prli(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_els_prli *pprli; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_prli); + uint8_t s_id[3]; + uint8_t d_id[3]; + uint32_t timeout; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT PRLI"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pprli = (struct fc_std_els_prli *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pprli = (struct fc_std_els_prli) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els_prli = {.prli_cmd = ELS_PRLI, + .prli_spp_len = 16, + .prli_len = cpu_to_be16(0x14)}, + .sp = {.spp_type = 0x08, .spp_flags = 0x0020, + .spp_params = cpu_to_be32(0xA2)} + }; + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PRLI, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate 
OXID to send TGT PRLI to 0x%x", + iport->fcid, tport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + + FNIC_STD_SET_OX_ID(pprli->fchdr, oxid); + FNIC_STD_SET_S_ID(pprli->fchdr, s_id); + FNIC_STD_SET_D_ID(pprli->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send PRLI to tgt: 0x%x with oxid: 0x%x", + iport->fcid, tport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.tport_prli_sent); + +err_out: + timeout = max(2 * iport->e_d_tov, iport->plogi_timeout); + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, timeout); +} + +/** + * fdls_send_fabric_logo - Send fabric LOGO to the FCF + * @iport: Handle to fnic iport + * + * This function does not change or check the fabric state. + * It is the caller's responsibility to set the appropriate iport fabric + * state when this is called. Normally it is FDLS_STATE_FABRIC_LOGO. + * Currently this is assumed to be called with the fnic lock held. + */ +void fdls_send_fabric_logo(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_logo *plogo; + struct fnic *fnic = iport->fnic; + uint8_t d_id[3]; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_logo); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric LOGO"); + return; + } + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_logo_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send fabric LOGO", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(plogo->fchdr, oxid); + + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(plogo->fchdr, d_id); + + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric LOGO with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +/** + * fdls_tgt_logout - Send PLOGO to the remote port + * @iport: Handle to fnic iport + * @tport: Handle to remote port + * + * This function does not change or check the fabric/tport state. + * It is the caller's responsibility to set the appropriate tport/fabric + * state when this is called. Normally that is fdls_tgt_state_plogo.
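Since neither LOGO helper changes FDLS state itself, the caller sequences the transition explicitly. A minimal sketch of that contract, assuming the fnic lock is already held as required above (example_fabric_logout() is illustrative, not a driver function):

/* Sketch: set the fabric state first, then send; any retry is
 * driven by the timer armed inside the send helper, not here.
 */
static void example_fabric_logout(struct fnic_iport_s *iport)
{
	fdls_set_state(&iport->fabric, FDLS_STATE_FABRIC_LOGO);
	fdls_send_fabric_logo(iport);
}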
+ * This could be used to send plogo to nameserver process + * also not just target processes + */ +void fdls_tgt_logout(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_logo *plogo; + struct fnic *fnic = iport->fnic; + uint8_t d_id[3]; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_logo); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric LOGO"); + return; + } + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_logo_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_LOGO, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send tgt LOGO", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(plogo->fchdr, oxid); + + hton24(d_id, tport->fcid); + FNIC_STD_SET_D_ID(plogo->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tgt LOGO with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + atomic64_inc(&iport->iport_stats.tport_logo_sent); +} + +static void fdls_tgt_discovery_start(struct fnic_iport_s *iport) +{ + struct fnic_tport_s *tport, *next; + u32 old_link_down_cnt = iport->fnic->link_down_cnt; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Starting FDLS target discovery", iport->fcid); + + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + if ((old_link_down_cnt != iport->fnic->link_down_cnt) + || (iport->state != FNIC_IPORT_STATE_READY)) { + break; + } + /* if we marked the tport as deleted due to GPN_FT + * We should not send ADISC anymore + */ + if ((tport->state == FDLS_TGT_STATE_OFFLINING) || + (tport->state == FDLS_TGT_STATE_OFFLINE)) + continue; + + /* For tports which have received RSCN */ + if (tport->flags & FNIC_FDLS_TPORT_SEND_ADISC) { + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_ADISC); + tport->flags &= ~FNIC_FDLS_TPORT_SEND_ADISC; + fdls_send_tgt_adisc(iport, tport); + continue; + } + if (fdls_get_tport_state(tport) != FDLS_TGT_STATE_INIT) { + /* Not a new port, skip */ + continue; + } + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + fdls_send_tgt_plogi(iport, tport); + } + fdls_set_state((&iport->fabric), FDLS_STATE_TGT_DISCOVERY); +} + +/* + * Function to restart the IT nexus if we received any out of + * sequence PLOGI/PRLI response from the target. + * The memory for the new tport structure is allocated + * inside fdls_create_tport and added to the iport's tport list. + * This will get freed later during tport_offline/linkdown + * or module unload. 
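The ownership rule above (the list owns the allocation, never a local pointer) is also why every walk of the tport list in this file uses the _safe iterator. A hedged sketch of the pattern; the loop body is illustrative:

/* Sketch: entries can be unlinked and kfree()d mid-walk, so the
 * iterator caches the next pointer up front; terminating tports
 * are skipped rather than touched.
 */
struct fnic_tport_s *tport, *next;

list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
	if (tport->flags & FNIC_FDLS_TPORT_TERMINATING)
		continue;
	/* safe to unlink or reuse tport here */
}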
The new_tport pointer will go out of scope + * safely since the memory it is + * pointing to it will be freed later + */ +static void fdls_target_restart_nexus(struct fnic_tport_s *tport) +{ + struct fnic_iport_s *iport = tport->iport; + struct fnic_tport_s *new_tport = NULL; + uint32_t fcid; + uint64_t wwpn; + int nexus_restart_count; + struct fnic *fnic = iport->fnic; + bool retval = true; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid: 0x%x state: %d restart_count: %d", + tport->fcid, tport->state, tport->nexus_restart_count); + + fcid = tport->fcid; + wwpn = tport->wwpn; + nexus_restart_count = tport->nexus_restart_count; + + retval = fdls_delete_tport(iport, tport); + if (retval != true) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Error deleting tport: 0x%x", fcid); + return; + } + + if (nexus_restart_count >= FNIC_TPORT_MAX_NEXUS_RESTART) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded nexus restart retries tport: 0x%x", + fcid); + return; + } + + /* + * Allocate memory for the new tport and add it to + * iport's tport list. + * This memory will be freed during tport_offline/linkdown + * or module unload. The pointer new_tport is safe to go + * out of scope when this function returns, since the memory + * it is pointing to is guaranteed to be freed later + * as mentioned above. + */ + new_tport = fdls_create_tport(iport, fcid, wwpn); + if (!new_tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Error creating new tport: 0x%x", fcid); + return; + } + + new_tport->nexus_restart_count = nexus_restart_count + 1; + fdls_send_tgt_plogi(iport, new_tport); + fdls_set_tport_state(new_tport, FDLS_TGT_STATE_PLOGI); +} + +struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport, + uint32_t fcid) +{ + struct fnic_tport_s *tport, *next; + + list_for_each_entry_safe(tport, next, &(iport->tport_list), links) { + if ((tport->fcid == fcid) + && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING)) + return tport; + } + return NULL; +} + +static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport, + uint32_t fcid, uint64_t wwpn) +{ + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS create tport: fcid: 0x%x wwpn: 0x%llx", fcid, wwpn); + + tport = kzalloc(sizeof(struct fnic_tport_s), GFP_ATOMIC); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Memory allocation failure while creating tport: 0x%x\n", + fcid); + return NULL; + } + + tport->max_payload_size = FNIC_FCOE_MAX_FRAME_SZ; + tport->r_a_tov = FC_DEF_R_A_TOV; + tport->e_d_tov = FC_DEF_E_D_TOV; + tport->fcid = fcid; + tport->wwpn = wwpn; + tport->iport = iport; + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Need to setup tport timer callback"); + + timer_setup(&tport->retry_timer, fdls_tport_timer_callback, 0); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Added tport 0x%x", tport->fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_INIT); + list_add_tail(&tport->links, &iport->tport_list); + atomic_set(&tport->in_flight, 0); + return tport; +} + +struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport, + uint64_t wwpn) +{ + struct fnic_tport_s *tport, *next; + + list_for_each_entry_safe(tport, next, &(iport->tport_list), links) { + if ((tport->wwpn == wwpn) + && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING)) + return tport; + } + return NULL; +} + +static void +fnic_fdmi_attr_set(void *attr_start, u16 type, u16 
len, + void *data, u32 *off) +{ + u16 size = len + FC_FDMI_ATTR_ENTRY_HEADER_LEN; + struct fc_fdmi_attr_entry *fdmi_attr = (struct fc_fdmi_attr_entry *) + ((u8 *)attr_start + *off); + + put_unaligned_be16(type, &fdmi_attr->type); + put_unaligned_be16(size, &fdmi_attr->len); + memcpy(fdmi_attr->value, data, len); + *off += size; +} + +static void fdls_fdmi_register_hba(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_fdmi_rhba *prhba; + struct fc_fdmi_attr_entry *fdmi_attr; + uint8_t fcid[3]; + int err; + struct fnic *fnic = iport->fnic; + struct vnic_devcmd_fw_info *fw_info = NULL; + uint16_t oxid; + u32 attr_off_bytes, len; + u8 data[64]; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI RHBA"); + return; + } + + prhba = (struct fc_std_fdmi_rhba *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prhba = (struct fc_std_fdmi_rhba) { + .fchdr = { + .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0XFF, 0XFA}, + .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID) + }, + .fc_std_ct_hdr = { + .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT, + .ct_fs_subtype = FC_FDMI_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_FDMI_RHBA) + }, + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prhba->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RHBA, + &iport->active_oxid_fdmi_rhba); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI RHBA", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prhba->fchdr, oxid); + + put_unaligned_be64(iport->wwpn, &prhba->rhba.hbaid.id); + put_unaligned_be32(FNIC_FDMI_NUM_PORTS, &prhba->rhba.port.numport); + put_unaligned_be64(iport->wwpn, &prhba->rhba.port.port[0].portname); + put_unaligned_be32(FNIC_FDMI_NUM_HBA_ATTRS, + &prhba->rhba.hba_attrs.numattrs); + + fdmi_attr = prhba->rhba.hba_attrs.attr; + attr_off_bytes = 0; + + put_unaligned_be64(iport->wwnn, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_NODE_NAME, + FNIC_FDMI_NN_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "NN set, off=%d", attr_off_bytes); + + strscpy_pad(data, FNIC_FDMI_MANUFACTURER, FNIC_FDMI_MANU_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MANUFACTURER, + FNIC_FDMI_MANU_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFG set <%s>, off=%d", data, attr_off_bytes); + + err = vnic_dev_fw_info(fnic->vdev, &fw_info); + if (!err) { + strscpy_pad(data, fw_info->hw_serial_number, + FNIC_FDMI_SERIAL_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SERIAL_NUMBER, + FNIC_FDMI_SERIAL_LEN, data, &attr_off_bytes); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SERIAL set <%s>, off=%d", data, attr_off_bytes); + + } + + if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN) + fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1; + strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN, + data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MODEL set <%s>, off=%d", data, attr_off_bytes); + + strscpy_pad(data, FNIC_FDMI_MODEL_DESCRIPTION, FNIC_FDMI_MODEL_DES_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL_DES, + FNIC_FDMI_MODEL_DES_LEN, 
data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MODEL_DESC set <%s>, off=%d", data, attr_off_bytes); + + if (!err) { + strscpy_pad(data, fw_info->hw_version, FNIC_FDMI_HW_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HARDWARE_VERSION, + FNIC_FDMI_HW_VER_LEN, data, &attr_off_bytes); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "HW_VER set <%s>, off=%d", data, attr_off_bytes); + + } + + strscpy_pad(data, DRV_VERSION, FNIC_FDMI_DR_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_DRIVER_VERSION, + FNIC_FDMI_DR_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "DRV_VER set <%s>, off=%d", data, attr_off_bytes); + + strscpy_pad(data, "N/A", FNIC_FDMI_ROM_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_ROM_VERSION, + FNIC_FDMI_ROM_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ROM_VER set <%s>, off=%d", data, attr_off_bytes); + + if (!err) { + strscpy_pad(data, fw_info->fw_version, FNIC_FDMI_FW_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FIRMWARE_VERSION, + FNIC_FDMI_FW_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FW_VER set <%s>, off=%d", data, attr_off_bytes); + } + + len = sizeof(struct fc_std_fdmi_rhba) + attr_off_bytes; + frame_size += len; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RHBA with oxid: 0x%x fs: %d", iport->fcid, + oxid, frame_size); + + fnic_send_fcoe_frame(iport, frame, frame_size); + iport->fabric.fdmi_pending |= FDLS_FDMI_REG_HBA_PENDING; +} + +static void fdls_fdmi_register_pa(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_fdmi_rpa *prpa; + struct fc_fdmi_attr_entry *fdmi_attr; + uint8_t fcid[3]; + struct fnic *fnic = iport->fnic; + u32 port_speed_bm; + u32 port_speed = vnic_dev_port_speed(fnic->vdev); + uint16_t oxid; + u32 attr_off_bytes, len; + u8 tmp_data[16], data[64]; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI RPA"); + return; + } + + prpa = (struct fc_std_fdmi_rpa *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prpa = (struct fc_std_fdmi_rpa) { + .fchdr = { + .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFA}, + .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID) + }, + .fc_std_ct_hdr = { + .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT, + .ct_fs_subtype = FC_FDMI_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_FDMI_RPA) + }, + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prpa->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RPA, + &iport->active_oxid_fdmi_rpa); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI RPA", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prpa->fchdr, oxid); + + put_unaligned_be64(iport->wwpn, &prpa->rpa.port.portname); + put_unaligned_be32(FNIC_FDMI_NUM_PORT_ATTRS, + &prpa->rpa.hba_attrs.numattrs); + + /* MDS does not support GIGE speed. + * Bit shift standard definitions from scsi_transport_fc.h to + * match FC spec. 
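Before the speed mapping below, it helps to see the shape of what fnic_fdmi_attr_set() packs for each of these calls. A hedged sketch of the attribute TLV, with the struct name illustrative and the layout inferred from that helper:

/* Sketch: each FDMI attribute is a packed TLV; 'len' counts the
 * 4-byte header (FC_FDMI_ATTR_ENTRY_HEADER_LEN) plus the value,
 * and successive entries append back to back via the *off cursor.
 */
struct example_fdmi_attr {
	__be16 type;	/* e.g. FNIC_FDMI_TYPE_CURRENT_SPEED */
	__be16 len;	/* header + value length */
	u8 value[];	/* big-endian word or padded string */
} __packed;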
+ */ + switch (port_speed) { + case DCEM_PORTSPEED_10G: + case DCEM_PORTSPEED_20G: + /* There is no bit for 20G */ + port_speed_bm = FC_PORTSPEED_10GBIT << PORT_SPEED_BIT_14; + break; + case DCEM_PORTSPEED_25G: + port_speed_bm = FC_PORTSPEED_25GBIT << PORT_SPEED_BIT_8; + break; + case DCEM_PORTSPEED_40G: + case DCEM_PORTSPEED_4x10G: + port_speed_bm = FC_PORTSPEED_40GBIT << PORT_SPEED_BIT_9; + break; + case DCEM_PORTSPEED_100G: + port_speed_bm = FC_PORTSPEED_100GBIT << PORT_SPEED_BIT_8; + break; + default: + port_speed_bm = FC_PORTSPEED_1GBIT << PORT_SPEED_BIT_15; + break; + } + attr_off_bytes = 0; + + fdmi_attr = prpa->rpa.hba_attrs.attr; + + put_unaligned_be64(iport->wwnn, data); + + memset(data, 0, FNIC_FDMI_FC4_LEN); + data[2] = 1; + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FC4_TYPES, + FNIC_FDMI_FC4_LEN, data, &attr_off_bytes); + + put_unaligned_be32(port_speed_bm, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SUPPORTED_SPEEDS, + FNIC_FDMI_SUPP_SPEED_LEN, data, &attr_off_bytes); + + put_unaligned_be32(port_speed_bm, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_CURRENT_SPEED, + FNIC_FDMI_CUR_SPEED_LEN, data, &attr_off_bytes); + + put_unaligned_be32(FNIC_FDMI_MFS, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MAX_FRAME_SIZE, + FNIC_FDMI_MFS_LEN, data, &attr_off_bytes); + + snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d", + fnic->host->host_no); + strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME, + FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "OS name set <%s>, off=%d", data, attr_off_bytes); + + sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename); + strscpy_pad(data, fc_host_system_hostname(fnic->host), + FNIC_FDMI_HN_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME, + FNIC_FDMI_HN_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Host name set <%s>, off=%d", data, attr_off_bytes); + + len = sizeof(struct fc_std_fdmi_rpa) + attr_off_bytes; + frame_size += len; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RPA with oxid: 0x%x fs: %d", iport->fcid, + oxid, frame_size); + + fnic_send_fcoe_frame(iport, frame, frame_size); + iport->fabric.fdmi_pending |= FDLS_FDMI_RPA_PENDING; +} + +void fdls_fabric_timer_callback(struct timer_list *t) +{ + struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer); + struct fnic_iport_s *iport = + container_of(fabric, struct fnic_iport_s, fabric); + struct fnic *fnic = iport->fnic; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tp: %d fab state: %d fab retry counter: %d max_flogi_retries: %d", + iport->fabric.timer_pending, iport->fabric.state, + iport->fabric.retry_counter, iport->max_flogi_retries); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (!iport->fabric.timer_pending) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + if (iport->fabric.del_timer_inprogress) { + iport->fabric.del_timer_inprogress = 0; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fabric_del_timer inprogress(%d). 
Skip timer cb", + iport->fabric.del_timer_inprogress); + return; + } + + iport->fabric.timer_pending = 0; + + /* The fabric state indicates which frames have time out, and we retry */ + switch (iport->fabric.state) { + case FDLS_STATE_FABRIC_FLOGI: + /* Flogi received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < iport->max_flogi_retries)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_fabric_flogi(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* Flogi has time out 2*ed_tov send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out + * Mark the OXID to be freed after 2 * r_a_tov and retry the req + */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + if (iport->fabric.retry_counter < iport->max_flogi_retries) { + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + fdls_send_fabric_flogi(iport); + } else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max FLOGI retries"); + } + break; + case FDLS_STATE_FABRIC_PLOGI: + /* Plogi received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < iport->max_plogi_retries)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_fabric_plogi(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* Plogi has timed out 2*ed_tov send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out + * Mark the OXID to be freed after 2 * r_a_tov and retry the req + */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + if (iport->fabric.retry_counter < iport->max_plogi_retries) { + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + fdls_send_fabric_plogi(iport); + } else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max PLOGI retries"); + } + break; + case FDLS_STATE_RPN_ID: + /* Rpn_id received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_rpn_id(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + /* RPN has timed out. Send abts */ + fdls_send_fabric_abts(iport); + else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FDLS_STATE_SCR: + /* scr received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_scr(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + /* scr has timed out. Send abts */ + fdls_send_fabric_abts(iport); + else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timed out. 
Starting PLOGI: %p", iport); + fnic_fdls_start_plogi(iport); + } + break; + case FDLS_STATE_REGISTER_FC4_TYPES: + /* scr received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_register_fc4_types(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* RFT_ID timed out send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timed out. Starting PLOGI: %p", iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FDLS_STATE_REGISTER_FC4_FEATURES: + /* scr received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_register_fc4_features(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + /* SCR has timed out. Send abts */ + fdls_send_fabric_abts(iport); + else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timed out. Starting PLOGI %p", iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FDLS_STATE_RSCN_GPN_FT: + case FDLS_STATE_SEND_GPNFT: + case FDLS_STATE_GPN_FT: + /* GPN_FT received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_gpn_ft(iport, iport->fabric.state); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* gpn_ft has timed out. Send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) { + fdls_send_gpn_ft(iport, iport->fabric.state); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timeout for fabric GPN_FT. Check name server: %p", + iport); + } + } + break; + default: + break; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fdls_fdmi_timer_callback(struct timer_list *t) +{ + struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, fdmi_timer); + struct fnic_iport_s *iport = + container_of(fabric, struct fnic_iport_s, fabric); + struct fnic *fnic = iport->fnic; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + + if (!iport->fabric.fdmi_pending) { + /* timer expired after fdmi responses received. */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + + /* if not abort pending, send an abort */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) { + fdls_send_fdmi_abts(iport); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + + /* ABTS pending for an active fdmi request that is pending. 
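This FDMI callback and fdls_fabric_timer_callback above open with the same locking handshake; a condensed sketch of just that control flow, under the assumption that timer_pending is only ever touched with fnic_lock held:

/* Sketch: a del-timer path can retract a shot that already fired
 * by clearing timer_pending before the callback takes the lock.
 */
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (!iport->fabric.timer_pending) {
	/* canceled while we waited on the lock */
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	return;
}
iport->fabric.timer_pending = 0;	/* consume this shot */
/* ... per-state retry work, still under the lock ... */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);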
+ * That means FDMI ABTS timed out + * Schedule to free the OXID after 2*r_a_tov and proceed + */ + if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) { + fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi); + } else { + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) + fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba); + if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) + fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa); + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + + iport->fabric.fdmi_pending = 0; + /* If max retries not exhausted, start over from fdmi plogi */ + if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) { + iport->fabric.fdmi_retry++; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "retry fdmi timer %d", iport->fabric.fdmi_retry); + fdls_send_fdmi_plogi(iport); + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +static void fdls_send_delete_tport_msg(struct fnic_tport_s *tport) +{ + struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport; + struct fnic *fnic = iport->fnic; + struct fnic_tport_event_s *tport_del_evt; + + tport_del_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_del_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for tport event fcid: 0x%x", + tport->fcid); + return; + } + tport_del_evt->event = TGT_EV_TPORT_DELETE; + tport_del_evt->arg1 = (void *) tport; + list_add_tail(&tport_del_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); +} + +static void fdls_tport_timer_callback(struct timer_list *t) +{ + struct fnic_tport_s *tport = from_timer(tport, t, retry_timer); + struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (!tport->timer_pending) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + if (iport->state != FNIC_IPORT_STATE_READY) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + if (tport->del_timer_inprogress) { + tport->del_timer_inprogress = 0; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport_del_timer inprogress. 
Skip timer cb tport fcid: 0x%x\n", + tport->fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid: 0x%x timer pending: %d state: %d retry counter: %d", + tport->fcid, tport->timer_pending, tport->state, + tport->retry_counter); + + tport->timer_pending = 0; + oxid = tport->active_oxid; + + /* We retry plogi/prli/adisc frames depending on the tport state */ + switch (tport->state) { + case FDLS_TGT_STATE_PLOGI: + /* PLOGI frame received an LS_RJT with busy; we retry from here */ + if ((tport->flags & FNIC_FDLS_RETRY_FRAME) + && (tport->retry_counter < iport->max_plogi_retries)) { + tport->flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_tgt_plogi(iport, tport); + } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + /* PLOGI frame has timed out; send ABTS */ + fdls_send_tport_abts(iport, tport); + } else if (tport->retry_counter < iport->max_plogi_retries) { + /* + * ABTS has timed out + */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); + } else { + /* exceeded plogi retry count */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_delete_tport_msg(tport); + } + break; + case FDLS_TGT_STATE_PRLI: + /* PRLI received an LS_RJT with busy; we retry from here */ + if ((tport->flags & FNIC_FDLS_RETRY_FRAME) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + tport->flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_tgt_prli(iport, tport); + } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + /* PRLI has timed out; send ABTS */ + fdls_send_tport_abts(iport, tport); + } else { + /* ABTS has timed out for prli, we go back to PLOGI */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + } + break; + case FDLS_TGT_STATE_ADISC: + /* ADISC timed out; send an ABTS */ + if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + fdls_send_tport_abts(iport, tport); + } else if ((tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + /* + * ABTS has timed out + */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_tgt_adisc(iport, tport); + } else { + /* exceeded retry count */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC not responding. 
Deleting target port: 0x%x", + tport->fcid); + fdls_send_delete_tport_msg(tport); + } + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "oxid: 0x%x Unknown tport state: 0x%x", oxid, tport->state); + break; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +static void fnic_fdls_start_flogi(struct fnic_iport_s *iport) +{ + iport->fabric.retry_counter = 0; + fdls_send_fabric_flogi(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_FLOGI); + iport->fabric.flags = 0; +} + +static void fnic_fdls_start_plogi(struct fnic_iport_s *iport) +{ + iport->fabric.retry_counter = 0; + fdls_send_fabric_plogi(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_PLOGI); + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + if ((fnic_fdmi_support == 1) && (!(iport->flags & FNIC_FDMI_ACTIVE))) { + /* we can do FDMI at the same time */ + iport->fabric.fdmi_retry = 0; + timer_setup(&iport->fabric.fdmi_timer, fdls_fdmi_timer_callback, + 0); + fdls_send_fdmi_plogi(iport); + iport->flags |= FNIC_FDMI_ACTIVE; + } +} +static void +fdls_process_tgt_adisc_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint64_t frame_wwnn; + uint64_t frame_wwpn; + uint16_t oxid; + struct fc_std_els_adisc *adisc_rsp = (struct fc_std_els_adisc *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic *fnic = iport->fnic; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Tgt ADISC response tport not found: 0x%x", tgt_fcid); + return; + } + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->state != FDLS_TGT_STATE_ADISC) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping this ADISC response"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport state: %d tport state: %d Is abort issued on PRLI? %d", + iport->state, tport->state, + (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)); + return; + } + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame from target: 0x%x", + tgt_fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reason: Stale ADISC/Aborted ADISC/OOO frame delivery"); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (adisc_rsp->els.adisc_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_adisc_ls_accepts); + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport 0x%p Canceling fabric disc timer\n", + tport); + fnic_del_tport_timer_sync(fnic, tport); + } + tport->timer_pending = 0; + tport->retry_counter = 0; + frame_wwnn = get_unaligned_be64(&adisc_rsp->els.adisc_wwnn); + frame_wwpn = get_unaligned_be64(&adisc_rsp->els.adisc_wwpn); + if ((frame_wwnn == tport->wwnn) && (frame_wwpn == tport->wwpn)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC accepted from target: 0x%x. 
Target logged in", + tgt_fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_READY); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Error mismatch frame: ADISC"); + } + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_adisc_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + + /* Retry ADISC again from the timer routine. */ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + fdls_delete_tport(iport, tport); + } + break; + } +} +static void +fdls_process_tgt_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint16_t oxid; + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + uint16_t max_payload_size; + struct fnic *fnic = iport->fnic; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS processing target PLOGI response: tgt_fcid: 0x%x", + tgt_fcid); + + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport not found: 0x%x", tgt_fcid); + return; + } + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame! iport state: %d tport state: %d", + iport->state, tport->state); + return; + } + + if (tport->state != FDLS_TGT_STATE_PLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI rsp recvd in wrong state. Drop the frame and restart nexus"); + fdls_target_restart_nexus(tport); + return; + } + + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI response from target: 0x%x. Dropping frame", + tgt_fcid); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_plogi_ls_accepts); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI accepted by target: 0x%x", tgt_fcid); + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_plogi_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < iport->max_plogi_retries)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + /* Retry plogi again from the timer routine. 
*/ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + fdls_delete_tport(iport, tport); + return; + + default: + atomic64_inc(&iport->iport_stats.tport_plogi_misc_rejects); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI not accepted from target fcid: 0x%x", + tgt_fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Found the PLOGI target: 0x%x and state: %d", + (unsigned int) tgt_fcid, tport->state); + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + } + + tport->timer_pending = 0; + tport->wwpn = get_unaligned_be64(&FNIC_LOGI_PORT_NAME(plogi_rsp->els)); + tport->wwnn = get_unaligned_be64(&FNIC_LOGI_NODE_NAME(plogi_rsp->els)); + + /* Learn the Service Params */ + + /* Max frame size - choose the lowest */ + max_payload_size = fnic_fc_plogi_rsp_rdf(iport, plogi_rsp); + tport->max_payload_size = + min(max_payload_size, iport->max_payload_size); + + if (tport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFS: tport max frame size below spec bounds: %d", + tport->max_payload_size); + tport->max_payload_size = FNIC_MIN_DATA_FIELD_SIZE; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MAX frame size: %u iport max_payload_size: %d tport mfs: %d", + max_payload_size, iport->max_payload_size, + tport->max_payload_size); + + tport->max_concur_seqs = FNIC_FC_PLOGI_RSP_CONCUR_SEQ(plogi_rsp); + + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_PRLI); + fdls_send_tgt_prli(iport, tport); +} +static void +fdls_process_tgt_prli_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint16_t oxid; + struct fc_std_els_prli *prli_rsp = (struct fc_std_els_prli *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic_tport_event_s *tport_add_evt; + struct fnic *fnic = iport->fnic; + bool mismatched_tgt = false; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process tgt PRLI response: 0x%x", tgt_fcid); + + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport not found: 0x%x", tgt_fcid); + /* Handle or just drop? */ + return; + } + + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame! iport st: %d tport st: %d tport fcid: 0x%x", + iport->state, tport->state, tport->fcid); + return; + } + + if (tport->state != FDLS_TGT_STATE_PRLI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI rsp recvd in wrong state. Drop frame. 
Restarting nexus"); + fdls_target_restart_nexus(tport); + return; + } + + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping PRLI response from target: 0x%x ", + tgt_fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reason: Stale PRLI response/Aborted PDISC/OOO frame delivery"); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (prli_rsp->els_prli.prli_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_prli_ls_accepts); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI accepted from target: 0x%x", tgt_fcid); + + if (prli_rsp->sp.spp_type != FC_FC4_TYPE_SCSI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "mismatched target zoned with FC SCSI initiator: 0x%x", + tgt_fcid); + mismatched_tgt = true; + } + if (mismatched_tgt) { + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + } + break; + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_prli_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + + /*Retry Plogi again from the timer routine. */ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + default: + atomic64_inc(&iport->iport_stats.tport_prli_misc_rejects); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI not accepted from target: 0x%x", tgt_fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Found the PRLI target: 0x%x and state: %d", + (unsigned int) tgt_fcid, tport->state); + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + } + tport->timer_pending = 0; + + /* Learn Service Params */ + tport->fcp_csp = be32_to_cpu(prli_rsp->sp.spp_params); + tport->retry_counter = 0; + + if (tport->fcp_csp & FCP_SPPF_RETRY) + tport->tgt_flags |= FNIC_FC_RP_FLAGS_RETRY; + + /* Check if the device plays Target Mode Function */ + if (!(tport->fcp_csp & FCP_PRLI_FUNC_TARGET)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remote port(0x%x): no target support. 
Deleting it\n", + tgt_fcid); + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + } + + fdls_set_tport_state(tport, FDLS_TGT_STATE_READY); + + /* Inform the driver about new target added */ + tport_add_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_add_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport event memory allocation failure: 0x%0x\n", + tport->fcid); + return; + } + tport_add_evt->event = TGT_EV_RPORT_ADD; + tport_add_evt->arg1 = (void *) tport; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x add tport event fcid: 0x%x\n", + tport->fcid, iport->fcid); + list_add_tail(&tport_add_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); +} + + +static void +fdls_process_rff_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic *fnic = iport->fnic; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rff_id *rff_rsp = (struct fc_std_rff_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_FEATURES) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + rsp = FNIC_STD_GET_FC_CT_CMD((&rff_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RFF ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_set_state((&iport->fabric), FDLS_STATE_SCR); + fdls_send_scr(iport); + break; + case FC_FS_RJT: + reason_code = rff_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID ret ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID returned ELS_LS_RJT. Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_rft_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rft_id *rft_rsp = (struct fc_std_rft_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_TYPES) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFT_ID resp recvd in state(%d). 
Dropping.", + fdls_get_state(fdls)); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + + rsp = FNIC_STD_GET_FC_CT_CMD((&rft_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RFT ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_send_register_fc4_features(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_FEATURES); + break; + case FC_FS_RJT: + reason_code = rft_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: RFT_ID ret ELS_LS_RJT BUSY. Retry from timer routine", + iport->fcid); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: RFT_ID REJ. Halting discovery reason %d expl %d", + iport->fcid, reason_code, + rft_rsp->fc_std_ct_hdr.ct_explan); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_rpn_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rpn_id *rpn_rsp = (struct fc_std_rpn_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_RPN_ID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + rsp = FNIC_STD_GET_FC_CT_CMD((&rpn_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RPN ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_send_register_fc4_types(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_TYPES); + break; + case FC_FS_RJT: + reason_code = rpn_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID returned REJ BUSY. Retry from timer routine %p", + iport); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID ELS_LS_RJT. Halting discovery %p", iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_scr_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_scr *scr_rsp = (struct fc_std_scr *) fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process SCR response: 0x%04x", + (uint32_t) scr_rsp->scr.scr_cmd); + + if (fdls_get_state(fdls) != FDLS_STATE_SCR) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (scr_rsp->scr.scr_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_scr_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_send_gpn_ft(iport, FDLS_STATE_GPN_FT); + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_scr_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR returned ELS_LS_RJT. 
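+/*
+ * SCR (State Change Registration, ELS 0x62) is sent to the fabric
+ * controller at 0xFFFFFD and asks it to deliver RSCNs to this port; only
+ * after the SCR is accepted does discovery proceed to GPN_FT, so all
+ * later fabric changes arrive through fdls_process_rscn(). Payload
+ * sketch using the generic fc_els.h definitions:
+ *
+ *	struct fc_scr scr = {
+ *		.scr_cmd	= ELS_SCR,
+ *		.scr_reg_func	= ELS_SCRF_FULL,	// all RSCN classes
+ *	};
+ */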
Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + + default: + atomic64_inc(&iport->iport_stats.fabric_scr_misc_rejects); + break; + } +} + +static void +fdls_process_gpn_ft_tgt_list(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, int len) +{ + struct fc_gpn_ft_rsp_iu *gpn_ft_tgt; + struct fnic_tport_s *tport, *next; + uint32_t fcid; + uint64_t wwpn; + int rem_len = len; + u32 old_link_down_cnt = iport->fnic->link_down_cnt; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process GPN_FT tgt list", iport->fcid); + + gpn_ft_tgt = + (struct fc_gpn_ft_rsp_iu *)((uint8_t *) fchdr + + sizeof(struct fc_frame_header) + + sizeof(struct fc_ct_hdr)); + len -= sizeof(struct fc_frame_header) + sizeof(struct fc_ct_hdr); + + while (rem_len > 0) { + + fcid = ntoh24(gpn_ft_tgt->fcid); + wwpn = be64_to_cpu(gpn_ft_tgt->wwpn); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport: 0x%x: ctrl:0x%x", fcid, gpn_ft_tgt->ctrl); + + if (fcid == iport->fcid) { + if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST) + break; + gpn_ft_tgt++; + rem_len -= sizeof(struct fc_gpn_ft_rsp_iu); + continue; + } + + tport = fnic_find_tport_by_wwpn(iport, wwpn); + if (!tport) { + /* + * New port registered with the switch or first time query + */ + tport = fdls_create_tport(iport, fcid, wwpn); + if (!tport) + return; + } + /* + * check if this was an existing tport with same fcid + * but whose wwpn has changed now ,then remove it and + * create a new one + */ + if (tport->fcid != fcid) { + fdls_delete_tport(iport, tport); + tport = fdls_create_tport(iport, fcid, wwpn); + if (!tport) + return; + } + + /* + * If this GPN_FT rsp is after RSCN then mark the tports which + * matches with the new GPN_FT list, if some tport is not + * found in GPN_FT we went to delete that tport later. 
+ */ + if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) + tport->flags |= FNIC_FDLS_TPORT_IN_GPN_FT_LIST; + + if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST) + break; + + gpn_ft_tgt++; + rem_len -= sizeof(struct fc_gpn_ft_rsp_iu); + } + if (rem_len <= 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN_FT response: malformed/corrupt frame rxlen: %d remlen: %d", + len, rem_len); +} + + /*remove those ports which was not listed in GPN_FT */ + if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) { + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + + if (!(tport->flags & FNIC_FDLS_TPORT_IN_GPN_FT_LIST)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remove port: 0x%x not found in GPN_FT list", + tport->fcid); + fdls_delete_tport(iport, tport); + } else { + tport->flags &= ~FNIC_FDLS_TPORT_IN_GPN_FT_LIST; + } + if ((old_link_down_cnt != iport->fnic->link_down_cnt) + || (iport->state != FNIC_IPORT_STATE_READY)) { + return; + } + } + } +} + +static void +fdls_process_gpn_ft_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, int len) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_gpn_ft *gpn_ft_rsp = (struct fc_std_gpn_ft *) fchdr; + uint16_t rsp; + uint8_t reason_code; + int count = 0; + struct fnic_tport_s *tport, *next; + u32 old_link_down_cnt = iport->fnic->link_down_cnt; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process GPN_FT response: iport state: %d len: %d", + iport->state, len); + + /* + * GPNFT response :- + * FDLS_STATE_GPN_FT : GPNFT send after SCR state + * during fabric discovery(FNIC_IPORT_STATE_FABRIC_DISC) + * FDLS_STATE_RSCN_GPN_FT : GPNFT send in response to RSCN + * FDLS_STATE_SEND_GPNFT : GPNFT send after deleting a Target, + * e.g. after receiving Target LOGO + * FDLS_STATE_TGT_DISCOVERY :Target discovery is currently in progress + * from previous GPNFT response,a new GPNFT response has come. + */ + if (!(((iport->state == FNIC_IPORT_STATE_FABRIC_DISC) + && (fdls_get_state(fdls) == FDLS_STATE_GPN_FT)) + || ((iport->state == FNIC_IPORT_STATE_READY) + && ((fdls_get_state(fdls) == FDLS_STATE_RSCN_GPN_FT) + || (fdls_get_state(fdls) == FDLS_STATE_SEND_GPNFT) + || (fdls_get_state(fdls) == FDLS_STATE_TGT_DISCOVERY))))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPNFT resp recvd in fab state(%d) iport_state(%d). Dropping.", + fdls_get_state(fdls), iport->state); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
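+/*
+ * Each GPN_FT accept entry is 16 bytes: a control byte (bit FC_NS_FID_LAST
+ * terminates the list), a 24-bit port ID, reserved padding and the 64-bit
+ * WWPN. Layout sketch; field names follow this driver, reserved widths
+ * are assumed:
+ *
+ *	struct fc_gpn_ft_rsp_iu {
+ *		u8	ctrl;		// FC_NS_FID_LAST ends the list
+ *		u8	fcid[3];	// 24-bit N_Port ID
+ *		u8	rsvd[4];
+ *		__be64	wwpn;		// registered port name
+ *	} __packed;
+ */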
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + iport->state = FNIC_IPORT_STATE_READY; + rsp = FNIC_STD_GET_FC_CT_CMD((&gpn_ft_rsp->fc_std_ct_hdr)); + + switch (rsp) { + + case FC_FS_ACC: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP accept", iport->fcid); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_process_gpn_ft_tgt_list(iport, fchdr, len); + + /* + * iport state can change only if link down event happened + * We don't need to undo fdls_process_gpn_ft_tgt_list, + * that will be taken care in next link up event + */ + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Halting target discovery: fab st: %d iport st: %d ", + fdls_get_state(fdls), iport->state); + break; + } + fdls_tgt_discovery_start(iport); + break; + + case FC_FS_RJT: + reason_code = gpn_ft_rsp->fc_std_ct_hdr.ct_reason; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP Reject reason: %d", iport->fcid, reason_code); + + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP ret REJ/BSY. Retry from timer routine", + iport->fcid); + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP reject", iport->fcid); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + /* + * If GPN_FT ls_rjt then we should delete + * all existing tports + */ + count = 0; + list_for_each_entry_safe(tport, next, &iport->tport_list, + links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN_FT_REJECT: Remove port: 0x%x", + tport->fcid); + fdls_delete_tport(iport, tport); + if ((old_link_down_cnt != iport->fnic->link_down_cnt) + || (iport->state != FNIC_IPORT_STATE_READY)) { + return; + } + count++; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN_FT_REJECT: Removed (0x%x) ports", count); + } + break; + + default: + break; + } +} + +/** + * fdls_process_fabric_logo_rsp - Handle an flogo response from the fcf + * @iport: Handle to fnic iport + * @fchdr: Incoming frame + */ +static void +fdls_process_fabric_logo_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *flogo_rsp = (struct fc_std_flogi *) fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (flogo_rsp->els.fl_cmd) { + case ELS_LS_ACC: + if (iport->fabric.state != FDLS_STATE_FABRIC_LOGO) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response. Fabric not in LOGO state. Dropping! %p", + iport); + return; + } + + iport->fabric.state = FDLS_STATE_FLOGO_DONE; + iport->state = FNIC_IPORT_STATE_LINK_WAIT; + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport 0x%p Canceling fabric disc timer\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response from Fabric for did: 0x%x", + ntoh24(fchdr->fh_d_id)); + return; + + case ELS_LS_RJT: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response from Fabric for did: 0x%x returned ELS_LS_RJT", + ntoh24(fchdr->fh_d_id)); + return; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGO response not accepted or rejected: 0x%x", + flogo_rsp->els.fl_cmd); + } +} + +static void +fdls_process_flogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, void *rx_frame) +{ + struct fnic_fdls_fabric_s *fabric = &iport->fabric; + struct fc_std_flogi *flogi_rsp = (struct fc_std_flogi *) fchdr; + uint8_t *fcid; + uint16_t rdf_size; + uint8_t fcmac[6] = { 0x0E, 0XFC, 0x00, 0x00, 0x00, 0x00 }; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS processing FLOGI response", iport->fcid); + + if (fdls_get_state(fabric) != FDLS_STATE_FABRIC_FLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received in state (%d). Dropping frame", + fdls_get_state(fabric)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
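+/*
+ * A fabric LOGO is this port logging itself out of the switch, so the
+ * accept path below is terminal: the fabric state machine parks in
+ * FDLS_STATE_FLOGO_DONE and the iport returns to
+ * FNIC_IPORT_STATE_LINK_WAIT until the next link-up restarts FLOGI.
+ * Transition sketch:
+ *
+ *	case ELS_LS_ACC:
+ *		iport->fabric.state = FDLS_STATE_FLOGO_DONE;
+ *		iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+ */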
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fabric), oxid, iport->active_oxid_fabric_req); + return; + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (flogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_flogi_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fcid = FNIC_STD_GET_D_ID(fchdr); + iport->fcid = ntoh24(fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FLOGI response accepted", iport->fcid); + + /* Learn the Service Params */ + rdf_size = be16_to_cpu(FNIC_LOGI_RDF_SIZE(flogi_rsp->els)); + if ((rdf_size >= FNIC_MIN_DATA_FIELD_SIZE) + && (rdf_size < FNIC_FC_MAX_PAYLOAD_LEN)) + iport->max_payload_size = min(rdf_size, + iport->max_payload_size); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "max_payload_size from fabric: %u set: %d", rdf_size, + iport->max_payload_size); + + iport->r_a_tov = be32_to_cpu(FNIC_LOGI_R_A_TOV(flogi_rsp->els)); + iport->e_d_tov = be32_to_cpu(FNIC_LOGI_E_D_TOV(flogi_rsp->els)); + + if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC) + iport->e_d_tov = iport->e_d_tov / FNIC_NSEC_TO_MSEC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "From fabric: R_A_TOV: %d E_D_TOV: %d", + iport->r_a_tov, iport->e_d_tov); + + fc_host_fabric_name(iport->fnic->host) = + get_unaligned_be64(&FNIC_LOGI_NODE_NAME(flogi_rsp->els)); + fc_host_port_id(iport->fnic->host) = iport->fcid; + + fnic_fdls_learn_fcoe_macs(iport, rx_frame, fcid); + + if (fnic_fdls_register_portid(iport, iport->fcid, rx_frame) != 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FLOGI registration failed", iport->fcid); + break; + } + + memcpy(&fcmac[3], fcid, 3); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Adding vNIC device MAC addr: %02x:%02x:%02x:%02x:%02x:%02x", + fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], + fcmac[5]); + vnic_dev_add_addr(iport->fnic->vdev, fcmac); + + if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI) { + fnic_fdls_start_plogi(iport); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received. Starting PLOGI"); + } else { + /* From FDLS_STATE_FABRIC_FLOGI state fabric can only go to + * FDLS_STATE_LINKDOWN + * state, hence we don't have to worry about undoing: + * the fnic_fdls_register_portid and vnic_dev_add_addr + */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received in state (%d). Dropping frame", + fdls_get_state(fabric)); + } + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_flogi_ls_rejects); + if (fabric->retry_counter < iport->max_flogi_retries) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI returned ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + + /* Retry Flogi again from the timer routine. */ + fabric->flags |= FNIC_FDLS_RETRY_FRAME; + + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI returned ELS_LS_RJT. 
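+/*
+ * The FLOGI accept above is where the port learns its identity and the
+ * fabric's common service parameters: the assigned S_ID becomes
+ * iport->fcid, the receive data-field size caps max_payload_size, and
+ * R_A_TOV/E_D_TOV are taken from the login payload. When the fabric sets
+ * the E_D_TOV-resolution feature bit the value is in nanoseconds, hence
+ * the conversion (FNIC_NSEC_TO_MSEC assumed to be 1000000):
+ *
+ *	if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC)
+ *		iport->e_d_tov /= FNIC_NSEC_TO_MSEC;	// ns -> ms
+ */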
Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport 0x%p Canceling fabric disc timer\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + fabric->timer_pending = 0; + fabric->retry_counter = 0; + } + break; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response not accepted: 0x%x", + flogi_rsp->els.fl_cmd); + atomic64_inc(&iport->iport_stats.fabric_flogi_misc_rejects); + break; + } +} + +static void +fdls_process_fabric_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *) fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state((&iport->fabric)) != FDLS_STATE_FABRIC_PLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Fabric PLOGI response received in state (%d). Dropping frame", + fdls_get_state(&iport->fabric)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_plogi_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x fabric PLOGI response: Accepted\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_set_state(&iport->fabric, FDLS_STATE_RPN_ID); + fdls_send_rpn_id(iport); + break; + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_plogi_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (iport->fabric.retry_counter < iport->max_plogi_retries)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Fabric PLOGI ELS_LS_RJT BUSY. Retry from timer routine", + iport->fcid); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Fabric PLOGI ELS_LS_RJT. Halting discovery", + iport->fcid); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + return; + } + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI response not accepted: 0x%x", + plogi_rsp->els.fl_cmd); + atomic64_inc(&iport->iport_stats.fabric_plogi_misc_rejects); + break; + } +} + +static void fdls_process_fdmi_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + u64 fdmi_tov; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (iport->active_oxid_fdmi_plogi != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fdmi_plogi); + return; + } + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); + + if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) { + timer_delete_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending = 0; + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process fdmi PLOGI response status: ELS_LS_ACC\n"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Sending fdmi registration for port 0x%x\n", + iport->fcid); + + fdls_fdmi_register_hba(iport); + fdls_fdmi_register_pa(iport); + fdmi_tov = jiffies + msecs_to_jiffies(5000); + mod_timer(&iport->fabric.fdmi_timer, + round_jiffies(fdmi_tov)); + break; + case ELS_LS_RJT: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Fabric FDMI PLOGI returned ELS_LS_RJT reason: 0x%x", + els_rjt->rej.er_reason); + + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (iport->fabric.fdmi_retry < 7)) { + iport->fabric.fdmi_retry++; + fdls_send_fdmi_plogi(iport); + } + break; + default: + break; + } + } +} +static void fdls_process_fdmi_reg_ack(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, + int rsp_type) +{ + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + if (!iport->fabric.fdmi_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received FDMI ack while not waiting: 0x%x\n", + FNIC_STD_GET_OX_ID(fchdr)); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + + if ((iport->active_oxid_fdmi_rhba != oxid) && + (iport->active_oxid_fdmi_rpa != oxid)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. oxid recvd: 0x%x, active oxids(rhba,rpa): 0x%x, 0x%x\n", + oxid, iport->active_oxid_fdmi_rhba, iport->active_oxid_fdmi_rpa); + return; + } + if (FNIC_FRAME_TYPE(oxid) == FNIC_FRAME_TYPE_FDMI_RHBA) { + iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba); + } else { + iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa); + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Received FDMI registration ack\n", + iport->fcid); + + if (!iport->fabric.fdmi_pending) { + timer_delete_sync(&iport->fabric.fdmi_timer); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Canceling FDMI timer\n", + iport->fcid); + } +} + +static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + s_id = ntoh24(FNIC_STD_GET_S_ID(fchdr)); + + if (!(s_id != FC_FID_MGMT_SERV)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid SID: 0x%x. Dropping frame", + s_id); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + + switch (FNIC_FRAME_TYPE(oxid)) { + case FNIC_FRAME_TYPE_FDMI_PLOGI: + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); + break; + case FNIC_FRAME_TYPE_FDMI_RHBA: + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba); + break; + case FNIC_FRAME_TYPE_FDMI_RPA: + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid oxid: 0x%x. 
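+/*
+ * FDMI registration above is a two-step exchange with the management
+ * server (FC_FID_MGMT_SERV, 0xFFFFFA): RHBA registers the HBA object,
+ * RPA attaches the port attributes. The two requests are issued back to
+ * back under one 5-second guard timer, tracked by separate pending bits,
+ * and the ack handler only stops the timer once both bits clear:
+ *
+ *	if (!iport->fabric.fdmi_pending)	// RHBA and RPA both acked
+ *		timer_delete_sync(&iport->fabric.fdmi_timer);
+ */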
Dropping frame", + oxid); + break; + } + + timer_delete_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + + fdls_send_fdmi_plogi(iport); +} + +static void +fdls_process_fabric_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fc_std_abts_ba_acc *ba_acc = (struct fc_std_abts_ba_acc *)fchdr; + struct fc_std_abts_ba_rjt *ba_rjt; + uint32_t fabric_state = iport->fabric.state; + struct fnic *fnic = iport->fnic; + int frame_type; + uint16_t oxid; + + s_id = ntoh24(fchdr->fh_s_id); + ba_rjt = (struct fc_std_abts_ba_rjt *) fchdr; + + if (!((s_id == FC_FID_DIR_SERV) || (s_id == FC_FID_FLOGI) + || (s_id == FC_FID_FCTRL))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid SID: 0x%x. Dropping frame", + s_id); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid oxid: 0x%x. Dropping frame", + oxid); + return; + } + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp BA_ACC for fabric_state: %d OX_ID: 0x%x", + fabric_state, be16_to_cpu(ba_acc->acc.ba_ox_id)); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "BA_RJT fs: %d OX_ID: 0x%x rc: 0x%x rce: 0x%x", + fabric_state, FNIC_STD_GET_OX_ID(&ba_rjt->fchdr), + ba_rjt->rjt.br_reason, ba_rjt->rjt.br_explan); + } + + frame_type = FNIC_FRAME_TYPE(oxid); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + /* currently error handling/retry logic is same for ABTS BA_ACC & BA_RJT */ + switch (frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + if (iport->fabric.retry_counter < iport->max_flogi_retries) + fdls_send_fabric_flogi(iport); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max FLOGI retries"); + break; + case FNIC_FRAME_TYPE_FABRIC_LOGO: + if (iport->fabric.retry_counter < FABRIC_LOGO_MAX_RETRY) + fdls_send_fabric_logo(iport); + break; + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + if (iport->fabric.retry_counter < iport->max_plogi_retries) + fdls_send_fabric_plogi(iport); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max PLOGI retries"); + break; + case FNIC_FRAME_TYPE_FABRIC_RPN: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_rpn_id(iport); + else + /* go back to fabric Plogi */ + fnic_fdls_start_plogi(iport); + break; + case FNIC_FRAME_TYPE_FABRIC_SCR: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_scr(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR exhausted retries. Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_RFT: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_register_fc4_types(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFT exhausted retries. 
Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_RFF: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_register_fc4_features(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF exhausted retries. Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + if (iport->fabric.retry_counter <= FDLS_RETRY_COUNT) + fdls_send_gpn_ft(iport, fabric_state); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN FT exhausted retries. Start fabric PLOGI %p", + iport); + break; + default: + /* + * We should not be here since we already validated rx oxid with + * our active_oxid_fabric_req + */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Invalid OXID/active oxid 0x%x\n", oxid); + WARN_ON(true); + return; + } +} + +static void +fdls_process_abts_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_abts_ba_acc *pba_acc; + uint32_t nport_id; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_abts_ba_acc); + + nport_id = ntoh24(fchdr->fh_s_id); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abort from SID 0x%8x", nport_id); + + tport = fnic_find_tport_by_fcid(iport, nport_id); + if (tport) { + if (tport->active_oxid == oxid) { + tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED; + fdls_free_oxid(iport, oxid, &tport->active_oxid); + } + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate frame to send response for ABTS req", + iport->fcid); + return; + } + + pba_acc = (struct fc_std_abts_ba_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pba_acc = (struct fc_std_abts_ba_acc) { + .fchdr = {.fh_r_ctl = FC_RCTL_BA_ACC, + .fh_f_ctl = {FNIC_FCP_RSP_FCTL, 0, 0}}, + .acc = {.ba_low_seq_cnt = 0, .ba_high_seq_cnt = cpu_to_be16(0xFFFF)} + }; + + FNIC_STD_SET_S_ID(pba_acc->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pba_acc->fchdr, fchdr->fh_s_id); + FNIC_STD_SET_OX_ID(pba_acc->fchdr, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_STD_SET_RX_ID(pba_acc->fchdr, FNIC_STD_GET_RX_ID(fchdr)); + + pba_acc->acc.ba_rx_id = cpu_to_be16(FNIC_STD_GET_RX_ID(fchdr)); + pba_acc->acc.ba_ox_id = cpu_to_be16(FNIC_STD_GET_OX_ID(fchdr)); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send BA ACC with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_unsupported_els_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_els_rjt_rsp *pls_rsp; + uint16_t oxid; + uint32_t d_id = ntoh24(fchdr->fh_d_id); + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + + if (iport->fcid != d_id) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping unsupported ELS with illegal frame bits 0x%x\n", + d_id); + atomic64_inc(&iport->iport_stats.unsupported_frames_dropped); + return; + } + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping unsupported ELS request in iport state: %d", + iport->state); + 
atomic64_inc(&iport->iport_stats.unsupported_frames_dropped); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send response to unsupported ELS request"); + return; + } + + pls_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(frame, iport); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Process unsupported ELS request from SID: 0x%x", + iport->fcid, ntoh24(fchdr->fh_s_id)); + + /* We don't support this ELS request, send a reject */ + pls_rsp->rej.er_reason = 0x0B; + pls_rsp->rej.er_explan = 0x0; + pls_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(pls_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pls_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pls_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_rls_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_rls_acc *prls_acc_rsp; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rls_acc); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process RLS request %d", iport->fnic->fnic_num); + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RLS req in iport state: %d. Dropping the frame.", + iport->state); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RLS accept"); + return; + } + prls_acc_rsp = (struct fc_std_rls_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + FNIC_STD_SET_S_ID(prls_acc_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(prls_acc_rsp->fchdr, fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(prls_acc_rsp->fchdr, oxid); + FNIC_STD_SET_RX_ID(prls_acc_rsp->fchdr, FNIC_UNASSIGNED_RXID); + + FNIC_STD_SET_F_CTL(prls_acc_rsp->fchdr, FNIC_ELS_REP_FCTL << 16); + FNIC_STD_SET_R_CTL(prls_acc_rsp->fchdr, FC_RCTL_ELS_REP); + FNIC_STD_SET_TYPE(prls_acc_rsp->fchdr, FC_TYPE_ELS); + + prls_acc_rsp->els.rls_cmd = ELS_LS_ACC; + prls_acc_rsp->els.rls_lesb.lesb_link_fail = + cpu_to_be32(iport->fnic->link_down_cnt); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_els_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr, + uint32_t len) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *pels_acc; + uint16_t oxid; + uint8_t *fc_payload; + uint8_t type; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header); + type = *fc_payload; + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping ELS frame type: 0x%x in iport state: %d", + type, iport->state); + return; + } + switch (type) { + case ELS_ECHO: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending LS_ACC for ECHO request %d\n", + iport->fnic->fnic_num); + break; + + case ELS_RRQ: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending LS_ACC for RRQ request %d\n", + iport->fnic->fnic_num); + break; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending 
LS_ACC for 0x%x ELS frame\n", type); + break; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send ELS response for 0x%x", + type); + return; + } + + if (type == ELS_ECHO) { + /* Brocade sends a longer payload, copy all frame back */ + memcpy(frame, fchdr, len); + } + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(pels_acc->fchdr, fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid); + + if (type == ELS_ECHO) + frame_size += len; + else + frame_size += sizeof(struct fc_std_els_acc_rsp); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_tgt_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fnic_tport_s *tport; + uint32_t tport_state; + struct fc_std_abts_ba_acc *ba_acc; + struct fc_std_abts_ba_rjt *ba_rjt; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + int frame_type; + + s_id = ntoh24(fchdr->fh_s_id); + ba_acc = (struct fc_std_abts_ba_acc *)fchdr; + ba_rjt = (struct fc_std_abts_ba_rjt *)fchdr; + + tport = fnic_find_tport_by_fcid(iport, s_id); + if (!tport) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt abts rsp with invalid SID: 0x%x", s_id); + return; + } + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport 0x%p Canceling fabric disc timer\n", tport); + fnic_del_tport_timer_sync(fnic, tport); + } + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt abts rsp in iport state(%d). Dropping.", + iport->state); + return; + } + tport->timer_pending = 0; + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + tport_state = tport->state; + oxid = FNIC_STD_GET_OX_ID(fchdr); + + /*This abort rsp is for ADISC */ + frame_type = FNIC_FRAME_TYPE(oxid); + switch (frame_type) { + case FNIC_FRAME_TYPE_TGT_ADISC: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "OX_ID: 0x%x tgt_fcid: 0x%x rcvd tgt adisc abts resp BA_ACC", + be16_to_cpu(ba_acc->acc.ba_ox_id), + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC BA_RJT rcvd tport_fcid: 0x%x tport_state: %d ", + tport->fcid, tport_state); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation:0x%x ", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < FDLS_RETRY_COUNT) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_adisc(iport, tport); + return; + } + fdls_free_oxid(iport, oxid, &tport->active_oxid); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC not responding. 
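+/*
+ * For target exchanges the ABTS is recovery, not teardown: BA_ACC means
+ * the stuck ADISC/PLOGI/PRLI exchange was cleanly aborted, so the original
+ * request is simply re-sent while retries remain. Only BA_RJT or retry
+ * exhaustion escalates: ADISC/PLOGI give up on the tport and re-run
+ * GPN_FT, PRLI falls back one step to PLOGI. Retry pattern:
+ *
+ *	if (tport->retry_counter < FDLS_RETRY_COUNT &&
+ *	    fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ *		fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ *		fdls_send_tgt_adisc(iport, tport);	// or plogi/prli
+ *		return;
+ *	}
+ */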
Deleting target port: 0x%x", + tport->fcid); + fdls_delete_tport(iport, tport); + /* Restart discovery of targets */ + if ((iport->state == FNIC_IPORT_STATE_READY) + && (iport->fabric.state != FDLS_STATE_SEND_GPNFT) + && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) { + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + break; + case FNIC_FRAME_TYPE_TGT_PLOGI: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt PLOGI abts response BA_ACC tgt_fcid: 0x%x", + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x", + tport->fcid, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation: 0x%x", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < iport->max_plogi_retries) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); + return; + } + + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_delete_tport(iport, tport); + /* Restart discovery of targets */ + if ((iport->state == FNIC_IPORT_STATE_READY) + && (iport->fabric.state != FDLS_STATE_SEND_GPNFT) + && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) { + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + break; + case FNIC_FRAME_TYPE_TGT_PRLI: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Received tgt PRLI abts response BA_ACC", + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x ", + tport->fcid, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation: 0x%x", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < FDLS_RETRY_COUNT) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_prli(iport, tport); + return; + } + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); /* go back to plogi */ + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS response for unknown frame %p", iport); + break; + } + +} + +static void +fdls_process_plogi_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_els_rjt_rsp *pplogi_rsp; + uint16_t oxid; + uint32_t d_id = ntoh24(fchdr->fh_d_id); + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + + if (iport->fcid != d_id) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received PLOGI with illegal frame bits. 
Dropping frame from 0x%x", + d_id); + return; + } + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received PLOGI request in iport state: %d Dropping frame", + iport->state); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send response to PLOGI request"); + return; + } + + pplogi_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(frame, iport); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Process PLOGI request from SID: 0x%x", + iport->fcid, ntoh24(fchdr->fh_s_id)); + + /* We don't support PLOGI request, send a reject */ + pplogi_rsp->rej.er_reason = 0x0B; + pplogi_rsp->rej.er_explan = 0x0; + pplogi_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(pplogi_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pplogi_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pplogi_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_logo_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + struct fc_std_logo *logo = (struct fc_std_logo *)fchdr; + uint32_t nport_id; + uint64_t nport_name; + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + nport_id = ntoh24(logo->els.fl_n_port_id); + nport_name = be64_to_cpu(logo->els.fl_n_port_wwn); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process LOGO request from fcid: 0x%x", nport_id); + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dropping LOGO req from 0x%x in iport state: %d", + nport_id, iport->state); + return; + } + + tport = fnic_find_tport_by_fcid(iport, nport_id); + + if (!tport) { + /* We are not logged in with the nport, log and drop... 
*/ + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received LOGO from an nport not logged in: 0x%x(0x%llx)", + nport_id, nport_name); + return; + } + if (tport->fcid != nport_id) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received LOGO with invalid target port fcid: 0x%x(0x%llx)", + nport_id, nport_name); + return; + } + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + /* got a logo in response to adisc to a target which has logged out */ + if (tport->state == FDLS_TGT_STATE_ADISC) { + tport->retry_counter = 0; + oxid = tport->active_oxid; + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_delete_tport(iport, tport); + fdls_send_logo_resp(iport, &logo->fchdr); + if ((iport->state == FNIC_IPORT_STATE_READY) + && (fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) + && (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Sending GPNFT in response to LOGO from Target:0x%x", + nport_id); + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + return; + } + } else { + fdls_delete_tport(iport, tport); + } + if (iport->state == FNIC_IPORT_STATE_READY) { + fdls_send_logo_resp(iport, &logo->fchdr); + if ((fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) && + (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Sending GPNFT in response to LOGO from Target:0x%x", + nport_id); + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + } +} + +static void +fdls_process_rscn(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + struct fc_std_rscn *rscn; + struct fc_els_rscn_page *rscn_port = NULL; + int num_ports; + struct fnic_tport_s *tport, *next; + uint32_t nport_id; + uint8_t fcid[3]; + int newports = 0; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + int rscn_type = NOT_PC_RSCN; + uint32_t sid = ntoh24(fchdr->fh_s_id); + unsigned long reset_fnic_list_lock_flags = 0; + uint16_t rscn_payload_len; + + atomic64_inc(&iport->iport_stats.num_rscns); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process RSCN %p", iport); + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS RSCN received in state(%d). Dropping", + fdls_get_state(fdls)); + return; + } + + rscn = (struct fc_std_rscn *)fchdr; + rscn_payload_len = be16_to_cpu(rscn->els.rscn_plen); + + /* frame validation */ + if ((rscn_payload_len % 4 != 0) || (rscn_payload_len < 8) + || (rscn_payload_len > 1024) + || (rscn->els.rscn_page_len != 4)) { + num_ports = 0; + if ((rscn_payload_len == 0xFFFF) + && (sid == FC_FID_FCTRL)) { + rscn_type = PC_RSCN; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "pcrscn: PCRSCN received. sid: 0x%x payload len: 0x%x", + sid, rscn_payload_len); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN payload_len: 0x%x page_len: 0x%x", + rscn_payload_len, rscn->els.rscn_page_len); + /* if this happens then we need to send ADISC to all the tports. 
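+ *
+ * An RSCN is well formed when the page length is 4 and the payload length
+ * is a multiple of 4 between 8 and 1024 bytes. Anything else is handled
+ * conservatively: either it is the PC-RSCN marker (payload length 0xFFFF
+ * from the fabric controller) or the frame is unparseable, in which case
+ * every READY tport is re-verified with ADISC instead of trusting an
+ * affected-port list that cannot be decoded.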
*/ + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + if (tport->state == FDLS_TGT_STATE_READY) + tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN for port id: 0x%x", tport->fcid); + } + } /* end else */ + } else { + num_ports = (rscn_payload_len - 4) / rscn->els.rscn_page_len; + rscn_port = (struct fc_els_rscn_page *)(rscn + 1); + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN received for num_ports: %d payload_len: %d page_len: %d ", + num_ports, rscn_payload_len, rscn->els.rscn_page_len); + + /* + * RSCN have at least one Port_ID page , but may not have any port_id + * in it. If no port_id is specified in the Port_ID page , we send + * ADISC to all the tports + */ + + while (num_ports) { + + memcpy(fcid, rscn_port->rscn_fid, 3); + + nport_id = ntoh24(fcid); + rscn_port++; + num_ports--; + /* if this happens then we need to send ADISC to all the tports. */ + if (nport_id == 0) { + list_for_each_entry_safe(tport, next, &iport->tport_list, + links) { + if (tport->state == FDLS_TGT_STATE_READY) + tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN for port id: 0x%x", tport->fcid); + } + break; + } + tport = fnic_find_tport_by_fcid(iport, nport_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN port id list: 0x%x", nport_id); + + if (!tport) { + newports++; + continue; + } + if (tport->state == FDLS_TGT_STATE_READY) + tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC; + } + + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON && + rscn_type == PC_RSCN && fnic->role == FNIC_ROLE_FCP_INITIATOR) { + + if (fnic->pc_rscn_handling_status == PC_RSCN_HANDLING_IN_PROGRESS) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PCRSCN handling already in progress. Skip host reset: %d", + iport->fnic->fnic_num); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Processing PCRSCN. 
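+/*
+ * The PC-RSCN path below escalates to a host reset, which must not run
+ * under fnic_lock. The locks are therefore kept strictly non-nested:
+ * fnic_lock is dropped, the global reset list is updated under its own
+ * lock, the reset worker is kicked, and fnic_lock is re-acquired:
+ *
+ *	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ *	spin_lock_irqsave(&reset_fnic_list_lock, flags);
+ *	list_add_tail(&fnic->links, &reset_fnic_list);
+ *	spin_unlock_irqrestore(&reset_fnic_list_lock, flags);
+ *	queue_work(reset_fnic_work_queue, &reset_fnic_work);
+ *	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ */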
Queuing fnic for host reset: %d", + iport->fnic->fnic_num); + fnic->pc_rscn_handling_status = PC_RSCN_HANDLING_IN_PROGRESS; + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + + spin_lock_irqsave(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + list_add_tail(&fnic->links, &reset_fnic_list); + spin_unlock_irqrestore(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + + queue_work(reset_fnic_work_queue, &reset_fnic_work); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process RSCN sending GPN_FT: newports: %d", newports); + fdls_send_gpn_ft(iport, FDLS_STATE_RSCN_GPN_FT); + fdls_send_rscn_resp(iport, fchdr); + } +} + +void fnic_fdls_disc_start(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + + fc_host_fabric_name(iport->fnic->host) = 0; + fc_host_post_event(iport->fnic->host, fc_get_event_number(), + FCH_EVT_LIPRESET, 0); + + if (!iport->usefip) { + if (iport->flags & FNIC_FIRST_LINK_UP) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_scsi_fcpio_reset(iport->fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + iport->flags &= ~FNIC_FIRST_LINK_UP; + } + fnic_fdls_start_flogi(iport); + } else + fnic_fdls_start_plogi(iport); +} + +static void +fdls_process_adisc_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_els_adisc *padisc_acc; + struct fc_std_els_adisc *adisc_req = (struct fc_std_els_adisc *)fchdr; + uint64_t frame_wwnn; + uint64_t frame_wwpn; + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint8_t *rjt_frame; + uint8_t *acc_frame; + struct fc_std_els_rjt_rsp *prjts_rsp; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t rjt_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + uint16_t acc_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_adisc); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process ADISC request %d", iport->fnic->fnic_num); + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport for fcid: 0x%x not found. Dropping ADISC req.", + tgt_fcid); + return; + } + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dropping ADISC req from fcid: 0x%x in iport state: %d", + tgt_fcid, iport->state); + return; + } + + frame_wwnn = be64_to_cpu(adisc_req->els.adisc_wwnn); + frame_wwpn = be64_to_cpu(adisc_req->els.adisc_wwpn); + + if ((frame_wwnn != tport->wwnn) || (frame_wwpn != tport->wwpn)) { + /* send reject */ + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC req from fcid: 0x%x mismatch wwpn: 0x%llx wwnn: 0x%llx", + tgt_fcid, frame_wwpn, frame_wwnn); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "local tport wwpn: 0x%llx wwnn: 0x%llx. 
Sending RJT", + tport->wwpn, tport->wwnn); + + rjt_frame = fdls_alloc_frame(iport); + if (rjt_frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate rjt_frame to send response to ADISC request"); + return; + } + + prjts_rsp = (struct fc_std_els_rjt_rsp *) (rjt_frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(rjt_frame, iport); + + prjts_rsp->rej.er_reason = 0x03; /* logical error */ + prjts_rsp->rej.er_explan = 0x1E; /* N_port login required */ + prjts_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(prjts_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(prjts_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(prjts_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, rjt_frame, rjt_frame_size); + return; + } + + acc_frame = fdls_alloc_frame(iport); + if (acc_frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send ADISC accept"); + return; + } + + padisc_acc = (struct fc_std_els_adisc *) (acc_frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + FNIC_STD_SET_S_ID(padisc_acc->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(padisc_acc->fchdr, fchdr->fh_s_id); + + FNIC_STD_SET_F_CTL(padisc_acc->fchdr, FNIC_ELS_REP_FCTL << 16); + FNIC_STD_SET_R_CTL(padisc_acc->fchdr, FC_RCTL_ELS_REP); + FNIC_STD_SET_TYPE(padisc_acc->fchdr, FC_TYPE_ELS); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(padisc_acc->fchdr, oxid); + FNIC_STD_SET_RX_ID(padisc_acc->fchdr, FNIC_UNASSIGNED_RXID); + + padisc_acc->els.adisc_cmd = ELS_LS_ACC; + + FNIC_STD_SET_NPORT_NAME(&padisc_acc->els.adisc_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&padisc_acc->els.adisc_wwnn, + iport->wwnn); + memcpy(padisc_acc->els.adisc_port_id, fchdr->fh_d_id, 3); + + fnic_send_fcoe_frame(iport, acc_frame, acc_frame_size); +} + +/* + * Performs a validation for all FCOE frames and return the frame type + */ +int +fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t type; + uint8_t *fc_payload; + uint16_t oxid; + uint32_t s_id; + uint32_t d_id; + struct fnic *fnic = iport->fnic; + struct fnic_fdls_fabric_s *fabric = &iport->fabric; + int oxid_frame_type; + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header); + type = *fc_payload; + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + + /* some common validation */ + if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) { + if (iport->fcid != d_id || (!FNIC_FC_FRAME_CS_CTL(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "invalid frame received. Dropping frame"); + return -1; + } + } + + /* BLS ABTS response */ + if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC) + || (fchdr->fh_r_ctl == FC_RCTL_BA_RJT)) { + if (!(FNIC_FC_FRAME_TYPE_BLS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS invalid frame. Dropping frame"); + return -1; + + } + if (fdls_is_oxid_fabric_req(oxid)) { + if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame", + oxid, s_id); + return -1; + } + return FNIC_FABRIC_BLS_ABTS_RSP; + } else if (fdls_is_oxid_fdmi_req(oxid)) { + return FNIC_FDMI_BLS_ABTS_RSP; + } else if (fdls_is_oxid_tgt_req(oxid)) { + return FNIC_TPORT_BLS_ABTS_RSP; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS rsp with unknown oxid(0x%x) from 0x%x. 
Dropping frame", + oxid, s_id); + return -1; + } + + /* BLS ABTS Req */ + if ((fchdr->fh_r_ctl == FC_RCTL_BA_ABTS) + && (FNIC_FC_FRAME_TYPE_BLS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Receiving Abort Request from s_id: 0x%x", s_id); + return FNIC_BLS_ABTS_REQ; + } + + /* unsolicited requests frames */ + if (FNIC_FC_FRAME_UNSOLICITED(fchdr)) { + switch (type) { + case ELS_LOGO: + if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) + || (!FNIC_FC_FRAME_UNSOLICITED(fchdr)) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received LOGO invalid frame. Dropping frame"); + return -1; + } + return FNIC_ELS_LOGO_REQ; + case ELS_RSCN: + if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) + || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RSCN invalid FCTL. Dropping frame"); + return -1; + } + if (s_id != FC_FID_FCTRL) + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RSCN from target FCTL: 0x%x type: 0x%x s_id: 0x%x.", + fchdr->fh_f_ctl[0], fchdr->fh_type, s_id); + return FNIC_ELS_RSCN_REQ; + case ELS_PLOGI: + return FNIC_ELS_PLOGI_REQ; + case ELS_ECHO: + return FNIC_ELS_ECHO_REQ; + case ELS_ADISC: + return FNIC_ELS_ADISC; + case ELS_RLS: + return FNIC_ELS_RLS; + case ELS_RRQ: + return FNIC_ELS_RRQ; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unsupported frame (type:0x%02x) from fcid: 0x%x", + type, s_id); + return FNIC_ELS_UNSUPPORTED_REQ; + } + } + + /* solicited response from fabric or target */ + oxid_frame_type = FNIC_FRAME_TYPE(oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "oxid frame code: 0x%x, oxid: 0x%x\n", oxid_frame_type, oxid); + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_FLOGI) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_FLOGI_RSP; + + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_DIR_SERV) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_PLOGI_RSP; + + case FNIC_FRAME_TYPE_FABRIC_SCR: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_FCTRL) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_SCR_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RPN: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_RPN_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RFT: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_RFT_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RFF: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. 
Dropping frame"); + return -1; + } + return FNIC_FABRIC_RFF_RSP; + + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_GPN_FT_RSP; + + case FNIC_FRAME_TYPE_FABRIC_LOGO: + return FNIC_FABRIC_LOGO_RSP; + case FNIC_FRAME_TYPE_FDMI_PLOGI: + return FNIC_FDMI_PLOGI_RSP; + case FNIC_FRAME_TYPE_FDMI_RHBA: + return FNIC_FDMI_REG_HBA_RSP; + case FNIC_FRAME_TYPE_FDMI_RPA: + return FNIC_FDMI_RPA_RSP; + case FNIC_FRAME_TYPE_TGT_PLOGI: + return FNIC_TPORT_PLOGI_RSP; + case FNIC_FRAME_TYPE_TGT_PRLI: + return FNIC_TPORT_PRLI_RSP; + case FNIC_FRAME_TYPE_TGT_ADISC: + return FNIC_TPORT_ADISC_RSP; + case FNIC_FRAME_TYPE_TGT_LOGO: + if (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping Unknown frame in tport solicited exchange range type: 0x%x.", + fchdr->fh_type); + return -1; + } + return FNIC_TPORT_LOGO_RSP; + default: + /* Drop the Rx frame and log/stats it */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Solicited response: unknown OXID: 0x%x", oxid); + return -1; + } + + return -1; +} + +void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame, + int len, int fchdr_offset) +{ + struct fc_frame_header *fchdr; + uint32_t s_id = 0; + uint32_t d_id = 0; + struct fnic *fnic = iport->fnic; + int frame_type; + + fchdr = (struct fc_frame_header *) ((uint8_t *) rx_frame + fchdr_offset); + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + + fnic_debug_dump_fc_frame(fnic, fchdr, len, "Incoming"); + + frame_type = + fnic_fdls_validate_and_get_frame_type(iport, fchdr); + + /*if we are in flogo drop everything else */ + if (iport->fabric.state == FDLS_STATE_FABRIC_LOGO && + frame_type != FNIC_FABRIC_LOGO_RSP) + return; + + switch (frame_type) { + case FNIC_FABRIC_FLOGI_RSP: + fdls_process_flogi_rsp(iport, fchdr, rx_frame); + break; + case FNIC_FABRIC_PLOGI_RSP: + fdls_process_fabric_plogi_rsp(iport, fchdr); + break; + case FNIC_FDMI_PLOGI_RSP: + fdls_process_fdmi_plogi_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RPN_RSP: + fdls_process_rpn_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RFT_RSP: + fdls_process_rft_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RFF_RSP: + fdls_process_rff_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_SCR_RSP: + fdls_process_scr_rsp(iport, fchdr); + break; + case FNIC_FABRIC_GPN_FT_RSP: + fdls_process_gpn_ft_rsp(iport, fchdr, len); + break; + case FNIC_TPORT_PLOGI_RSP: + fdls_process_tgt_plogi_rsp(iport, fchdr); + break; + case FNIC_TPORT_PRLI_RSP: + fdls_process_tgt_prli_rsp(iport, fchdr); + break; + case FNIC_TPORT_ADISC_RSP: + fdls_process_tgt_adisc_rsp(iport, fchdr); + break; + case FNIC_TPORT_BLS_ABTS_RSP: + fdls_process_tgt_abts_rsp(iport, fchdr); + break; + case FNIC_TPORT_LOGO_RSP: + /* Logo response from tgt which we have deleted */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Logo response from tgt: 0x%x", + ntoh24(fchdr->fh_s_id)); + break; + case FNIC_FABRIC_LOGO_RSP: + fdls_process_fabric_logo_rsp(iport, fchdr); + break; + case FNIC_FABRIC_BLS_ABTS_RSP: + fdls_process_fabric_abts_rsp(iport, fchdr); + break; + case FNIC_FDMI_BLS_ABTS_RSP: + fdls_process_fdmi_abts_rsp(iport, fchdr); + break; + case FNIC_BLS_ABTS_REQ: + fdls_process_abts_req(iport, fchdr); + break; + case FNIC_ELS_UNSUPPORTED_REQ: + fdls_process_unsupported_els_req(iport, fchdr); + break; + case 
FNIC_ELS_PLOGI_REQ: + fdls_process_plogi_req(iport, fchdr); + break; + case FNIC_ELS_RSCN_REQ: + fdls_process_rscn(iport, fchdr); + break; + case FNIC_ELS_LOGO_REQ: + fdls_process_logo_req(iport, fchdr); + break; + case FNIC_ELS_RRQ: + case FNIC_ELS_ECHO_REQ: + fdls_process_els_req(iport, fchdr, len); + break; + case FNIC_ELS_ADISC: + fdls_process_adisc_req(iport, fchdr); + break; + case FNIC_ELS_RLS: + fdls_process_rls_req(iport, fchdr); + break; + case FNIC_FDMI_REG_HBA_RSP: + case FNIC_FDMI_RPA_RSP: + fdls_process_fdmi_reg_ack(iport, fchdr, frame_type); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "s_id: 0x%x d_did: 0x%x", s_id, d_id); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown FCoE frame of len: %d. Dropping frame", len); + break; + } +} + +void fnic_fdls_disc_init(struct fnic_iport_s *iport) +{ + fdls_reset_oxid_pool(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_INIT); +} + +void fnic_fdls_link_down(struct fnic_iport_s *iport) +{ + struct fnic_tport_s *tport, *next; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS processing link down", iport->fcid); + + fdls_set_state((&iport->fabric), FDLS_STATE_LINKDOWN); + iport->fabric.flags = 0; + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_scsi_fcpio_reset(iport->fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "removing rport: 0x%x", tport->fcid); + fdls_delete_tport(iport, tport); + } + + if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) { + timer_delete_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending = 0; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS finish processing link down", iport->fcid); +} diff --git a/drivers/scsi/fnic/fdls_fc.h b/drivers/scsi/fnic/fdls_fc.h new file mode 100644 index 000000000000..012f43afd083 --- /dev/null +++ b/drivers/scsi/fnic/fdls_fc.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _FDLS_FC_H_ +#define _FDLS_FC_H_ + +/* This file contains the declarations for FC fabric services + * and target discovery + * + * Request and Response for + * 1. FLOGI + * 2. PLOGI to Fabric Controller + * 3. GPN_ID, GPN_FT + * 4. RSCN + * 5. PLOGI to Target + * 6. 
PRLI to Target + */ + +#include <scsi/scsi.h> +#include <scsi/fc/fc_els.h> +#include <uapi/scsi/fc/fc_fs.h> +#include <uapi/scsi/fc/fc_ns.h> +#include <uapi/scsi/fc/fc_gs.h> +#include <uapi/linux/if_ether.h> +#include <scsi/fc/fc_ms.h> +#include <linux/minmax.h> +#include <linux/if_ether.h> +#include <scsi/fc/fc_encaps.h> +#include <scsi/fc/fc_fcoe.h> + +#define FDLS_MIN_FRAMES (32) +#define FDLS_MIN_FRAME_ELEM (4) +#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002 +#define FNIC_FCP_SP_TARGET 0x00000010 +#define FNIC_FCP_SP_INITIATOR 0x00000020 +#define FNIC_FCP_SP_CONF_CMPL 0x00000080 +#define FNIC_FCP_SP_RETRY 0x00000100 + +#define FNIC_FC_CONCUR_SEQS (0xFF) +#define FNIC_FC_RO_INFO (0x1F) + +/* Little Endian */ +#define FNIC_UNASSIGNED_OXID (0xffff) +#define FNIC_UNASSIGNED_RXID (0xffff) +#define FNIC_ELS_REQ_FCTL (0x000029) +#define FNIC_ELS_REP_FCTL (0x000099) + +#define FNIC_FCP_RSP_FCTL (0x000099) +#define FNIC_REQ_ABTS_FCTL (0x000009) + +#define FNIC_FC_PH_VER_HI (0x20) +#define FNIC_FC_PH_VER_LO (0x20) +#define FNIC_FC_PH_VER (0x2020) +#define FNIC_FC_B2B_CREDIT (0x0A) +#define FNIC_FC_B2B_RDF_SZ (0x0800) + +#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data) +#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov) +#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov) +#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features)) +#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn) +#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn) + +#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \ + (FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size)) +#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \ + (FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov)) +#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \ + (FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov)) + +#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3) +#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3) +#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid)) +#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid)) + +#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl) +#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type) +#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \ + put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl)) + +#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr) +#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr) +#define FNIC_STD_SET_PORT_ID(__req, __portid) \ + memcpy(__req.fr_fid.fp_fid, __portid, 3) +#define FNIC_STD_SET_PORT_NAME(_req, _pName) \ + (put_unaligned_be64(_pName, &_req.fr_wwn)) + +#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id)) +#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id)) +#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id) +#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id) +#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type) +#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl) +#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl) + +#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd)) + +#define FNIC_FCOE_MAX_FRAME_SZ (2048) +#define FNIC_FCOE_MIN_FRAME_SZ (280) +#define FNIC_FC_MAX_PAYLOAD_LEN (2048) +#define FNIC_MIN_DATA_FIELD_SIZE (256) + +#define FNIC_FC_EDTOV_NSEC (0x400) +#define FNIC_NSEC_TO_MSEC (0x1000000) +#define FCP_PRLI_FUNC_TARGET (0x0010) + +#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21) +#define 
FNIC_FC_F_CTL_LAST_END_SEQ (0x98) +#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99) +#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29) +#define FNIC_FC_R_CTL_FC4_SCTL (0x03) +#define FNIC_FC_CS_CTL (0x00) + +#define FNIC_FC_FRAME_UNSOLICITED(_fchdr) \ + (_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ) +#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \ + (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA) +#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \ + (_fchdr->fh_r_ctl == FC_RCTL_ELS_REP) +#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ) +#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT) +#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT) +#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \ + (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL) +#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS) +#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS) +#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT) +#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL) + +#define FNIC_FC_C3_RDF (0xfff) +#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \ + (min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \ + (_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF))) +#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \ + (min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \ + (uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff))) + +/* FLOGI/PLOGI struct */ +struct fc_std_flogi { + struct fc_frame_header fchdr; + struct fc_els_flogi els; +} __packed; + +struct fc_std_els_acc_rsp { + struct fc_frame_header fchdr; + struct fc_els_ls_acc acc; +} __packed; + +struct fc_std_els_rjt_rsp { + struct fc_frame_header fchdr; + struct fc_els_ls_rjt rej; +} __packed; + +struct fc_std_els_adisc { + struct fc_frame_header fchdr; + struct fc_els_adisc els; +} __packed; + +struct fc_std_rls_acc { + struct fc_frame_header fchdr; + struct fc_els_rls_resp els; +} __packed; + +struct fc_std_abts_ba_acc { + struct fc_frame_header fchdr; + struct fc_ba_acc acc; +} __packed; + +struct fc_std_abts_ba_rjt { + struct fc_frame_header fchdr; + struct fc_ba_rjt rjt; +} __packed; + +struct fc_std_els_prli { + struct fc_frame_header fchdr; + struct fc_els_prli els_prli; + struct fc_els_spp sp; +} __packed; + +struct fc_std_rpn_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rn_id rpn_id; +} __packed; + +struct fc_std_fdmi_rhba { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_fdmi_rhba rhba; +} __packed; + +struct fc_std_fdmi_rpa { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_fdmi_rpa rpa; +} __packed; + +struct fc_std_rft_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rft_id rft_id; +} __packed; + +struct fc_std_rff_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rff_id rff_id; +} __packed; + +struct fc_std_gpn_ft { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_gid_ft gpn_ft; +} __packed; + +/* Accept CT_IU for GPN_FT */ +struct fc_gpn_ft_rsp_iu { + uint8_t ctrl; + uint8_t fcid[3]; + uint32_t rsvd; + __be64 wwpn; +} __packed; + +struct fc_std_rls { + struct fc_frame_header fchdr; + struct fc_els_rls els; +} __packed; + +struct fc_std_scr { + struct fc_frame_header fchdr; + struct fc_els_scr scr; +} 
__packed; + +struct fc_std_rscn { + struct fc_frame_header fchdr; + struct fc_els_rscn els; +} __packed; + +struct fc_std_logo { + struct fc_frame_header fchdr; + struct fc_els_logo els; +} __packed; + +#define FNIC_ETH_FCOE_HDRS_OFFSET \ + (sizeof(struct ethhdr) + sizeof(struct fcoe_hdr)) + +#endif /* _FDLS_FC_H */ diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c new file mode 100644 index 000000000000..19395e2aee44 --- /dev/null +++ b/drivers/scsi/fnic/fip.c @@ -0,0 +1,1005 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include "fnic.h" +#include "fip.h" +#include <linux/etherdevice.h> + +#define FIP_FNIC_RESET_WAIT_COUNT 15 + +/** + * fnic_fcoe_reset_vlans - Free up the list of discovered vlans + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan, *next; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlan_list)) { + list_for_each_entry_safe(vlan, next, &fnic->vlan_list, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reset vlan complete\n"); +} + +/** + * fnic_fcoe_send_vlan_req - Send FIP vlan request to all FCFs MAC + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u64 vlan_tov; + struct fip_vlan_req *pvlan_req; + uint16_t frame_size = sizeof(struct fip_vlan_req); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send VLAN req"); + return; + } + + fnic_fcoe_reset_vlans(fnic); + + fnic->set_vlan(fnic, 0); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "set vlan done\n"); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "got MAC 0x%x:%x:%x:%x:%x:%x\n", iport->hwmac[0], + iport->hwmac[1], iport->hwmac[2], iport->hwmac[3], + iport->hwmac[4], iport->hwmac[5]); + + pvlan_req = (struct fip_vlan_req *) frame; + *pvlan_req = (struct fip_vlan_req) { + .eth = {.h_dest = FCOE_ALL_FCFS_MAC, + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = {.fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_VLAN), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_VLAN_REQ_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, + .fip_dlen = 2}} + }; + + memcpy(pvlan_req->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(pvlan_req->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); + + iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Send VLAN req\n"); + fnic_send_fip_frame(iport, frame, frame_size); + + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->retry_fip_timer, round_jiffies(vlan_tov)); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fip timer set\n"); +} + +/** + * fnic_fcoe_process_vlan_resp - Processes the vlan response from one FCF and + * populates VLAN list. + * @fnic: Handle to fnic driver instance + * @fiph: Received FIP frame + * + * Will wait for responses from multiple FCFs until timeout. 
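For illustration alongside the handler below: a FIP descriptor list is a sequence of TLV-style descriptors whose lengths are counted in 4-byte words (fip_dl_len in the FIP header, fip_dlen per descriptor), which is why the parsing loop scales its indices by 4. A minimal user-space sketch of such a walk, using simplified stand-in layouts rather than the driver's real structures:

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	struct demo_fip_desc {
		uint8_t  dtype;   /* descriptor type; 14 == FIP_DT_VLAN */
		uint8_t  dlen;    /* descriptor length in 4-byte words */
		uint16_t value;   /* for a VLAN descriptor: the VLAN id */
	};

	/* dl_words is fip_dl_len from the FIP header, already byte-swapped */
	static void demo_walk_vlan_descs(const uint8_t *descs, int dl_words)
	{
		int off = 0;

		while (dl_words > 0) {
			const struct demo_fip_desc *d =
				(const struct demo_fip_desc *)(descs + off);

			if (d->dlen == 0)          /* malformed list: stop */
				break;
			if (d->dtype == 14)        /* FIP_DT_VLAN */
				printf("VLAN %u\n",
				       ntohs(d->value) & 0x0fff);
			off      += d->dlen * 4;   /* lengths are in words */
			dl_words -= d->dlen;
		}
	}

The real handler additionally masks the VLAN id to 12 bits and queues each discovered VLAN on fnic->vlan_list for the discovery timer to try in turn.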
+ */ +void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *)fiph; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u16 vid; + int num_vlan = 0; + int cur_desc, desc_len; + struct fcoe_vlan *vlan; + struct fip_vlan_desc *vlan_desc; + unsigned long flags; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p got vlan resp\n", fnic); + + desc_len = be16_to_cpu(vlan_notif->fip.fip_dl_len); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "desc_len %d\n", desc_len); + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + cur_desc = 0; + while (desc_len > 0) { + vlan_desc = + (struct fip_vlan_desc *)(((char *)vlan_notif->vlans_desc) + + cur_desc * 4); + + if (vlan_desc->fd_desc.fip_dtype == FIP_DT_VLAN) { + if (vlan_desc->fd_desc.fip_dlen != 1) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Invalid descriptor length(%x) in VLan response\n", + vlan_desc->fd_desc.fip_dlen); + + } + num_vlan++; + vid = be16_to_cpu(vlan_desc->fd_vlan); + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + + if (!vlan) { + /* retry from timer */ + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Mem Alloc failure\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlan_list); + break; + } + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Invalid descriptor type(%x) in VLan response\n", + vlan_desc->fd_desc.fip_dtype); + /* + * Note : received a type=2 descriptor here i.e. FIP + * MAC Address Descriptor + */ + cur_desc += vlan_desc->fd_desc.fip_dlen; + desc_len -= vlan_desc->fd_desc.fip_dlen; + } + + /* any VLAN descriptors present ? 
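A side note that applies to this file and the FDLS code above: FC_IDs (fh_s_id, fh_d_id, fd_fc_id) are 3-byte big-endian fields, converted with ntoh24()/hton24(). The kernel provides those helpers; minimal equivalents are shown here only for illustration:

	#include <stdint.h>

	static inline uint32_t demo_ntoh24(const uint8_t p[3])
	{
		return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
	}

	static inline void demo_hton24(uint8_t p[3], uint32_t fcid)
	{
		p[0] = (fcid >> 16) & 0xff;
		p[1] = (fcid >> 8) & 0xff;
		p[2] = fcid & 0xff;
	}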
*/ + if (num_vlan == 0) { + atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p No VLAN descriptors in FIP VLAN response\n", + fnic); + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + out: + return; +} + +/** + * fnic_fcoe_start_fcf_discovery - Start FIP FCF discovery in a selected vlan + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_start_fcf_discovery(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + u64 fcs_tov; + struct fip_discovery *pdisc_sol; + uint16_t frame_size = sizeof(struct fip_discovery); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to start FCF discovery"); + return; + } + + eth_zero_addr(iport->selected_fcf.fcf_mac); + + pdisc_sol = (struct fip_discovery *) frame; + *pdisc_sol = (struct fip_discovery) { + .eth = {.h_dest = FCOE_ALL_FCFS_MAC, + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), .fip_op = cpu_to_be16(FIP_OP_DISC), + .fip_subcode = FIP_SC_REQ, .fip_dl_len = cpu_to_be16(FIP_DISC_SOL_LEN), + .fip_flags = cpu_to_be16(FIP_FL_FPMA)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}, + .name_desc = {.fd_desc = {.fip_dtype = FIP_DT_NAME, .fip_dlen = 3}}, + .fcoe_desc = {.fd_desc = {.fip_dtype = FIP_DT_FCOE_SIZE, .fip_dlen = 1}, + .fd_size = cpu_to_be16(FCOE_MAX_SIZE)} + }; + + memcpy(pdisc_sol->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(pdisc_sol->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + iport->selected_fcf.fcf_priority = 0xFF; + + FNIC_STD_SET_NODE_NAME(&pdisc_sol->name_desc.fd_wwn, iport->wwnn); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Start FCF discovery\n"); + fnic_send_fip_frame(iport, frame, frame_size); + + iport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED; + + fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV); + mod_timer(&fnic->retry_fip_timer, round_jiffies(fcs_tov)); +} + +/** + * fnic_fcoe_fip_discovery_resp - Processes FCF advertisements. + * @fnic: Handle to fnic driver instance + * @fiph: Received frame + * + * FCF advertisements can be: + * solicited - Sent in response of a discover FCF FIP request + * Store the information of the FCF with highest priority. + * Wait until timeout in case of multiple FCFs. + * + * unsolicited - Sent periodically by the FCF for keep alive. + * If FLOGI is in progress or completed and the advertisement is + * received by our selected FCF, refresh the keep alive timer. 
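The selection rule the handler applies fits in a few lines: among solicited, available advertisements the lowest advertised priority wins, starting from the 0xFF "none selected" sentinel, and the FCF keep-alive timeout is three advertisement periods. A sketch under simplified types (demo_fcf_adv/demo_fcf_sel are illustrative stand-ins, not driver structures):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct demo_fcf_adv {
		uint8_t  mac[6];
		uint8_t  priority;       /* lower value wins */
		uint32_t fka_period_ms;  /* FKA advertisement period */
		bool     available;
	};

	struct demo_fcf_sel {
		uint8_t  mac[6];
		uint8_t  priority;       /* init to 0xFF: none selected */
		uint32_t fka_period_ms;
	};

	static void demo_consider_fcf(struct demo_fcf_sel *sel,
				      const struct demo_fcf_adv *adv)
	{
		if (adv->available && adv->priority < sel->priority) {
			memcpy(sel->mac, adv->mac, sizeof(sel->mac));
			sel->priority = adv->priority;
			sel->fka_period_ms = adv->fka_period_ms;
		}
	}

	/* Missing three consecutive advertisements declares the FCF dead */
	static uint32_t demo_fcf_ka_timeout_ms(const struct demo_fcf_sel *sel)
	{
		return 3 * sel->fka_period_ms;
	}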
+ */ +void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_disc_adv *disc_adv = (struct fip_disc_adv *)fiph; + u64 fcs_ka_tov; + u64 tov; + int fka_has_changed; + + switch (iport->fip.state) { + case FDLS_FIP_FCF_DISCOVERY_STARTED: + if (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p Solicited adv\n", fnic); + + if ((disc_adv->prio_desc.fd_pri < + iport->selected_fcf.fcf_priority) + && (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL)) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p FCF Available\n", fnic); + memcpy(iport->selected_fcf.fcf_mac, + disc_adv->mac_desc.fd_mac, ETH_ALEN); + iport->selected_fcf.fcf_priority = + disc_adv->prio_desc.fd_pri; + iport->selected_fcf.fka_adv_period = + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "adv time %d", + iport->selected_fcf.fka_adv_period); + iport->selected_fcf.ka_disabled = + (disc_adv->fka_adv_desc.fd_flags & 1); + } + } + break; + case FDLS_FIP_FLOGI_STARTED: + case FDLS_FIP_FLOGI_COMPLETE: + if (!(be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL)) { + /* same fcf */ + if (memcmp + (iport->selected_fcf.fcf_mac, + disc_adv->mac_desc.fd_mac, ETH_ALEN) == 0) { + if (iport->selected_fcf.fka_adv_period != + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period)) { + iport->selected_fcf.fka_adv_period = + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); + FNIC_FIP_DBG(KERN_INFO, + fnic->host, + fnic->fnic_num, + "change fka to %d", + iport->selected_fcf.fka_adv_period); + } + + fka_has_changed = + (iport->selected_fcf.ka_disabled == 1) + && ((disc_adv->fka_adv_desc.fd_flags & 1) == + 0); + + iport->selected_fcf.ka_disabled = + (disc_adv->fka_adv_desc.fd_flags & 1); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == + 0))) { + + fcs_ka_tov = jiffies + + 3 + * + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->fcs_ka_timer, + round_jiffies(fcs_ka_tov)); + } else { + if (timer_pending(&fnic->fcs_ka_timer)) + timer_delete_sync(&fnic->fcs_ka_timer); + } + + if (fka_has_changed) { + if (iport->selected_fcf.fka_adv_period != 0) { + tov = + jiffies + + msecs_to_jiffies( + iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, + round_jiffies(tov)); + + tov = + jiffies + + msecs_to_jiffies + (FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, + round_jiffies(tov)); + } + } + } + } + break; + default: + break; + } /* end switch */ +} + +/** + * fnic_fcoe_start_flogi - Send FIP FLOGI to the selected FCF + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_start_flogi(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + struct fip_flogi *pflogi_req; + u64 flogi_tov; + uint16_t oxid; + uint16_t frame_size = sizeof(struct fip_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to start FIP FLOGI"); + return; + } + + pflogi_req = (struct fip_flogi *) frame; + *pflogi_req = (struct fip_flogi) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_LS), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_FLOGI_LEN), + .fip_flags = cpu_to_be16(FIP_FL_FPMA)}, + .flogi_desc = { + .fd_desc = {.fip_dtype = FIP_DT_FLOGI, 
.fip_dlen = 36}, + .flogi = { + .fchdr = { + .fh_r_ctl = FC_RCTL_ELS_REQ, + .fh_d_id = {0xFF, 0xFF, 0xFE}, + .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els = { + .fl_cmd = ELS_FLOGI, + .fl_csp = { + .sp_hi_ver = + FNIC_FC_PH_VER_HI, + .sp_lo_ver = + FNIC_FC_PH_VER_LO, + .sp_bb_cred = + cpu_to_be16 + (FNIC_FC_B2B_CREDIT), + .sp_bb_data = + cpu_to_be16 + (FNIC_FC_B2B_RDF_SZ)}, + .fl_cssp[2].cp_class = + cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) + }, + } + }, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} + }; + + memcpy(pflogi_req->eth.h_source, iport->hwmac, ETH_ALEN); + if (iport->usefip) + memcpy(pflogi_req->eth.h_dest, iport->selected_fcf.fcf_mac, + ETH_ALEN); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate OXID to send FIP FLOGI"); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(pflogi_req->flogi_desc.flogi.fchdr, oxid); + + FNIC_STD_SET_NPORT_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwnn, + iport->wwnn); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FIP start FLOGI\n"); + fnic_send_fip_frame(iport, frame, frame_size); + iport->fip.flogi_retry++; + + iport->fip.state = FDLS_FIP_FLOGI_STARTED; + flogi_tov = jiffies + msecs_to_jiffies(fnic->config.flogi_timeout); + mod_timer(&fnic->retry_fip_timer, round_jiffies(flogi_tov)); +} + +/** + * fnic_fcoe_process_flogi_resp - Processes FLOGI response from FCF. + * @fnic: Handle to fnic driver instance + * @fiph: Received frame + * + * If successful save assigned fc_id and MAC, program firmware + * and start fdls discovery, else restart vlan discovery. + */ +void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *)fiph; + int desc_len; + uint32_t s_id; + int frame_type; + uint16_t oxid; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct fc_frame_header *fchdr = &flogi_rsp->rsp_desc.flogi.fchdr; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p FIP FLOGI rsp\n", fnic); + desc_len = be16_to_cpu(flogi_rsp->fip.fip_dl_len); + if (desc_len != 38) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Invalid Descriptor List len (%x). 
Dropping frame\n", + desc_len); + return; + } + + if (!((flogi_rsp->rsp_desc.fd_desc.fip_dtype == 7) + && (flogi_rsp->rsp_desc.fd_desc.fip_dlen == 36)) + || !((flogi_rsp->mac_desc.fd_desc.fip_dtype == 2) + && (flogi_rsp->mac_desc.fd_desc.fip_dlen == 2))) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame invalid type and len mix\n"); + return; + } + + frame_type = fnic_fdls_validate_and_get_frame_type(iport, fchdr); + + s_id = ntoh24(fchdr->fh_s_id); + if ((fchdr->fh_f_ctl[0] != 0x98) + || (fchdr->fh_r_ctl != 0x23) + || (s_id != FC_FID_FLOGI) + || (frame_type != FNIC_FABRIC_FLOGI_RSP) + || (fchdr->fh_type != 0x01)) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping invalid frame: s_id %x F %x R %x t %x OX_ID %x\n", + s_id, fchdr->fh_f_ctl[0], fchdr->fh_r_ctl, + fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); + return; + } + + if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p rsp for pending FLOGI\n", fnic); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + timer_delete_sync(&fnic->retry_fip_timer); + + if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN) + && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p FLOGI success\n", fnic); + memcpy(iport->fpma, flogi_rsp->mac_desc.fd_mac, ETH_ALEN); + iport->fcid = + ntoh24(flogi_rsp->rsp_desc.flogi.fchdr.fh_d_id); + + iport->r_a_tov = + be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_r_a_tov); + iport->e_d_tov = + be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_e_d_tov); + memcpy(fnic->iport.fcfmac, iport->selected_fcf.fcf_mac, + ETH_ALEN); + vnic_dev_add_addr(fnic->vdev, flogi_rsp->mac_desc.fd_mac); + + if (fnic_fdls_register_portid(iport, iport->fcid, NULL) + != 0) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p flogi registration failed\n", + fnic); + return; + } + + iport->fip.state = FDLS_FIP_FLOGI_COMPLETE; + iport->state = FNIC_IPORT_STATE_FABRIC_DISC; + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "iport->state:%d\n", + iport->state); + fnic_fdls_disc_start(iport); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0))) { + u64 tov; + + tov = jiffies + + + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, + round_jiffies(tov)); + + tov = + jiffies + + msecs_to_jiffies(FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, + round_jiffies(tov)); + + } + } else { + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + atomic64_inc(&fnic_stats->vlan_stats.flogi_rejects); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + + iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; + } + } +} + +/** + * fnic_common_fip_cleanup - Clean up FCF info and timers in case of + * link down/CVL + * @fnic: Handle to fnic driver instance + */ +void fnic_common_fip_cleanup(struct fnic *fnic) +{ + + struct fnic_iport_s *iport = &fnic->iport; + + if (!iport->usefip) + return; + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fip cleanup\n", fnic); + + iport->fip.state = FDLS_FIP_INIT; + + timer_delete_sync(&fnic->retry_fip_timer); + timer_delete_sync(&fnic->fcs_ka_timer); + timer_delete_sync(&fnic->enode_ka_timer); + timer_delete_sync(&fnic->vn_ka_timer); + + if (!is_zero_ether_addr(iport->fpma)) + vnic_dev_del_addr(fnic->vdev, iport->fpma); + + 
eth_zero_addr(iport->fpma); + iport->fcid = 0; + iport->r_a_tov = 0; + iport->e_d_tov = 0; + eth_zero_addr(fnic->iport.fcfmac); + eth_zero_addr(iport->selected_fcf.fcf_mac); + iport->selected_fcf.fcf_priority = 0; + iport->selected_fcf.fka_adv_period = 0; + iport->selected_fcf.ka_disabled = 0; + + fnic_fcoe_reset_vlans(fnic); +} + +/** + * fnic_fcoe_process_cvl - Processes Clear Virtual Link from FCF. + * @fnic: Handle to fnic driver instance + * @fiph: Received frame + * + * Verify that cvl is received from our current FCF for our assigned MAC + * and clean up and restart the vlan discovery. + */ +void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_cvl *cvl_msg = (struct fip_cvl *)fiph; + int i; + int found = false; + int max_count = 0; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p clear virtual link handler\n", fnic); + + if (!((cvl_msg->fcf_mac_desc.fd_desc.fip_dtype == 2) + && (cvl_msg->fcf_mac_desc.fd_desc.fip_dlen == 2)) + || !((cvl_msg->name_desc.fd_desc.fip_dtype == 4) + && (cvl_msg->name_desc.fd_desc.fip_dlen == 3))) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "invalid mix: ft %x fl %x ndt %x ndl %x", + cvl_msg->fcf_mac_desc.fd_desc.fip_dtype, + cvl_msg->fcf_mac_desc.fd_desc.fip_dlen, + cvl_msg->name_desc.fd_desc.fip_dtype, + cvl_msg->name_desc.fd_desc.fip_dlen); + } + + if (memcmp + (iport->selected_fcf.fcf_mac, cvl_msg->fcf_mac_desc.fd_mac, ETH_ALEN) + == 0) { + for (i = 0; i < ((be16_to_cpu(fiph->fip_dl_len) / 5) - 1); i++) { + if (!((cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype == 11) + && (cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen == 5))) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Invalid type and len mix type: %d len: %d\n", + cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype, + cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen); + } + if (memcmp + (iport->fpma, cvl_msg->vn_ports_desc[i].fd_mac, + ETH_ALEN) == 0) { + found = true; + break; + } + } + if (!found) + return; + fnic_common_fip_cleanup(fnic); + + while (fnic->reset_in_progress == IN_PROGRESS) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(5000)); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + max_count++; + if (max_count >= FIP_FNIC_RESET_WAIT_COUNT) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Rthr waited too long. Skipping handle link event %p\n", + fnic); + return; + } + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic reset in progress. 
Link event needs to wait %p", + fnic); + } + fnic->reset_in_progress = IN_PROGRESS; + fnic_fdls_link_down(iport); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); + fnic_fcoe_send_vlan_req(fnic); + } +} + +/** + * fdls_fip_recv_frame - Demultiplexer for FIP frames + * @fnic: Handle to fnic driver instance + * @frame: Received ethernet frame + */ +int fdls_fip_recv_frame(struct fnic *fnic, void *frame) +{ + struct ethhdr *eth = (struct ethhdr *)frame; + struct fip_header *fiph; + u16 op; + u8 sub; + int len = 2048; + + if (be16_to_cpu(eth->h_proto) == ETH_P_FIP) { + fiph = (struct fip_header *)(eth + 1); + op = be16_to_cpu(fiph->fip_op); + sub = fiph->fip_subcode; + + fnic_debug_dump_fip_frame(fnic, eth, len, "Incoming"); + + if (op == FIP_OP_DISC && sub == FIP_SC_REP) + fnic_fcoe_fip_discovery_resp(fnic, fiph); + else if (op == FIP_OP_VLAN && sub == FIP_SC_REP) + fnic_fcoe_process_vlan_resp(fnic, fiph); + else if (op == FIP_OP_CTRL && sub == FIP_SC_REP) + fnic_fcoe_process_cvl(fnic, fiph); + else if (op == FIP_OP_LS && sub == FIP_SC_REP) + fnic_fcoe_process_flogi_resp(fnic, fiph); + + /* Return true if the frame was a FIP frame */ + return true; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Not a FIP Frame"); + return false; +} + +void fnic_work_on_fip_timer(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_timer_work); + struct fnic_iport_s *iport = &fnic->iport; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FIP timeout\n"); + + if (iport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) { + fnic_vlan_discovery_timeout(fnic); + } else if (iport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) { + u8 zmac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 }; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FCF Discovery timeout\n"); + if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) { + + if (iport->flags & FNIC_FIRST_LINK_UP) { + fnic_scsi_fcpio_reset(iport->fnic); + iport->flags &= ~FNIC_FIRST_LINK_UP; + } + + fnic_fcoe_start_flogi(fnic); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0))) { + u64 fcf_tov; + + fcf_tov = jiffies + + 3 + * + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->fcs_ka_timer, + round_jiffies(fcf_tov)); + } + } else { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "FCF Discovery timeout\n"); + fnic_vlan_discovery_timeout(fnic); + } + } else if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI timeout\n"); + if (iport->fip.flogi_retry < fnic->config.flogi_retries) + fnic_fcoe_start_flogi(fnic); + else + fnic_vlan_discovery_timeout(fnic); + } +} + +/** + * fnic_handle_fip_timer - Timeout handler for FIP discover phase. + * @t: Handle to the timer list + * + * Based on the current state, start next phase or restart discovery. + */ +void fnic_handle_fip_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, retry_fip_timer); + + INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer); + queue_work(fnic_fip_queue, &fnic->fip_timer_work); +} + +/** + * fnic_handle_enode_ka_timer - FIP node keep alive. 
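All of the timer handlers in this file follow the same kernel idiom: the timer callback runs in atomic context, so anything that may sleep (frame allocation, timer_delete_sync(), waiting on completions) is deferred to a work item. A generic sketch of the pattern, with demo_* names standing in for the driver's own:

	#include <linux/jiffies.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct demo_ctx {
		struct timer_list timer;
		struct work_struct work;
		struct workqueue_struct *wq;
	};

	static void demo_work_fn(struct work_struct *work)
	{
		struct demo_ctx *ctx =
			container_of(work, struct demo_ctx, work);

		/* Process context: sleeping calls are allowed here */
		(void)ctx;
	}

	static void demo_timer_fn(struct timer_list *t)
	{
		struct demo_ctx *ctx = from_timer(ctx, t, timer);

		/* Atomic context: only hand the work off */
		queue_work(ctx->wq, &ctx->work);
	}

	static void demo_start(struct demo_ctx *ctx,
			       struct workqueue_struct *wq)
	{
		ctx->wq = wq;
		INIT_WORK(&ctx->work, demo_work_fn);
		timer_setup(&ctx->timer, demo_timer_fn, 0);
		mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(1000));
	}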
+ * @t: Handle to the timer list + */ +void fnic_handle_enode_ka_timer(struct timer_list *t) +{ + uint8_t *frame; + struct fnic *fnic = from_timer(fnic, t, enode_ka_timer); + + struct fnic_iport_s *iport = &fnic->iport; + struct fip_enode_ka *penode_ka; + u64 enode_ka_tov; + uint16_t frame_size = sizeof(struct fip_enode_ka); + + if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) + return; + + if ((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0)) { + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send enode ka"); + return; + } + + penode_ka = (struct fip_enode_ka *) frame; + *penode_ka = (struct fip_enode_ka) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_CTRL), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_ENODE_KA_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} + }; + + memcpy(penode_ka->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(penode_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); + memcpy(penode_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + + FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Handle enode KA timer\n"); + fnic_send_fip_frame(iport, frame, frame_size); + enode_ka_tov = jiffies + + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, round_jiffies(enode_ka_tov)); +} + +/** + * fnic_handle_vn_ka_timer - FIP virtual port keep alive. + * @t: Handle to the timer list + */ +void fnic_handle_vn_ka_timer(struct timer_list *t) +{ + uint8_t *frame; + struct fnic *fnic = from_timer(fnic, t, vn_ka_timer); + + struct fnic_iport_s *iport = &fnic->iport; + struct fip_vn_port_ka *pvn_port_ka; + u64 vn_ka_tov; + uint8_t fcid[3]; + uint16_t frame_size = sizeof(struct fip_vn_port_ka); + + if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) + return; + + if ((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0)) { + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send vn ka"); + return; + } + + pvn_port_ka = (struct fip_vn_port_ka *) frame; + *pvn_port_ka = (struct fip_vn_port_ka) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_CTRL), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_VN_KA_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}, + .vn_port_desc = {.fd_desc = {.fip_dtype = FIP_DT_VN_ID, .fip_dlen = 5}} + }; + + memcpy(pvn_port_ka->eth.h_source, iport->fpma, ETH_ALEN); + memcpy(pvn_port_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); + memcpy(pvn_port_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + memcpy(pvn_port_ka->vn_port_desc.fd_mac, iport->fpma, ETH_ALEN); + hton24(fcid, iport->fcid); + memcpy(pvn_port_ka->vn_port_desc.fd_fc_id, fcid, 3); + FNIC_STD_SET_NPORT_NAME(&pvn_port_ka->vn_port_desc.fd_wwpn, iport->wwpn); + + FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Handle vnport KA timer\n"); + fnic_send_fip_frame(iport, frame, frame_size); + vn_ka_tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, round_jiffies(vn_ka_tov)); +} + +/** + * fnic_vlan_discovery_timeout - Handle vlan discovery timeout + * @fnic: Handle to fnic driver instance + * + * End of VLAN 
discovery or FCF discovery time window. + * Start the FCF discovery if VLAN was never used. + */ +void fnic_vlan_discovery_timeout(struct fnic *fnic) +{ + struct fcoe_vlan *vlan; + struct fnic_iport_s *iport = &fnic->iport; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (!iport->usefip) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlan_list)) { + /* no vlans available, try again */ + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + return; + } + + vlan = list_first_entry(&fnic->vlan_list, struct fcoe_vlan, list); + + if (vlan->state == FIP_VLAN_SENT) { + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlan_list)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + fnic_fcoe_send_vlan_req(fnic); + return; + } + /* check the next vlan */ + vlan = + list_first_entry(&fnic->vlan_list, struct fcoe_vlan, + list); + + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + + } + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + + } else { + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_fcoe_start_fcf_discovery(fnic); +} + +/** + * fnic_work_on_fcs_ka_timer - Handle work on FCS keep alive timer. + * @work: the work queue to be serviced + * + * Finish handling fcs_ka_timer in process context. + * Clean up, bring the link down, and restart all FIP discovery. + */ +void fnic_work_on_fcs_ka_timer(struct work_struct *work) +{ + struct fnic + *fnic = container_of(work, struct fnic, fip_timer_work); + struct fnic_iport_s *iport = &fnic->iport; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fcs ka timeout\n", fnic); + + fnic_common_fip_cleanup(fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic_fdls_link_down(iport); + iport->state = FNIC_IPORT_STATE_FIP; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + + fnic_fcoe_send_vlan_req(fnic); +} + +/** + * fnic_handle_fcs_ka_timer - Handle FCS keep alive timer. + * @t: Handle to the timer list + * + * No keep alives received from FCF. Clean up, bring the link down + * and restart all the FIP discovery. + */ +void fnic_handle_fcs_ka_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer); + + INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer); + queue_work(fnic_fip_queue, &fnic->fip_timer_work); +} diff --git a/drivers/scsi/fnic/fip.h b/drivers/scsi/fnic/fip.h new file mode 100644 index 000000000000..79fee7628870 --- /dev/null +++ b/drivers/scsi/fnic/fip.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
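One relationship worth making explicit before the constants below: fip_dl_len counts only the descriptors, in 4-byte words, so each FIP_*_LEN here is the byte size of everything after the 10-byte FIP header divided by four (e.g. FIP_VLAN_REQ_LEN == 2 for a single 8-byte MAC descriptor). A compile-time sketch of that arithmetic, using byte-array stand-ins for the real layouts in <scsi/fc/fc_fip.h>:

	#include <stddef.h>
	#include <stdint.h>

	struct demo_eth  { uint8_t b[14]; };  /* struct ethhdr */
	struct demo_fiph { uint8_t b[10]; };  /* struct fip_header */
	struct demo_macd { uint8_t b[8];  };  /* struct fip_mac_desc */

	struct demo_vlan_req {
		struct demo_eth  eth;
		struct demo_fiph fip;
		struct demo_macd mac;
	};

	/* FIP_VLAN_REQ_LEN == 2: one 8-byte MAC descriptor is two words */
	_Static_assert((sizeof(struct demo_vlan_req) -
			offsetof(struct demo_vlan_req, mac)) / 4 == 2,
		       "VLAN request carries a 2-word descriptor list");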
+ */ +#ifndef _FIP_H_ +#define _FIP_H_ + +#include "fdls_fc.h" +#include "fnic_fdls.h" +#include <scsi/fc/fc_fip.h> + +/* Drop the cast from the standard definition */ +#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02} +#define FCOE_MAX_SIZE 0x082E + +#define FCOE_CTLR_FIPVLAN_TOV (3*1000) +#define FCOE_CTLR_FCS_TOV (3*1000) +#define FCOE_CTLR_MAX_SOL (5*1000) + +#define FIP_DISC_SOL_LEN (6) +#define FIP_VLAN_REQ_LEN (2) +#define FIP_ENODE_KA_LEN (2) +#define FIP_VN_KA_LEN (7) +#define FIP_FLOGI_LEN (38) + +enum fdls_vlan_state { + FIP_VLAN_AVAIL, + FIP_VLAN_SENT +}; + +enum fdls_fip_state { + FDLS_FIP_INIT, + FDLS_FIP_VLAN_DISCOVERY_STARTED, + FDLS_FIP_FCF_DISCOVERY_STARTED, + FDLS_FIP_FLOGI_STARTED, + FDLS_FIP_FLOGI_COMPLETE, +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + uint16_t vid; /* vlan ID */ + uint16_t sol_count; /* no. of sols sent */ + uint16_t state; /* state */ +}; + +struct fip_vlan_req { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_vlan_notif { + struct fip_header fip; + struct fip_vlan_desc vlans_desc[]; +} __packed; + +struct fip_vn_port_ka { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; + struct fip_vn_desc vn_port_desc; +} __packed; + +struct fip_enode_ka { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_cvl { + struct fip_header fip; + struct fip_mac_desc fcf_mac_desc; + struct fip_wwn_desc name_desc; + struct fip_vn_desc vn_ports_desc[]; +} __packed; + +struct fip_flogi_desc { + struct fip_desc fd_desc; + uint16_t rsvd; + struct fc_std_flogi flogi; +} __packed; + +struct fip_flogi_rsp_desc { + struct fip_desc fd_desc; + uint16_t rsvd; + struct fc_std_flogi flogi; +} __packed; + +struct fip_flogi { + struct ethhdr eth; + struct fip_header fip; + struct fip_flogi_desc flogi_desc; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_flogi_rsp { + struct fip_header fip; + struct fip_flogi_rsp_desc rsp_desc; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_discovery { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; + struct fip_wwn_desc name_desc; + struct fip_size_desc fcoe_desc; +} __packed; + +struct fip_disc_adv { + struct fip_header fip; + struct fip_pri_desc prio_desc; + struct fip_mac_desc mac_desc; + struct fip_wwn_desc name_desc; + struct fip_fab_desc fabric_desc; + struct fip_fka_desc fka_adv_desc; +} __packed; + +void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_work_on_fip_timer(struct work_struct *work); +void fnic_work_on_fcs_ka_timer(struct work_struct *work); +void fnic_fcoe_send_vlan_req(struct fnic *fnic); +void fnic_fcoe_start_fcf_discovery(struct fnic *fnic); +void fnic_fcoe_start_flogi(struct fnic *fnic); +void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph); +void fnic_vlan_discovery_timeout(struct fnic *fnic); + +extern struct workqueue_struct *fnic_fip_queue; + +#ifdef FNIC_DEBUG +static inline void +fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, + int len, char *pfx) +{ + struct fip_header *fiph = (struct fip_header *)(eth + 1); + u16 op = be16_to_cpu(fiph->fip_op); + u8 sub = fiph->fip_subcode; + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "FIP %s packet contents: op: 
0x%x sub: 0x%x (len = %d)", + pfx, op, sub, len); + + fnic_debug_dump(fnic, (uint8_t *)eth, len); +} + +#else /* FNIC_DEBUG */ + +static inline void +fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, + int len, char *pfx) {} +#endif /* FNIC_DEBUG */ + +#endif /* _FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index ce73f08ee889..6c5f6046b1f5 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -10,8 +10,10 @@ #include <linux/netdevice.h> #include <linux/workqueue.h> #include <linux/bitops.h> -#include <scsi/libfc.h> -#include <scsi/libfcoe.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/fc_frame.h> #include "fnic_io.h" #include "fnic_res.h" #include "fnic_trace.h" @@ -24,13 +26,15 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "vnic_scsi.h" +#include "fnic_fdls.h" #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.7.0.0" +#define DRV_VERSION "1.8.0.0" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " +#define FABRIC_LOGO_MAX_RETRY 3 #define DESC_CLEAN_LOW_WATERMARK 8 #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ @@ -38,6 +42,7 @@ #define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ #define FNIC_DFLT_QUEUE_DEPTH 256 #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ +#define LUN0_DELAY_TIME 9 /* * Tag bits used for special requests. @@ -75,6 +80,77 @@ #define FNIC_DEV_RST_TERM_DONE BIT(20) #define FNIC_DEV_RST_ABTS_PENDING BIT(21) +#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */ +#define FNIC_FCOE_MAX_CMD_LEN 16 +/* Retry supported by rport (returned by PRLI service parameters) */ +#define FNIC_FC_RP_FLAGS_RETRY 0x1 + +/* Cisco vendor id */ +#define PCI_VENDOR_ID_CISCO 0x1137 +#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */ + +/* sereno pcie switch */ +#define PCI_DEVICE_ID_CISCO_SERENO 0x004e +#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */ +#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */ +#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */ + +/* Sereno */ +#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */ +#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */ +#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */ +#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */ +#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */ +#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */ +#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */ + +/* Cruz */ +#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */ +/* Cruz MountTian SIOC */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b +#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */ +/* Cruz MountTian2 SIOC */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157 +#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */ + +/* Bodega */ +/* VIC 1457 PCIe mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218 +#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */ +/* VIC 1487 PCIe mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a +#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */ +/* VIC 1440 Mezz mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215 +#define 
PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */ +#define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */ +#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */ +#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */ +#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */ +#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */ +#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */ + +/* Beverly */ +#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */ +#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */ +#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */ +#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */ +#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */ +#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */ +#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */ +#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */ +#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */ +#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */ +#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */ + +struct fnic_pcie_device { + u32 device; + u8 *desc; + u32 subsystem_device; + u8 *subsys_desc; +}; + /* * fnic private data per SCSI command. * These fields are locked by the hashed io_req_lock. @@ -127,8 +203,38 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) #define fnic_clear_state_flags(fnicp, st_flags) \ __fnic_set_state_flags(fnicp, st_flags, 1) +enum reset_states { + NOT_IN_PROGRESS = 0, + IN_PROGRESS, + RESET_ERROR +}; + +enum rscn_type { + NOT_PC_RSCN = 0, + PC_RSCN +}; + +enum pc_rscn_handling_status { + PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0, + PC_RSCN_HANDLING_IN_PROGRESS +}; + +enum pc_rscn_handling_feature { + PC_RSCN_HANDLING_FEATURE_OFF = 0, + PC_RSCN_HANDLING_FEATURE_ON +}; + +extern unsigned int fnic_fdmi_support; extern unsigned int fnic_log_level; extern unsigned int io_completions; +extern struct workqueue_struct *fnic_event_queue; + +extern unsigned int pc_rscn_handling_feature_flag; +extern spinlock_t reset_fnic_list_lock; +extern struct list_head reset_fnic_list; +extern struct workqueue_struct *reset_fnic_work_queue; +extern struct work_struct reset_fnic_work; + #define FNIC_MAIN_LOGGING 0x01 #define FNIC_FCS_LOGGING 0x02 @@ -155,6 +261,12 @@ do { \ "fnic<%d>: %s: %d: " fmt, fnic_num,\ __func__, __LINE__, ##args);) +#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ + shost_printk(kern_level, host, \ + "fnic<%d>: %s: %d: " fmt, fnic_num,\ + __func__, __LINE__, ##args);) + #define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) 
\ FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ shost_printk(kern_level, host, \ @@ -213,12 +325,26 @@ enum fnic_state { struct mempool; +enum fnic_role_e { + FNIC_ROLE_FCP_INITIATOR = 0, +}; + enum fnic_evt { FNIC_EVT_START_VLAN_DISC = 1, FNIC_EVT_START_FCF_DISC = 2, FNIC_EVT_MAX, }; +struct fnic_frame_list { + /* + * Link to frame lists + */ + struct list_head links; + void *fp; + int frame_len; + int rx_ethhdr_stripped; +}; + struct fnic_event { struct list_head list; struct fnic *fnic; @@ -235,8 +361,9 @@ struct fnic_cpy_wq { /* Per-instance private data structure */ struct fnic { int fnic_num; - struct fc_lport *lport; - struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ + enum fnic_role_e role; + struct fnic_iport_s iport; + struct Scsi_Host *host; struct vnic_dev_bar bar0; struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; @@ -255,6 +382,7 @@ struct fnic { unsigned int wq_count; unsigned int cq_count; + struct completion reset_completion_wait; struct mutex sgreset_mutex; spinlock_t sgreset_lock; /* lock for sgreset */ struct scsi_cmnd *sgreset_sc; @@ -268,25 +396,27 @@ struct fnic { u32 vlan_hw_insert:1; /* let hw insert the tag */ u32 in_remove:1; /* fnic device in removal */ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ - u32 link_events:1; /* set when we get any link event*/ - - struct completion *remove_wait; /* device remove thread blocks */ + struct completion *fw_reset_done; + u32 reset_in_progress; atomic_t in_flight; /* io counter */ bool internal_reset_inprogress; u32 _reserved; /* fill hole */ unsigned long state_flags; /* protected by host lock */ enum fnic_state state; spinlock_t fnic_lock; + unsigned long lock_flags; u16 vlan_id; /* VLAN tag including priority */ u8 data_src_addr[ETH_ALEN]; u64 fcp_input_bytes; /* internal statistic */ u64 fcp_output_bytes; /* internal statistic */ u32 link_down_cnt; + u32 soft_reset_count; int link_status; struct list_head list; + struct list_head links; struct pci_dev *pdev; struct vnic_fc_config config; struct vnic_dev *vdev; @@ -306,19 +436,29 @@ struct fnic { struct work_struct link_work; struct work_struct frame_work; struct work_struct flush_work; - struct sk_buff_head frame_queue; - struct sk_buff_head tx_queue; + struct list_head frame_queue; + struct list_head tx_queue; + mempool_t *frame_pool; + mempool_t *frame_elem_pool; + struct work_struct tport_work; + struct list_head tport_event_list; + + char subsys_desc[14]; + int subsys_desc_len; + int pc_rscn_handling_status; /*** FIP related data members -- start ***/ void (*set_vlan)(struct fnic *, u16 vlan); struct work_struct fip_frame_work; - struct sk_buff_head fip_frame_queue; + struct work_struct fip_timer_work; + struct list_head fip_frame_queue; struct timer_list fip_timer; - struct list_head vlans; spinlock_t vlans_lock; - - struct work_struct event_work; - struct list_head evlist; + struct timer_list retry_fip_timer; + struct timer_list fcs_ka_timer; + struct timer_list enode_ka_timer; + struct timer_list vn_ka_timer; + struct list_head vlan_list; /*** FIP related data members -- end ***/ /* copy work queue cache line section */ @@ -341,11 +481,6 @@ struct fnic { ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; }; -static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) -{ - return container_of(fip, struct fnic, ctlr); -} - extern struct workqueue_struct *fnic_event_queue; extern struct workqueue_struct *fnic_fip_queue; extern const struct attribute_group *fnic_host_groups[]; @@ -356,29 +491,29 @@ int 
@@ -356,29 +491,29 @@ int fnic_set_intr_mode_msix(struct fnic *fnic);
 void fnic_free_intr(struct fnic *fnic);
 int fnic_request_intr(struct fnic *fnic);
-int fnic_send(struct fc_lport *, struct fc_frame *);
 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
 void fnic_handle_frame(struct work_struct *work);
+void fnic_tport_event_handler(struct work_struct *work);
 void fnic_handle_link(struct work_struct *work);
 void fnic_handle_event(struct work_struct *work);
+void fdls_reclaim_oxid_handler(struct work_struct *work);
+void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid);
+void fdls_schedule_oxid_free_retry_work(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
 void fnic_flush_tx(struct work_struct *work);
-void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
-void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-void fnic_update_mac(struct fc_lport *, u8 *new);
 void fnic_update_mac_locked(struct fnic *, u8 *new);
 
 int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
 int fnic_abort_cmd(struct scsi_cmnd *);
 int fnic_device_reset(struct scsi_cmnd *);
-int fnic_host_reset(struct scsi_cmnd *);
-int fnic_reset(struct Scsi_Host *);
-void fnic_scsi_cleanup(struct fc_lport *);
-void fnic_scsi_abort_io(struct fc_lport *);
-void fnic_empty_scsi_cleanup(struct fc_lport *);
-void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
+int fnic_eh_host_reset_handler(struct scsi_cmnd *sc);
+int fnic_host_reset(struct Scsi_Host *shost);
+void fnic_reset(struct Scsi_Host *shost);
+int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
+void fnic_get_host_port_state(struct Scsi_Host *shost);
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
 int fnic_wq_cmpl_handler(struct fnic *fnic, int);
 int fnic_flogi_reg_handler(struct fnic *fnic, u32);
@@ -390,14 +525,15 @@ const char *fnic_state_to_str(unsigned int state);
 void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
 void fnic_log_q_error(struct fnic *fnic);
 void fnic_handle_link_event(struct fnic *fnic);
-
+int fnic_stats_debugfs_init(struct fnic *fnic);
+void fnic_stats_debugfs_remove(struct fnic *fnic);
 int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
 
 void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_reset_work_handler(struct work_struct *work);
 void fnic_handle_fip_event(struct fnic *fnic);
 void fnic_fcoe_reset_vlans(struct fnic *fnic);
-void fnic_fcoe_evlist_free(struct fnic *fnic);
-extern void fnic_handle_fip_timer(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct timer_list *t);
 
 static inline int
 fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
@@ -406,4 +542,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 }
 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
 void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
+void fnic_free_txq(struct list_head *head);
+int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
+		char **subsys_desc);
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
+void fnic_delete_fcp_tports(struct fnic *fnic);
+void fnic_flush_tport_event_list(struct fnic *fnic);
+int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
+unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid);
+unsigned int fnic_count_all_ioreqs(struct fnic *fnic);
+unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,
+		struct scsi_device *device);
+unsigned int fnic_count_lun_ioreqs(struct fnic *fnic,
+		struct scsi_device *device);
+void fnic_scsi_unload(struct fnic *fnic);
+void fnic_scsi_unload_cleanup(struct fnic *fnic);
+int fnic_get_debug_info(struct stats_debug_info *info,
+		struct fnic *fnic);
+
+struct fnic_scsi_iter_data {
+	struct fnic *fnic;
+	void *data1;
+	void *data2;
+	bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+		void *data1, void *data2);
+};
+
+static inline bool
+fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data)
+{
+	struct fnic_scsi_iter_data *iter = iter_data;
+
+	return iter->fn(iter->fnic, sc, iter->data1, iter->data2);
+}
+
+static inline void
+fnic_scsi_io_iter(struct fnic *fnic,
+		bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+			void *data1, void *data2),
+		void *data1, void *data2)
+{
+	struct fnic_scsi_iter_data iter_data = {
+		.fn = fn,
+		.fnic = fnic,
+		.data1 = data1,
+		.data2 = data2,
+	};
+	scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data);
+}
+
+#ifdef FNIC_DEBUG
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i = i+8) {
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+			"%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8,
+			u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3],
+			u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]);
+	}
+}
+
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+		int len, char *pfx)
+{
+	uint32_t s_id, d_id;
+
+	s_id = ntoh24(fchdr->fh_s_id);
+	d_id = ntoh24(fchdr->fh_d_id);
+	FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+		"%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n",
+		pfx, s_id, d_id, fchdr->fh_type,
+		FNIC_STD_GET_OX_ID(fchdr), len);
+
+	fnic_debug_dump(fnic, (uint8_t *)fchdr, len);
+
+}
+#else /* FNIC_DEBUG */
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {}
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+		uint32_t len, char *pfx) {}
+#endif /* FNIC_DEBUG */
 #endif /* _FNIC_H_ */
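A short usage sketch for the `fnic_scsi_io_iter()` helper added above: the callback runs once per busy command via `scsi_host_busy_iter()` and returns true to keep iterating. The counting helper below is an assumed example, not the driver's code; the real consumers are the `fnic_count_*_ioreqs()` declarations:

/* Illustrative callback: count busy commands targeting one LUN. */
static bool fnic_count_lun_cb(struct fnic *fnic, struct scsi_cmnd *sc,
			      void *data1, void *data2)
{
	struct scsi_device *sdev = data1;
	unsigned int *count = data2;

	if (sc->device == sdev)
		(*count)++;
	return true;	/* keep iterating over all busy commands */
}

static unsigned int fnic_count_lun(struct fnic *fnic, struct scsi_device *sdev)
{
	unsigned int count = 0;

	fnic_scsi_io_iter(fnic, fnic_count_lun_cb, sdev, &count);
	return count;
}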
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index 0c5e57c7e322..705718f0809b 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -11,8 +11,8 @@
 static ssize_t fnic_show_state(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct fc_lport *lp = shost_priv(class_to_shost(dev));
-	struct fnic *fnic = lport_priv(lp);
+	struct fnic *fnic =
+		*((struct fnic **) shost_priv(class_to_shost(dev)));
 
 	return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
 }
@@ -26,9 +26,13 @@ static ssize_t fnic_show_drv_version(struct device *dev,
 static ssize_t fnic_show_link_state(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct fc_lport *lp = shost_priv(class_to_shost(dev));
+	struct fnic *fnic =
+		*((struct fnic **) shost_priv(class_to_shost(dev)));
 
-	return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down");
+	return sysfs_emit(buf, "%s\n",
+		((fnic->iport.state != FNIC_IPORT_STATE_INIT) &&
+		(fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ?
+		"Link Up" : "Link Down");
 }
 
 static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
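With libfc removed, the `Scsi_Host` private area now holds only a pointer to the owning fnic, hence the `*((struct fnic **) shost_priv(...))` double dereference in the attribute handlers above. A minimal sketch of the corresponding setup side, assuming the host was allocated with room for one pointer (`fnic_attach_host()` is an assumed name for illustration):

/* Illustrative sketch of the new private-data layout. */
static void fnic_attach_host(struct fnic *fnic, struct Scsi_Host *host)
{
	struct fnic **priv = shost_priv(host);

	*priv = fnic;	/* readers recover it via *(struct fnic **)shost_priv() */
	fnic->host = host;
}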
+ "Link Up" : "Link Down"); } static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index 2619a2d4f5f1..5767862ae42f 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -7,6 +7,9 @@ #include <linux/vmalloc.h> #include "fnic.h" +extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer, + struct fnic *fnic); + static struct dentry *fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; @@ -593,6 +596,7 @@ static int fnic_stats_debugfs_open(struct inode *inode, debug->buf_size = buf_size; memset((void *)debug->debug_buffer, 0, buf_size); debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); + debug->buffer_len += fnic_get_debug_info(debug, fnic); file->private_data = debug; @@ -673,26 +677,25 @@ static const struct file_operations fnic_reset_debugfs_fops = { * It will create file stats and reset_stats under statistics/host# directory * to log per fnic stats. */ -void fnic_stats_debugfs_init(struct fnic *fnic) +int fnic_stats_debugfs_init(struct fnic *fnic) { char name[16]; - snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); + snprintf(name, sizeof(name), "host%d", fnic->host->host_no); fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, fnic_stats_debugfs_root); - fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_stats_debugfs_fops); - fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_reset_debugfs_fops); + return 0; } /* diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index a08293b2ad9f..1e8cd64f9a5c 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -14,701 +14,379 @@ #include <linux/workqueue.h> #include <scsi/fc/fc_fip.h> #include <scsi/fc/fc_els.h> -#include <scsi/fc/fc_fcoe.h> #include <scsi/fc_frame.h> -#include <scsi/libfc.h> +#include <linux/etherdevice.h> +#include <scsi/scsi_transport_fc.h> #include "fnic_io.h" #include "fnic.h" -#include "fnic_fip.h" +#include "fnic_fdls.h" +#include "fdls_fc.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +#include "fip.h" + +#define MAX_RESET_WAIT_COUNT 64 -static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; -struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; -static void fnic_set_eth_mode(struct fnic *); -static void fnic_fcoe_send_vlan_req(struct fnic *fnic); -static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); -static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); -static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); -static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); +static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; -void fnic_handle_link(struct work_struct *work) +/* + * Internal Functions + * This function will initialize the src_mac address to be + * used in outgoing frames + */ +static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, + uint8_t *src_mac) { - struct fnic *fnic = container_of(work, struct fnic, link_work); - unsigned long flags; - int old_link_status; - u32 old_link_down_cnt; - u64 old_port_speed, new_port_speed; - - spin_lock_irqsave(&fnic->fnic_lock, flags); - - fnic->link_events = 1; /* less work to just set everytime*/ - - if (fnic->stop_rx_link_events) { - 
 
 /*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index a08293b2ad9f..1e8cd64f9a5c 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -14,701 +14,379 @@
 #include <linux/workqueue.h>
 #include <scsi/fc/fc_fip.h>
 #include <scsi/fc/fc_els.h>
-#include <scsi/fc/fc_fcoe.h>
 #include <scsi/fc_frame.h>
-#include <scsi/libfc.h>
+#include <linux/etherdevice.h>
+#include <scsi/scsi_transport_fc.h>
 #include "fnic_io.h"
 #include "fnic.h"
-#include "fnic_fip.h"
+#include "fnic_fdls.h"
+#include "fdls_fc.h"
 #include "cq_enet_desc.h"
 #include "cq_exch_desc.h"
+#include "fip.h"
+
+#define MAX_RESET_WAIT_COUNT 64
 
-static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
-struct workqueue_struct *fnic_fip_queue;
 struct workqueue_struct *fnic_event_queue;
 
-static void fnic_set_eth_mode(struct fnic *);
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
-static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
-static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
-static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
-static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
+static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC;
 
-void fnic_handle_link(struct work_struct *work)
+/*
+ * Internal Functions
+ * This function will initialize the src_mac address to be
+ * used in outgoing frames
+ */
+static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic,
+		uint8_t *src_mac)
 {
-	struct fnic *fnic = container_of(work, struct fnic, link_work);
-	unsigned long flags;
-	int old_link_status;
-	u32 old_link_down_cnt;
-	u64 old_port_speed, new_port_speed;
-
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-
-	fnic->link_events = 1;	/* less work to just set everytime*/
-
-	if (fnic->stop_rx_link_events) {
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		return;
-	}
-
-	old_link_down_cnt = fnic->link_down_cnt;
-	old_link_status = fnic->link_status;
-	old_port_speed = atomic64_read(
-			&fnic->fnic_stats.misc_stats.current_port_speed);
-
-	fnic->link_status = vnic_dev_link_status(fnic->vdev);
-	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
-
-	new_port_speed = vnic_dev_port_speed(fnic->vdev);
-	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
-			new_port_speed);
-	if (old_port_speed != new_port_speed)
-		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-				"Current vnic speed set to: %llu\n",
-				new_port_speed);
-
-	switch (vnic_dev_port_speed(fnic->vdev)) {
-	case DCEM_PORTSPEED_10G:
-		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
-		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
-		break;
-	case DCEM_PORTSPEED_20G:
-		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
-		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
-		break;
-	case DCEM_PORTSPEED_25G:
-		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
-		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
-		break;
-	case DCEM_PORTSPEED_40G:
-	case DCEM_PORTSPEED_4x10G:
-		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
-		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
-		break;
-	case DCEM_PORTSPEED_100G:
-		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
-		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
-		break;
-	default:
-		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
-		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
-		break;
-	}
-
-	if (old_link_status == fnic->link_status) {
-		if (!fnic->link_status) {
-			/* DOWN -> DOWN */
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			fnic_fc_trace_set_data(fnic->lport->host->host_no,
-				FNIC_FC_LE, "Link Status: DOWN->DOWN",
-				strlen("Link Status: DOWN->DOWN"));
-			FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-					"down->down\n");
-		} else {
-			if (old_link_down_cnt != fnic->link_down_cnt) {
-				/* UP -> DOWN -> UP */
-				fnic->lport->host_stats.link_failure_count++;
-				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-				fnic_fc_trace_set_data(
-					fnic->lport->host->host_no,
-					FNIC_FC_LE,
-					"Link Status:UP_DOWN_UP",
-					strlen("Link_Status:UP_DOWN_UP")
-					);
-				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-						"link down\n");
-				fcoe_ctlr_link_down(&fnic->ctlr);
-				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
-					/* start FCoE VLAN discovery */
-					fnic_fc_trace_set_data(
-						fnic->lport->host->host_no,
-						FNIC_FC_LE,
-						"Link Status: UP_DOWN_UP_VLAN",
-						strlen(
-						"Link Status: UP_DOWN_UP_VLAN")
-						);
-					fnic_fcoe_send_vlan_req(fnic);
-					return;
-				}
-				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-						"up->down->up: Link up\n");
-				fcoe_ctlr_link_up(&fnic->ctlr);
-			} else {
-				/* UP -> UP */
-				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-				fnic_fc_trace_set_data(
-					fnic->lport->host->host_no, FNIC_FC_LE,
-					"Link Status: UP_UP",
-					strlen("Link Status: UP_UP"));
-				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-						"up->up\n");
-			}
-		}
-	} else if (fnic->link_status) {
-		/* DOWN -> UP */
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
-			/* start FCoE VLAN discovery */
-			fnic_fc_trace_set_data(fnic->lport->host->host_no,
-				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
-				strlen("Link Status: DOWN_UP_VLAN"));
-			fnic_fcoe_send_vlan_req(fnic);
-
-			return;
-		}
-
-		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-				"down->up: Link up\n");
-		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
-			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
-		fcoe_ctlr_link_up(&fnic->ctlr);
-	} else {
-		/* UP -> DOWN */
-		fnic->lport->host_stats.link_failure_count++;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-				"up->down: Link down\n");
-		fnic_fc_trace_set_data(
-			fnic->lport->host->host_no, FNIC_FC_LE,
-			"Link Status: UP_DOWN",
-			strlen("Link Status: UP_DOWN"));
-		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
-			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-				"deleting fip-timer during link-down\n");
-			del_timer_sync(&fnic->fip_timer);
-		}
-		fcoe_ctlr_link_down(&fnic->ctlr);
-	}
+	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		"Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x",
+		src_mac[0], src_mac[1], src_mac[2], src_mac[3],
+		src_mac[4], src_mac[5]);
+	memcpy(fnic->iport.fpma, src_mac, 6);
 }
 
 /*
- * This function passes incoming fabric frames to libFC
+ * This function will initialize the dst_mac address to be
+ * used in outgoing frames
  */
-void fnic_handle_frame(struct work_struct *work)
+static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic,
+		uint8_t *dst_mac)
 {
-	struct fnic *fnic = container_of(work, struct fnic, frame_work);
-	struct fc_lport *lp = fnic->lport;
-	unsigned long flags;
-	struct sk_buff *skb;
-	struct fc_frame *fp;
-
-	while ((skb = skb_dequeue(&fnic->frame_queue))) {
+	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		"Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x",
+		dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3],
+		dst_mac[4], dst_mac[5]);
 
-		spin_lock_irqsave(&fnic->fnic_lock, flags);
-		if (fnic->stop_rx_link_events) {
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			dev_kfree_skb(skb);
-			return;
-		}
-		fp = (struct fc_frame *)skb;
-
-		/*
-		 * If we're in a transitional state, just re-queue and return.
-		 * The queue will be serviced when we get to a stable state.
-		 */
-		if (fnic->state != FNIC_IN_FC_MODE &&
-		    fnic->state != FNIC_IN_ETH_MODE) {
-			skb_queue_head(&fnic->frame_queue, skb);
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			return;
-		}
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-		fc_exch_recv(lp, fp);
-	}
+	memcpy(fnic->iport.fcfmac, dst_mac, 6);
 }
 
-void fnic_fcoe_evlist_free(struct fnic *fnic)
+void fnic_get_host_port_state(struct Scsi_Host *shost)
 {
-	struct fnic_event *fevt = NULL;
-	struct fnic_event *next = NULL;
+	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+	struct fnic_iport_s *iport = &fnic->iport;
 	unsigned long flags;
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	if (list_empty(&fnic->evlist)) {
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		return;
-	}
-
-	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
-		list_del(&fevt->list);
-		kfree(fevt);
-	}
+	if (!fnic->link_status)
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+	else if (iport->state == FNIC_IPORT_STATE_READY)
+		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+	else
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 }
 
-void fnic_handle_event(struct work_struct *work)
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup)
 {
-	struct fnic *fnic = container_of(work, struct fnic, event_work);
-	struct fnic_event *fevt = NULL;
-	struct fnic_event *next = NULL;
-	unsigned long flags;
+	struct fnic_iport_s *iport = &fnic->iport;
 
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	if (list_empty(&fnic->evlist)) {
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		return;
-	}
+	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		"link up: %d, usefip: %d", linkup, iport->usefip);
 
-	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
-		if (fnic->stop_rx_link_events) {
-			list_del(&fevt->list);
-			kfree(fevt);
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			return;
-		}
-		/*
-		 * If we're in a transitional state, just re-queue and return.
-		 * The queue will be serviced when we get to a stable state.
-		 */
-		if (fnic->state != FNIC_IN_FC_MODE &&
-		    fnic->state != FNIC_IN_ETH_MODE) {
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			return;
-		}
+	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
 
-		list_del(&fevt->list);
-		switch (fevt->event) {
-		case FNIC_EVT_START_VLAN_DISC:
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+	if (linkup) {
+		if (iport->usefip) {
+			iport->state = FNIC_IPORT_STATE_FIP;
+			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+				"link up: %d, usefip: %d", linkup, iport->usefip);
 			fnic_fcoe_send_vlan_req(fnic);
-			spin_lock_irqsave(&fnic->fnic_lock, flags);
-			break;
-		case FNIC_EVT_START_FCF_DISC:
-			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-				"Start FCF Discovery\n");
-			fnic_fcoe_start_fcf_disc(fnic);
-			break;
-		default:
-			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-				"Unknown event 0x%x\n", fevt->event);
-			break;
+		} else {
+			iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
+			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+				"iport->state: %d", iport->state);
+			fnic_fdls_disc_start(iport);
 		}
-		kfree(fevt);
+	} else {
+		iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+		if (!is_zero_ether_addr(iport->fpma))
+			vnic_dev_del_addr(fnic->vdev, iport->fpma);
+		fnic_common_fip_cleanup(fnic);
+		fnic_fdls_link_down(iport);
 	}
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
 }
 
-/**
- * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
- * @fip: The FCoE controller that received the frame
- * @skb: The received FIP frame
- *
- * Returns non-zero if the frame is rejected with unsupported cmd with
- * insufficient resource els explanation.
+
+/*
+ * FPMA can be either taken from ethhdr(dst_mac) or flogi resp
+ * or derive from FC_MAP and FCID combination. While it should be
+ * same, revisit this if there is any possibility of not-correct.
  */
-static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
-		struct sk_buff *skb)
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+		uint8_t *fcid)
 {
-	struct fc_lport *lport = fip->lp;
-	struct fip_header *fiph;
-	struct fc_frame_header *fh = NULL;
-	struct fip_desc *desc;
-	struct fip_encaps *els;
-	u16 op;
-	u8 els_op;
-	u8 sub;
-
-	size_t rlen;
-	size_t dlen = 0;
-
-	if (skb_linearize(skb))
-		return 0;
+	struct fnic *fnic = iport->fnic;
+	struct ethhdr *ethhdr = (struct ethhdr *) rx_frame;
+	uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };
 
-	if (skb->len < sizeof(*fiph))
-		return 0;
+	memcpy(&fcmac[3], fcid, 3);
 
-	fiph = (struct fip_header *)skb->data;
-	op = ntohs(fiph->fip_op);
-	sub = fiph->fip_subcode;
+	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		"learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+		ethhdr->h_dest[0], ethhdr->h_dest[1],
+		ethhdr->h_dest[2], ethhdr->h_dest[3],
+		ethhdr->h_dest[4], ethhdr->h_dest[5]);
 
-	if (op != FIP_OP_LS)
-		return 0;
+	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		"learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+		fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
+		fcmac[5]);
 
-	if (sub != FIP_SC_REP)
-		return 0;
-
-	rlen = ntohs(fiph->fip_dl_len) * 4;
-	if (rlen + sizeof(*fiph) > skb->len)
-		return 0;
-
-	desc = (struct fip_desc *)(fiph + 1);
-	dlen = desc->fip_dlen * FIP_BPW;
+	fnic_fdls_set_fcoe_srcmac(fnic, fcmac);
+	fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source);
+}
 
-	if (desc->fip_dtype == FIP_DT_FLOGI) {
+void fnic_fdls_init(struct fnic *fnic, int usefip)
+{
+	struct fnic_iport_s *iport = &fnic->iport;
 
-		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
-			return 0;
+	/* Initialize iPort structure */
+	iport->state = FNIC_IPORT_STATE_INIT;
+	iport->fnic = fnic;
+	iport->usefip = usefip;
 
-		els = (struct fip_encaps *)desc;
-		fh = (struct fc_frame_header *)(els + 1);
+	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		"iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x",
+		iport->hwmac[0], iport->hwmac[1], iport->hwmac[2],
+		iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]);
 
-		if (!fh)
-			return 0;
+	INIT_LIST_HEAD(&iport->tport_list);
+	INIT_LIST_HEAD(&iport->tport_list_pending_del);
 
-		/*
-		 * ELS command code, reason and explanation should be = Reject,
-		 * unsupported command and insufficient resource
-		 */
-		els_op = *(u8 *)(fh + 1);
-		if (els_op == ELS_LS_RJT) {
-			shost_printk(KERN_INFO, lport->host,
-				"Flogi Request Rejected by Switch\n");
-			return 1;
-		}
-		shost_printk(KERN_INFO, lport->host,
-				"Flogi Request Accepted by Switch\n");
-	}
-	return 0;
+	fnic_fdls_disc_init(iport);
 }
 
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+void fnic_handle_link(struct work_struct *work)
 {
-	struct fcoe_ctlr *fip = &fnic->ctlr;
-	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
-	struct sk_buff *skb;
-	char *eth_fr;
-	struct fip_vlan *vlan;
-	u64 vlan_tov;
+	struct fnic *fnic = container_of(work, struct fnic, link_work);
+	int old_link_status;
+	u32 old_link_down_cnt;
+	int max_count = 0;
 
-	fnic_fcoe_reset_vlans(fnic);
-	fnic->set_vlan(fnic, 0);
+	if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI)
+		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"Interrupt mode is not MSI\n");
 
-	if (printk_ratelimit())
-		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-			"Sending VLAN request...\n");
+	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
 
-	skb = dev_alloc_skb(sizeof(struct fip_vlan));
-	if (!skb)
+	if (fnic->stop_rx_link_events) {
+
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Stop link rx events\n"); return; - - eth_fr = (char *)skb->data; - vlan = (struct fip_vlan *)eth_fr; - - memset(vlan, 0, sizeof(*vlan)); - memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); - memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); - vlan->eth.h_proto = htons(ETH_P_FIP); - - vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); - vlan->fip.fip_op = htons(FIP_OP_VLAN); - vlan->fip.fip_subcode = FIP_SC_VL_REQ; - vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); - - vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; - vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; - memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); - - vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; - vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; - put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); - atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); - - skb_put(skb, sizeof(*vlan)); - skb->protocol = htons(ETH_P_FIP); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - fip->send(fip, skb); - - /* set a timer so that we can retry if there no response */ - vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); - mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); -} - -static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) -{ - struct fcoe_ctlr *fip = &fnic->ctlr; - struct fip_header *fiph; - struct fip_desc *desc; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - u16 vid; - size_t rlen; - size_t dlen; - struct fcoe_vlan *vlan; - u64 sol_time; - unsigned long flags; - - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Received VLAN response...\n"); - - fiph = (struct fip_header *) skb->data; - - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", - ntohs(fiph->fip_op), fiph->fip_subcode); - - rlen = ntohs(fiph->fip_dl_len) * 4; - fnic_fcoe_reset_vlans(fnic); - spin_lock_irqsave(&fnic->vlans_lock, flags); - desc = (struct fip_desc *)(fiph + 1); - while (rlen > 0) { - dlen = desc->fip_dlen * FIP_BPW; - switch (desc->fip_dtype) { - case FIP_DT_VLAN: - vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); - shost_printk(KERN_INFO, fnic->lport->host, - "process_vlan_resp: FIP VLAN %d\n", vid); - vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); - if (!vlan) { - /* retry from timer */ - spin_unlock_irqrestore(&fnic->vlans_lock, - flags); - goto out; - } - vlan->vid = vid & 0x0fff; - vlan->state = FIP_VLAN_AVAIL; - list_add_tail(&vlan->list, &fnic->vlans); - break; - } - desc = (struct fip_desc *)((char *)desc + dlen); - rlen -= dlen; } - /* any VLAN descriptors present ? */ - if (list_empty(&fnic->vlans)) { - /* retry from timer */ - atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "No VLAN descriptors in FIP VLAN response\n"); - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - goto out; + /* Do not process if the fnic is already in transitional state */ + if ((fnic->state != FNIC_IN_ETH_MODE) + && (fnic->state != FNIC_IN_FC_MODE)) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic in transitional state: %d. 
link up: %d ignored", + fnic->state, vnic_dev_link_status(fnic->vdev)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Current link status: %d iport state: %d\n", + fnic->link_status, fnic->iport.state); + return; } - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - vlan->sol_count++; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - - /* start the solicitation */ - fcoe_ctlr_link_up(fip); - - sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); -out: - return; -} - -static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) -{ - unsigned long flags; - struct fcoe_vlan *vlan; - u64 sol_time; - - spin_lock_irqsave(&fnic->vlans_lock, flags); - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - vlan->sol_count = 1; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - - /* start the solicitation */ - fcoe_ctlr_link_up(&fnic->ctlr); - - sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); -} - -static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) -{ - unsigned long flags; - struct fcoe_vlan *fvlan; + old_link_down_cnt = fnic->link_down_cnt; + old_link_status = fnic->link_status; + fnic->link_status = vnic_dev_link_status(fnic->vdev); + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (list_empty(&fnic->vlans)) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return -EINVAL; + while (fnic->reset_in_progress == IN_PROGRESS) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic reset in progress. Link event needs to wait\n"); + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "waiting for reset completion\n"); + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(5000)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "woken up from reset completion wait\n"); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + max_count++; + if (max_count >= MAX_RESET_WAIT_COUNT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Rstth waited for too long. 
Skipping handle link event\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } } - - fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - if (fvlan->state == FIP_VLAN_USED) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Marking fnic reset in progress\n"); + fnic->reset_in_progress = IN_PROGRESS; + + if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) || + (fnic->link_status != old_link_status)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "old link status: %d link status: %d\n", + old_link_status, (int) fnic->link_status); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "old down count %d down count: %d\n", + old_link_down_cnt, (int) fnic->link_down_cnt); } - if (fvlan->state == FIP_VLAN_SENT) { - fvlan->state = FIP_VLAN_USED; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return 0; + if (old_link_status == fnic->link_status) { + if (!fnic->link_status) { + /* DOWN -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->down\n"); + } else { + if (old_link_down_cnt != fnic->link_down_cnt) { + /* UP -> DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->down. Link down\n"); + fnic_fdls_link_status_change(fnic, 0); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->up. Link up\n"); + fnic_fdls_link_status_change(fnic, 1); + } else { + /* UP -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->up\n"); + } + } + } else if (fnic->link_status) { + /* DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->up. Link up\n"); + fnic_fdls_link_status_change(fnic, 1); + } else { + /* UP -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->down. 
Link down\n"); + fnic_fdls_link_status_change(fnic, 0); } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return -EINVAL; -} -static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) -{ - struct fnic_event *fevt; - unsigned long flags; - - fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); - if (!fevt) - return; + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); - fevt->fnic = fnic; - fevt->event = ev; - - spin_lock_irqsave(&fnic->fnic_lock, flags); - list_add_tail(&fevt->list, &fnic->evlist); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - schedule_work(&fnic->event_work); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Marking fnic reset completion\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } -static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +void fnic_handle_frame(struct work_struct *work) { - struct fip_header *fiph; - int ret = 1; - u16 op; - u8 sub; + struct fnic *fnic = container_of(work, struct fnic, frame_work); + struct fnic_frame_list *cur_frame, *next; + int fchdr_offset = 0; - if (!skb || !(skb->data)) - return -1; + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) { + if (fnic->stop_rx_link_events) { + list_del(&cur_frame->links); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + kfree(cur_frame->fp); + mempool_free(cur_frame, fnic->frame_elem_pool); + return; + } - if (skb_linearize(skb)) - goto drop; + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Cannot process frame in transitional state\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } - fiph = (struct fip_header *)skb->data; - op = ntohs(fiph->fip_op); - sub = fiph->fip_subcode; + list_del(&cur_frame->links); - if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) - goto drop; + /* Frames from FCP_RQ will have ethhdrs stripped off */ + fchdr_offset = (cur_frame->rx_ethhdr_stripped) ? 
+ 0 : FNIC_ETH_FCOE_HDRS_OFFSET; - if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) - goto drop; + fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp, + cur_frame->frame_len, fchdr_offset); - if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { - if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) - goto drop; - /* pass it on to fcoe */ - ret = 1; - } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { - /* set the vlan as used */ - fnic_fcoe_process_vlan_resp(fnic, skb); - ret = 0; - } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { - /* received CVL request, restart vlan disc */ - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - /* pass it on to fcoe */ - ret = 1; + kfree(cur_frame->fp); + mempool_free(cur_frame, fnic->frame_elem_pool); } -drop: - return ret; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } void fnic_handle_fip_frame(struct work_struct *work) { + struct fnic_frame_list *cur_frame, *next; struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - unsigned long flags; - struct sk_buff *skb; - struct ethhdr *eh; - while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { - spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Processing FIP frame\n"); + + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue, + links) { if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb(skb); + list_del(&cur_frame->links); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + kfree(cur_frame->fp); + kfree(cur_frame); return; } + /* * If we're in a transitional state, just re-queue and return. * The queue will be serviced when we get to a stable state. */ if (fnic->state != FNIC_IN_FC_MODE && - fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_head(&fnic->fip_frame_queue, skb); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); return; } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - eh = (struct ethhdr *)skb->data; - if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { - dev_kfree_skb(skb); - continue; - } - /* - * If there's FLOGI rejects - clear all - * fcf's & restart from scratch - */ - if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { - atomic64_inc( - &fnic_stats->vlan_stats.flogi_rejects); - shost_printk(KERN_INFO, fnic->lport->host, - "Trigger a Link down - VLAN Disc\n"); - fcoe_ctlr_link_down(&fnic->ctlr); - /* start FCoE VLAN discovery */ - fnic_fcoe_send_vlan_req(fnic); - dev_kfree_skb(skb); - continue; - } - fcoe_ctlr_recv(&fnic->ctlr, skb); - continue; + + list_del(&cur_frame->links); + + if (fdls_fip_recv_frame(fnic, cur_frame->fp)) { + kfree(cur_frame->fp); + kfree(cur_frame); } } + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. - * @skb: Ethernet Frame. + * @fp: Ethernet Frame. */ -static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) +static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp) { - struct fc_frame *fp; struct ethhdr *eh; - struct fcoe_hdr *fcoe_hdr; - struct fcoe_crc_eof *ft; + struct fnic_frame_list *fip_fr_elem; + unsigned long flags; - /* - * Undo VLAN encapsulation if present. 
- */ - eh = (struct ethhdr *)skb->data; - if (eh->h_proto == htons(ETH_P_8021Q)) { - memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); - eh = skb_pull(skb, VLAN_HLEN); - skb_reset_mac_header(skb); - } - if (eh->h_proto == htons(ETH_P_FIP)) { - if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { - printk(KERN_ERR "Dropped FIP frame, as firmware " - "uses non-FIP mode, Enable FIP " - "using UCSM\n"); - goto drop; - } - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - skb_queue_tail(&fnic->fip_frame_queue, skb); + eh = (struct ethhdr *) fp; + if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) { + fip_fr_elem = (struct fnic_frame_list *) + kzalloc(sizeof(struct fnic_frame_list), GFP_ATOMIC); + if (!fip_fr_elem) + return 0; + fip_fr_elem->fp = fp; + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); queue_work(fnic_fip_queue, &fnic->fip_frame_work); - return 1; /* let caller know packet was used */ - } - if (eh->h_proto != htons(ETH_P_FCOE)) - goto drop; - skb_set_network_header(skb, sizeof(*eh)); - skb_pull(skb, sizeof(*eh)); - - fcoe_hdr = (struct fcoe_hdr *)skb->data; - if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) - goto drop; - - fp = (struct fc_frame *)skb; - fc_frame_init(fp); - fr_sof(fp) = fcoe_hdr->fcoe_sof; - skb_pull(skb, sizeof(struct fcoe_hdr)); - skb_reset_transport_header(skb); - - ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); - fr_eof(fp) = ft->fcoe_eof; - skb_trim(skb, skb->len - sizeof(*ft)); - return 0; -drop: - dev_kfree_skb_irq(skb); - return -1; + return 1; /* let caller know packet was used */ + } else + return 0; } /** @@ -720,206 +398,147 @@ drop: */ void fnic_update_mac_locked(struct fnic *fnic, u8 *new) { - u8 *ctl = fnic->ctlr.ctl_src_addr; + struct fnic_iport_s *iport = &fnic->iport; + u8 *ctl = iport->hwmac; u8 *data = fnic->data_src_addr; if (is_zero_ether_addr(new)) new = ctl; if (ether_addr_equal(data, new)) return; - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "update_mac %pM\n", new); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Update MAC: %u\n", *new); + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) vnic_dev_del_addr(fnic->vdev, data); + memcpy(data, new, ETH_ALEN); if (!ether_addr_equal(new, ctl)) vnic_dev_add_addr(fnic->vdev, new); } -/** - * fnic_update_mac() - set data MAC address and filters. - * @lport: local port. - * @new: newly-assigned FCoE MAC address. - */ -void fnic_update_mac(struct fc_lport *lport, u8 *new) -{ - struct fnic *fnic = lport_priv(lport); - - spin_lock_irq(&fnic->fnic_lock); - fnic_update_mac_locked(fnic, new); - spin_unlock_irq(&fnic->fnic_lock); -} - -/** - * fnic_set_port_id() - set the port_ID after successful FLOGI. - * @lport: local port. - * @port_id: assigned FC_ID. - * @fp: received frame containing the FLOGI accept or NULL. - * - * This is called from libfc when a new FC_ID has been assigned. - * This causes us to reset the firmware to FC_MODE and setup the new MAC - * address and FC_ID. - * - * It is also called with FC_ID 0 when we're logged off. - * - * If the FC_ID is due to point-to-point, fp may be NULL. 
- */ -void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) -{ - struct fnic *fnic = lport_priv(lport); - u8 *mac; - int ret; - - FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num, - "set port_id 0x%x fp 0x%p\n", - port_id, fp); - - /* - * If we're clearing the FC_ID, change to use the ctl_src_addr. - * Set ethernet mode to send FLOGI. - */ - if (!port_id) { - fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); - fnic_set_eth_mode(fnic); - return; - } - - if (fp) { - mac = fr_cb(fp)->granted_mac; - if (is_zero_ether_addr(mac)) { - /* non-FIP - FLOGI already accepted - ignore return */ - fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); - } - fnic_update_mac(lport, mac); - } - - /* Change state to reflect transition to FC mode */ - spin_lock_irq(&fnic->fnic_lock); - if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) - fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; - else { - FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "Unexpected fnic state: %s processing FLOGI response", - fnic_state_to_str(fnic->state)); - spin_unlock_irq(&fnic->fnic_lock); - return; - } - spin_unlock_irq(&fnic->fnic_lock); - - /* - * Send FLOGI registration to firmware to set up FC mode. - * The new address will be set up when registration completes. - */ - ret = fnic_flogi_reg_handler(fnic, port_id); - - if (ret < 0) { - spin_lock_irq(&fnic->fnic_lock); - if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) - fnic->state = FNIC_IN_ETH_MODE; - spin_unlock_irq(&fnic->fnic_lock); - } -} - static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped __attribute__((unused)), void *opaque) { struct fnic *fnic = vnic_dev_priv(rq->vdev); - struct sk_buff *skb; - struct fc_frame *fp; + uint8_t *fp; struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned int ethhdr_stripped; u8 type, color, eop, sop, ingress_port, vlan_stripped; - u8 fcoe = 0, fcoe_sof, fcoe_eof; - u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0; - u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; - u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc; + u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0; u8 fcs_ok = 1, packet_error = 0; - u16 q_number, completed_index, bytes_written = 0, vlan, checksum; + u16 q_number, completed_index, vlan; u32 rss_hash; + u16 checksum; + u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 fcoe = 0, fcoe_sof, fcoe_eof; u16 exchange_id, tmpl; u8 sof = 0; u8 eof = 0; u32 fcp_bytes_written = 0; + u16 enet_bytes_written = 0; + u32 bytes_written = 0; unsigned long flags; + struct fnic_frame_list *frame_elem = NULL; + struct ethhdr *eh; dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - skb = buf->os_buf; - fp = (struct fc_frame *)skb; + DMA_FROM_DEVICE); + fp = (uint8_t *) buf->os_buf; buf->os_buf = NULL; cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); if (type == CQ_DESC_TYPE_RQ_FCP) { - cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, - &tmpl, &fcp_bytes_written, &sof, &eof, - &ingress_port, &packet_error, - &fcoe_enc_error, &fcs_ok, &vlan_stripped, - &vlan); - skb_trim(skb, fcp_bytes_written); - fr_sof(fp) = sof; - fr_eof(fp) = eof; - + cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type, + &color, &q_number, &completed_index, &eop, &sop, + &fcoe_fnic_crc_ok, &exchange_id, &tmpl, + &fcp_bytes_written, &sof, &eof, &ingress_port, + 
&packet_error, &fcoe_enc_error, &fcs_ok, + &vlan_stripped, &vlan); + ethhdr_stripped = 1; + bytes_written = fcp_bytes_written; } else if (type == CQ_DESC_TYPE_RQ_ENET) { - cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &ingress_port, &fcoe, &eop, &sop, - &rss_type, &csum_not_calc, &rss_hash, - &bytes_written, &packet_error, - &vlan_stripped, &vlan, &checksum, - &fcoe_sof, &fcoe_fc_crc_ok, - &fcoe_enc_error, &fcoe_eof, - &tcp_udp_csum_ok, &udp, &tcp, - &ipv4_csum_ok, &ipv6, &ipv4, - &ipv4_fragment, &fcs_ok); - skb_trim(skb, bytes_written); + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type, + &color, &q_number, &completed_index, + &ingress_port, &fcoe, &eop, &sop, &rss_type, + &csum_not_calc, &rss_hash, &enet_bytes_written, + &packet_error, &vlan_stripped, &vlan, + &checksum, &fcoe_sof, &fcoe_fnic_crc_ok, + &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, + &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, + &ipv4_fragment, &fcs_ok); + + ethhdr_stripped = 0; + bytes_written = enet_bytes_written; + if (!fcs_ok) { atomic64_inc(&fnic_stats->misc_stats.frame_errors); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fcs error. dropping packet.\n"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fcs error. Dropping packet.\n", fnic); goto drop; } - if (fnic_import_rq_eth_pkt(fnic, skb)) - return; + eh = (struct ethhdr *) fp; + if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) { + if (fnic_import_rq_eth_pkt(fnic, fp)) + return; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping h_proto 0x%x", + be16_to_cpu(eh->h_proto)); + goto drop; + } } else { - /* wrong CQ type*/ - shost_printk(KERN_ERR, fnic->lport->host, - "fnic rq_cmpl wrong cq type x%x\n", type); + /* wrong CQ type */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic rq_cmpl wrong cq type x%x\n", type); goto drop; } - if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) { atomic64_inc(&fnic_stats->misc_stats.frame_errors); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fnic rq_cmpl fcoe x%x fcsok x%x" - " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" - " x%x\n", - fcoe, fcs_ok, packet_error, - fcoe_fc_crc_ok, fcoe_enc_error); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n", + fcoe, fcs_ok, packet_error, + fcoe_fnic_crc_ok, fcoe_enc_error); goto drop; } spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->stop_rx_link_events) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic->stop_rx_link_events: %d\n", + fnic->stop_rx_link_events); goto drop; } - fr_dev(fp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, - (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); + + frame_elem = mempool_alloc(fnic->frame_elem_pool, + GFP_ATOMIC | __GFP_ZERO); + if (!frame_elem) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for frame elem"); + goto drop; } + frame_elem->fp = fp; + frame_elem->rx_ethhdr_stripped = ethhdr_stripped; + frame_elem->frame_len = bytes_written; - skb_queue_tail(&fnic->frame_queue, skb); - queue_work(fnic_event_queue, &fnic->frame_work); + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&frame_elem->links, &fnic->frame_queue); + 
spin_unlock_irqrestore(&fnic->fnic_lock, flags); + queue_work(fnic_event_queue, &fnic->frame_work); return; + drop: - dev_kfree_skb_irq(skb); + kfree(fp); } static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, @@ -945,10 +564,10 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, fnic_rq_cmpl_handler_cont, NULL); - if (cur_work_done) { + if (cur_work_done && fnic->stop_rx_link_events != 1) { err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "fnic_alloc_rq_frame can't alloc" " frame\n"); } @@ -966,218 +585,179 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) int fnic_alloc_rq_frame(struct vnic_rq *rq) { struct fnic *fnic = vnic_dev_priv(rq->vdev); - struct sk_buff *skb; + void *buf; u16 len; dma_addr_t pa; - int r; + int ret; - len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; - skb = dev_alloc_skb(len); - if (!skb) { - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "Unable to allocate RQ sk_buff\n"); + len = FNIC_FRAME_HT_ROOM; + buf = kmalloc(len, GFP_ATOMIC); + if (!buf) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unable to allocate RQ buffer of size: %d\n", len); return -ENOMEM; } - skb_reset_mac_header(skb); - skb_reset_transport_header(skb); - skb_reset_network_header(skb); - skb_put(skb, len); - pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); + + pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE); if (dma_mapping_error(&fnic->pdev->dev, pa)) { - r = -ENOMEM; - printk(KERN_ERR "PCI mapping failed with error %d\n", r); - goto free_skb; + ret = -ENOMEM; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PCI mapping failed with error %d\n", ret); + goto free_buf; } - fnic_queue_rq_desc(rq, skb, pa, len); + fnic_queue_rq_desc(rq, buf, pa, len); return 0; - -free_skb: - kfree_skb(skb); - return r; +free_buf: + kfree(buf); + return ret; } void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) { - struct fc_frame *fp = buf->os_buf; + void *rq_buf = buf->os_buf; struct fnic *fnic = vnic_dev_priv(rq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_FROM_DEVICE); - dev_kfree_skb(fp_skb(fp)); + kfree(rq_buf); buf->os_buf = NULL; } -/** - * fnic_eth_send() - Send Ethernet frame. - * @fip: fcoe_ctlr instance. - * @skb: Ethernet Frame, FIP, without VLAN encapsulation. 
- */ -void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - struct fnic *fnic = fnic_from_ctlr(fip); - struct vnic_wq *wq = &fnic->wq[0]; - dma_addr_t pa; - struct ethhdr *eth_hdr; - struct vlan_ethhdr *vlan_hdr; - unsigned long flags; - - if (!fnic->vlan_hw_insert) { - eth_hdr = (struct ethhdr *)skb_mac_header(skb); - vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr)); - memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); - vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); - vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; - vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - } else { - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - } - - pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(&fnic->pdev->dev, pa)) { - printk(KERN_ERR "DMA mapping failed\n"); - goto free_skb; - } - - spin_lock_irqsave(&fnic->wq_lock[0], flags); - if (!vnic_wq_desc_avail(wq)) - goto irq_restore; - - fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, - 0 /* hw inserts cos value */, - fnic->vlan_id, 1); - spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - return; - -irq_restore: - spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE); -free_skb: - kfree_skb(skb); -} - /* * Send FC frame. */ -static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) +static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len) { struct vnic_wq *wq = &fnic->wq[0]; - struct sk_buff *skb; dma_addr_t pa; - struct ethhdr *eth_hdr; - struct vlan_ethhdr *vlan_hdr; - struct fcoe_hdr *fcoe_hdr; - struct fc_frame_header *fh; - u32 tot_len, eth_hdr_len; int ret = 0; unsigned long flags; - fh = fc_frame_header_get(fp); - skb = fp_skb(fp); + pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE); - if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && - fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) - return 0; - - if (!fnic->vlan_hw_insert) { - eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); - vlan_hdr = skb_push(skb, eth_hdr_len); - eth_hdr = (struct ethhdr *)vlan_hdr; - vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); - vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); - vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); - fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); - } else { - eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); - eth_hdr = skb_push(skb, eth_hdr_len); - eth_hdr->h_proto = htons(ETH_P_FCOE); - fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); - } - - if (fnic->ctlr.map_dest) - fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); - else - memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); - memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); - - tot_len = skb->len; - BUG_ON(tot_len % 4); - - memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); - fcoe_hdr->fcoe_sof = fr_sof(fp); - if (FC_FCOE_VER) - FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); - - pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); - if (dma_mapping_error(&fnic->pdev->dev, pa)) { - ret = -ENOMEM; - printk(KERN_ERR "DMA map failed with error %d\n", ret); - goto free_skb_on_err; - } - - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, - (char *)eth_hdr, tot_len)) != 0) { - 
printk(KERN_ERR "fnic ctlr frame trace error!!!"); + if ((fnic_fc_trace_set_data(fnic->fnic_num, + FNIC_FC_SEND | 0x80, (char *) frame, + frame_len)) != 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic ctlr frame trace error"); } spin_lock_irqsave(&fnic->wq_lock[0], flags); if (!vnic_wq_desc_avail(wq)) { - dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE); + dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "vnic work queue descriptor is not available"); ret = -1; - goto irq_restore; + goto fnic_send_frame_end; } - fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), - 0 /* hw inserts cos value */, - fnic->vlan_id, 1, 1, 1); + /* hw inserts cos value */ + fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T, + 0, fnic->vlan_id, 1, 1, 1); -irq_restore: +fnic_send_frame_end: spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - -free_skb_on_err: - if (ret) - dev_kfree_skb_any(fp_skb(fp)); - return ret; } -/* - * fnic_send - * Routine to send a raw frame +/** + * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and FCoE + * info. This interface is used only in the non fast path. (login, fabric + * registrations etc.) + * + * @fnic: fnic instance + * @frame: frame structure with FC payload filled in + * @frame_size: length of the frame to be sent + * @srcmac: source mac address + * @dstmac: destination mac address + * + * Called with the fnic lock held. */ -int fnic_send(struct fc_lport *lp, struct fc_frame *fp) +static int +fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size, + uint8_t *srcmac, uint8_t *dstmac) { - struct fnic *fnic = lport_priv(lp); - unsigned long flags; + struct ethhdr *pethhdr; + struct fcoe_hdr *pfcoe_hdr; + struct fnic_frame_list *frame_elem; + int len = frame_size; + int ret; + struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame + + FNIC_ETH_FCOE_HDRS_OFFSET); - if (fnic->in_remove) { - dev_kfree_skb(fp_skb(fp)); - return -1; - } + pethhdr = (struct ethhdr *) frame; + pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE); + memcpy(pethhdr->h_source, srcmac, ETH_ALEN); + memcpy(pethhdr->h_dest, dstmac, ETH_ALEN); + + pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr)); + pfcoe_hdr->fcoe_sof = FC_SOF_I3; /* * Queue frame if in a transitional state. * This occurs while registering the Port_ID / MAC address after FLOGI. 
*/ - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic->state != FNIC_IN_FC_MODE) + && (fnic->state != FNIC_IN_ETH_MODE)) { + frame_elem = mempool_alloc(fnic->frame_elem_pool, + GFP_ATOMIC | __GFP_ZERO); + if (!frame_elem) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for frame elem"); + return -ENOMEM; + } + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n", + ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id), + fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); + frame_elem->fp = frame; + frame_elem->frame_len = len; + list_add_tail(&frame_elem->links, &fnic->tx_queue); return 0; } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return fnic_send_frame(fnic, fp); + fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing"); + + ret = fnic_send_frame(fnic, frame, len); + return ret; +} + +void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame, + int frame_size) +{ + struct fnic *fnic = iport->fnic; + uint8_t *dstmac, *srcmac; + + /* If module unload is in-progress, don't send */ + if (fnic->in_remove) + return; + + if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) { + srcmac = iport->fpma; + dstmac = iport->fcfmac; + } else { + srcmac = iport->hwmac; + dstmac = FCOE_ALL_FCF_MAC; + } + + fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac); +} + +int +fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame, + int frame_size) +{ + struct fnic *fnic = iport->fnic; + + if (fnic->in_remove) + return -1; + + fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing"); + return fnic_send_frame(fnic, frame, frame_size); } /** @@ -1193,64 +773,87 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp) void fnic_flush_tx(struct work_struct *work) { struct fnic *fnic = container_of(work, struct fnic, flush_work); - struct sk_buff *skb; struct fc_frame *fp; + struct fnic_frame_list *cur_frame, *next; - while ((skb = skb_dequeue(&fnic->tx_queue))) { - fp = (struct fc_frame *)skb; - fnic_send_frame(fnic, fp); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flush queued frames"); + + list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) { + fp = cur_frame->fp; + list_del(&cur_frame->links); + fnic_send_frame(fnic, fp, cur_frame->frame_len); + mempool_free(cur_frame, fnic->frame_elem_pool); } } -/** - * fnic_set_eth_mode() - put fnic into ethernet mode. - * @fnic: fnic device - * - * Called without fnic lock held. 
- */ -static void fnic_set_eth_mode(struct fnic *fnic) +int +fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id, + void *fp) { - unsigned long flags; - enum fnic_state old_state; + struct fnic *fnic = iport->fnic; + struct ethhdr *ethhdr; int ret; - spin_lock_irqsave(&fnic->fnic_lock, flags); -again: - old_state = fnic->state; - switch (old_state) { - case FNIC_IN_FC_MODE: - case FNIC_IN_ETH_TRANS_FC_MODE: - default: - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id, + fp, fnic->state); - ret = fnic_fw_reset_handler(fnic); + if (fp) { + ethhdr = (struct ethhdr *) fp; + vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest); + } - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) - goto again; - if (ret) - fnic->state = old_state; - break; - - case FNIC_IN_FC_TRANS_ETH_MODE: - case FNIC_IN_ETH_MODE: - break; + /* Change state to reflect transition to FC mode */ + if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) + fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unexpected fnic state while processing FLOGI response\n"); + return -1; + } + + /* + * Send FLOGI registration to firmware to set up FC mode. + * The new address will be set up when registration completes. + */ + ret = fnic_flogi_reg_handler(fnic, port_id); + if (ret < 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI registration error ret: %d fnic state: %d\n", + ret, fnic->state); + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) + fnic->state = FNIC_IN_ETH_MODE; + + return -1; + } + iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI registration success\n"); + return 0; +} + +void fnic_free_txq(struct list_head *head) +{ + struct fnic_frame_list *cur_frame, *next; + + list_for_each_entry_safe(cur_frame, next, head, links) { + list_del(&cur_frame->links); + kfree(cur_frame->fp); + kfree(cur_frame); } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); } static void fnic_wq_complete_frame_send(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) { - struct sk_buff *skb = buf->os_buf; - struct fc_frame *fp = (struct fc_frame *)skb; struct fnic *fnic = vnic_dev_priv(wq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(fp_skb(fp)); + mempool_free(buf->os_buf, fnic->frame_pool); buf->os_buf = NULL; } @@ -1288,119 +891,218 @@ int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) { - struct fc_frame *fp = buf->os_buf; struct fnic *fnic = vnic_dev_priv(wq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_TO_DEVICE); - dev_kfree_skb(fp_skb(fp)); + kfree(buf->os_buf); buf->os_buf = NULL; } -void fnic_fcoe_reset_vlans(struct fnic *fnic) +void +fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport, + unsigned long flags) +{ + struct fnic *fnic = iport->fnic; + struct fc_rport *rport; + struct fc_rport_identifiers ids; + struct rport_dd_data_s *rdd_data; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Adding rport fcid: 0x%x", tport->fcid); + + ids.node_name = tport->wwnn; + ids.port_name = tport->wwpn; + ids.port_id = tport->fcid; + ids.roles = FC_RPORT_ROLE_FCP_TARGET; + + 
spin_unlock_irqrestore(&fnic->fnic_lock, flags); + rport = fc_remote_port_add(fnic->host, 0, &ids); + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (!rport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to add rport for tport: 0x%x", tport->fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Added rport fcid: 0x%x", tport->fcid); + + /* Mimic these assignments in queuecommand to avoid timing issues */ + rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; + rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; + rdd_data = rport->dd_data; + rdd_data->tport = tport; + rdd_data->iport = iport; + tport->rport = rport; + tport->flags |= FNIC_FDLS_SCSI_REGISTERED; +} + +void +fnic_fdls_remove_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, unsigned long flags) +{ + struct fnic *fnic = iport->fnic; + struct rport_dd_data_s *rdd_data; + + struct fc_rport *rport; + + if (!tport) + return; + + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE); + rport = tport->rport; + + if (rport) { + /* tport resource release will be done + * after fnic_terminate_rport_io() + */ + tport->flags |= FNIC_FDLS_TPORT_DELETED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + /* Interface to scsi_fc_transport */ + fc_remote_port_delete(rport); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Deregistered and freed tport fcid: 0x%x from scsi transport fc", + tport->fcid); + + /* + * the dd_data is allocated by fc transport + * of size dd_fcrport_size + */ + rdd_data = rport->dd_data; + rdd_data->tport = NULL; + rdd_data->iport = NULL; + list_del(&tport->links); + kfree(tport); + } else { + fnic_del_tport_timer_sync(fnic, tport); + list_del(&tport->links); + kfree(tport); + } +} + +void fnic_delete_fcp_tports(struct fnic *fnic) { + struct fnic_tport_s *tport, *next; unsigned long flags; - struct fcoe_vlan *vlan; - struct fcoe_vlan *next; - /* - * indicate a link down to fcoe so that all fcf's are free'd - * might not be required since we did this before sending vlan - * discovery request - */ - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (!list_empty(&fnic->vlans)) { - list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { - list_del(&vlan->list); - kfree(vlan); - } + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "removing fcp rport fcid: 0x%x", tport->fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); + fnic_del_tport_timer_sync(fnic, tport); + fnic_fdls_remove_tport(&fnic->iport, tport, flags); } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); } -void fnic_handle_fip_timer(struct fnic *fnic) +/** + * fnic_tport_event_handler() - Handler for remote port events + * in the tport_event_queue. 
+ * + * @work: Handle to the remote port being dequeued + */ +void fnic_tport_event_handler(struct work_struct *work) { + struct fnic *fnic = container_of(work, struct fnic, tport_work); + struct fnic_tport_event_s *cur_evt, *next; unsigned long flags; - struct fcoe_vlan *vlan; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - u64 sol_time; + struct fnic_tport_s *tport; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; + list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { + tport = cur_evt->arg1; + switch (cur_evt->event) { + case TGT_EV_RPORT_ADD: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Add rport event"); + if (tport->state == FDLS_TGT_STATE_READY) { + fnic_fdls_add_tport(&fnic->iport, + (struct fnic_tport_s *) cur_evt->arg1, flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Target not ready. Add rport event dropped: 0x%x", + tport->fcid); + } + break; + case TGT_EV_RPORT_DEL: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remove rport event"); + if (tport->state == FDLS_TGT_STATE_OFFLINING) { + fnic_fdls_remove_tport(&fnic->iport, + (struct fnic_tport_s *) cur_evt->arg1, flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "remove rport event dropped tport fcid: 0x%x", + tport->fcid); + } + break; + case TGT_EV_TPORT_DELETE: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Delete tport event"); + fdls_delete_tport(tport->iport, tport); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unknown tport event"); + break; + } + list_del(&cur_evt->links); + kfree(cur_evt); } spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} - if (fnic->ctlr.mode == FIP_MODE_NON_FIP) - return; +void fnic_flush_tport_event_list(struct fnic *fnic) +{ + struct fnic_tport_event_s *cur_evt, *next; + unsigned long flags; - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (list_empty(&fnic->vlans)) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - /* no vlans available, try again */ - if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) - if (printk_ratelimit()) - shost_printk(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - return; + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { + list_del(&cur_evt->links); + kfree(cur_evt); } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fip_timer: vlan %d state %d sol_count %d\n", - vlan->vid, vlan->state, vlan->sol_count); - switch (vlan->state) { - case FIP_VLAN_USED: - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "FIP VLAN is selected for FC transaction\n"); - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - break; - case FIP_VLAN_FAILED: - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - /* if all vlans are in failed state, restart vlan disc */ - if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) - if (printk_ratelimit()) - shost_printk(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - break; - case FIP_VLAN_SENT: - if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { - /* - * no response on this vlan, remove from the list. 
-			 * Try the next vlan
-			 */
-			FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-				  "Dequeue this VLAN ID %d from list\n",
-				  vlan->vid);
-			list_del(&vlan->list);
-			kfree(vlan);
-			vlan = NULL;
-			if (list_empty(&fnic->vlans)) {
-				/* we exhausted all vlans, restart vlan disc */
-				spin_unlock_irqrestore(&fnic->vlans_lock,
-							flags);
-				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
-					  "fip_timer: vlan list empty, "
-					  "trigger vlan disc\n");
-				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
-				return;
-			}
-			/* check the next vlan */
-			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
-						list);
-			fnic->set_vlan(fnic, vlan->vid);
-			vlan->state = FIP_VLAN_SENT; /* sent now */
-		}
-		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
-		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
-		vlan->sol_count++;
-		sol_time = jiffies + msecs_to_jiffies
-			(FCOE_CTLR_START_DELAY);
-		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
-		break;
+void fnic_reset_work_handler(struct work_struct *work)
+{
+	struct fnic *cur_fnic, *next_fnic;
+	unsigned long reset_fnic_list_lock_flags;
+	int host_reset_ret_code;
+
+	/*
+	 * This is a single thread. It is per fnic module, not per fnic.
+	 * All the fnics that need to be reset
+	 * have been serialized via the reset fnic list.
+	 */
+	spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags);
+	list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) {
+		list_del(&cur_fnic->links);
+		spin_unlock_irqrestore(&reset_fnic_list_lock,
+				       reset_fnic_list_lock_flags);
+
+		dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n",
+			cur_fnic->fnic_num);
+		host_reset_ret_code = fnic_host_reset(cur_fnic->host);
+		dev_err(&cur_fnic->pdev->dev,
+			"fnic: <%d>: returned from host reset with status: %d\n",
+			cur_fnic->fnic_num, host_reset_ret_code);
+
+		spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+		cur_fnic->pc_rscn_handling_status =
+			PC_RSCN_HANDLING_NOT_IN_PROGRESS;
+		spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+
+		spin_lock_irqsave(&reset_fnic_list_lock,
+				  reset_fnic_list_lock_flags);
+	}
+	spin_unlock_irqrestore(&reset_fnic_list_lock,
+			       reset_fnic_list_lock_flags);
+}
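The reset worker above drains a module-global list, dropping reset_fnic_list_lock around each host reset so new entries can be queued while a reset runs. A minimal sketch of the producer side, assuming the reset_fnic_list, reset_fnic_list_lock, reset_fnic_work and reset_fnic_work_queue objects this patch adds in fnic_main.c; the helper itself is hypothetical, not part of the patch:

static void sketch_queue_fnic_reset(struct fnic *fnic)
{
	unsigned long flags;

	/* Serialize this fnic onto the module-wide reset list. */
	spin_lock_irqsave(&reset_fnic_list_lock, flags);
	list_add_tail(&fnic->links, &reset_fnic_list);
	spin_unlock_irqrestore(&reset_fnic_list_lock, flags);

	/* Kick the single-threaded worker; it resets one fnic at a time. */
	queue_work(reset_fnic_work_queue, &reset_fnic_work);
}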
diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h
new file mode 100644
index 000000000000..8e610b65ad57
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fdls.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _FNIC_FDLS_H_
+#define _FNIC_FDLS_H_
+
+#include "fnic_stats.h"
+#include "fdls_fc.h"
+
+/* FDLS - Fabric discovery and login services
+ * -> VLAN discovery
+ * -> retry every retry-delay seconds until it succeeds.
+ * <- List of VLANs
+ *
+ * -> Solicitation
+ * <- Solicitation response (Advertisement)
+ *
+ * -> FCF selection & FLOGI (FLOGI timeout - 2 * E_D_TOV)
+ * <- FLOGI response
+ *
+ * -> FCF keep alive
+ * <- FCF keep alive
+ *
+ * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg
+ *
+ * -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- SCR response
+ * -> Retry SCR - Number of retries 2
+ *
+ * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV)
+ * -> Retry on BUSY until it succeeds
+ * -> 2 retries on timeout
+ *
+ * -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2)
+ * -> Ignore if both retries fail.
+ *
+ * Session establishment with targets
+ * For each PWWN
+ * -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI. Num retries using vnic.cfg
+ *
+ * -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PRLI response
+ * -> Retry PRLI. Num retries using vnic.cfg
+ *
+ */
+
+#define FDLS_RETRY_COUNT 2
+
+/*
+ * OXID encoding:
+ * bits 0-8: oxid idx - allocated from pool
+ * bits 9-13: oxid frame code from fnic_oxid_frame_type_e
+ * bits 14-15: all zeros
+ */
+#define FNIC_OXID_POOL_SZ	(512)	/* always power of 2 */
+#define FNIC_OXID_ENCODE(idx, frame_type)	(frame_type | idx)
+#define FNIC_FRAME_MASK		0xFE00
+#define FNIC_FRAME_TYPE(oxid)	(oxid & FNIC_FRAME_MASK)
+#define FNIC_OXID_IDX(oxid)	((oxid) & (FNIC_OXID_POOL_SZ - 1))
+
+#define OXID_RECLAIM_TOV(iport) (2 * iport->r_a_tov) /* in milliseconds */
+
+#define FNIC_FDLS_FABRIC_ABORT_ISSUED	0x1
+#define FNIC_FDLS_FPMA_LEARNT		0x2
+
+/* tport flags */
+#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST	0x1
+#define FNIC_FDLS_TGT_ABORT_ISSUED	0x2
+#define FNIC_FDLS_TPORT_SEND_ADISC	0x4
+#define FNIC_FDLS_RETRY_FRAME		0x8
+#define FNIC_FDLS_TPORT_BUSY		0x10
+#define FNIC_FDLS_TPORT_TERMINATING	0x20
+#define FNIC_FDLS_TPORT_DELETED		0x40
+#define FNIC_FDLS_SCSI_REGISTERED	0x200
+
+/* Retry supported by rport (returned by prli service parameters) */
+#define FDLS_FC_RP_FLAGS_RETRY		0x1
+
+#define fdls_set_state(_fdls_fabric, _state)	((_fdls_fabric)->state = _state)
+#define fdls_get_state(_fdls_fabric)		((_fdls_fabric)->state)
+
+#define FNIC_FDMI_ACTIVE	0x8
+#define FNIC_FIRST_LINK_UP	0x2
+
+#define fdls_set_tport_state(_tport, _state)	(_tport->state = _state)
+#define fdls_get_tport_state(_tport)	(_tport->state)
+
+#define FNIC_PORTSPEED_10GBIT	1
+#define FNIC_FRAME_HT_ROOM	(2148)
+#define FNIC_FCOE_FRAME_MAXSZ	(2112)
+
+
+#define FNIC_FRAME_TYPE_FABRIC_FLOGI	0x1000
+#define FNIC_FRAME_TYPE_FABRIC_PLOGI	0x1200
+#define FNIC_FRAME_TYPE_FABRIC_RPN	0x1400
+#define FNIC_FRAME_TYPE_FABRIC_RFT	0x1600
+#define FNIC_FRAME_TYPE_FABRIC_RFF	0x1800
+#define FNIC_FRAME_TYPE_FABRIC_SCR	0x1A00
+#define FNIC_FRAME_TYPE_FABRIC_GPN_FT	0x1C00
+#define FNIC_FRAME_TYPE_FABRIC_LOGO	0x1E00
+#define FNIC_FRAME_TYPE_FDMI_PLOGI	0x2000
+#define FNIC_FRAME_TYPE_FDMI_RHBA	0x2200
+#define FNIC_FRAME_TYPE_FDMI_RPA	0x2400
+#define FNIC_FRAME_TYPE_TGT_PLOGI	0x2600
+#define FNIC_FRAME_TYPE_TGT_PRLI	0x2800
+#define FNIC_FRAME_TYPE_TGT_ADISC	0x2A00
+#define FNIC_FRAME_TYPE_TGT_LOGO	0x2C00
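The OXID macros above pack a 9-bit pool index (bits 0-8) and a frame-type code (bits 9-13) into a single exchange ID, so a response's OXID alone tells the driver which pool slot to free and which kind of request it answers. A self-contained userspace illustration, reusing the macros verbatim with the arithmetic worked by hand:

#include <stdio.h>
#include <stdint.h>

#define FNIC_OXID_POOL_SZ	(512)	/* always power of 2 */
#define FNIC_OXID_ENCODE(idx, frame_type)	(frame_type | idx)
#define FNIC_FRAME_MASK		0xFE00
#define FNIC_FRAME_TYPE(oxid)	(oxid & FNIC_FRAME_MASK)
#define FNIC_OXID_IDX(oxid)	((oxid) & (FNIC_OXID_POOL_SZ - 1))
#define FNIC_FRAME_TYPE_FABRIC_PLOGI	0x1200

int main(void)
{
	/* Encode pool slot 37 as a fabric PLOGI exchange: 0x1200 | 0x25. */
	uint16_t oxid = FNIC_OXID_ENCODE(37, FNIC_FRAME_TYPE_FABRIC_PLOGI);

	printf("oxid=0x%04x frame_type=0x%04x idx=%u\n",
	       (unsigned)oxid, (unsigned)FNIC_FRAME_TYPE(oxid),
	       (unsigned)FNIC_OXID_IDX(oxid));
	/* prints: oxid=0x1225 frame_type=0x1200 idx=37 */
	return 0;
}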
+struct fnic_fip_fcf_s {
+	uint16_t vlan_id;
+	uint8_t fcf_mac[6];
+	uint8_t fcf_priority;
+	uint32_t fka_adv_period;
+	uint8_t ka_disabled;
+};
+
+enum fnic_fdls_state_e {
+	FDLS_STATE_INIT = 0,
+	FDLS_STATE_LINKDOWN,
+	FDLS_STATE_FABRIC_LOGO,
+	FDLS_STATE_FLOGO_DONE,
+	FDLS_STATE_FABRIC_FLOGI,
+	FDLS_STATE_FABRIC_PLOGI,
+	FDLS_STATE_RPN_ID,
+	FDLS_STATE_REGISTER_FC4_TYPES,
+	FDLS_STATE_REGISTER_FC4_FEATURES,
+	FDLS_STATE_SCR,
+	FDLS_STATE_GPN_FT,
+	FDLS_STATE_TGT_DISCOVERY,
+	FDLS_STATE_RSCN_GPN_FT,
+	FDLS_STATE_SEND_GPNFT
+};
+
+struct fnic_fdls_fabric_s {
+	enum fnic_fdls_state_e state;
+	uint32_t flags;
+	struct list_head tport_list; /* List of discovered tports */
+	struct timer_list retry_timer;
+	int del_timer_inprogress;
+	int del_fdmi_timer_inprogress;
+	int retry_counter;
+	int timer_pending;
+	int fdmi_retry;
+	struct timer_list fdmi_timer;
+	int fdmi_pending;
+};
+
+struct fnic_fdls_fip_s {
+	uint32_t state;
+	uint32_t flogi_retry;
+};
+
+/* Message to tport_event_handler */
+enum fnic_tgt_msg_id {
+	TGT_EV_NONE = 0,
+	TGT_EV_RPORT_ADD,
+	TGT_EV_RPORT_DEL,
+	TGT_EV_TPORT_DELETE,
+	TGT_EV_REMOVE
+};
+
+struct fnic_tport_event_s {
+	struct list_head links;
+	enum fnic_tgt_msg_id event;
+	void *arg1;
+};
+
+enum fdls_tgt_state_e {
+	FDLS_TGT_STATE_INIT = 0,
+	FDLS_TGT_STATE_PLOGI,
+	FDLS_TGT_STATE_PRLI,
+	FDLS_TGT_STATE_READY,
+	FDLS_TGT_STATE_LOGO_RECEIVED,
+	FDLS_TGT_STATE_ADISC,
+	FDL_TGT_STATE_PLOGO,
+	FDLS_TGT_STATE_OFFLINING,
+	FDLS_TGT_STATE_OFFLINE
+};
+
+struct fnic_tport_s {
+	struct list_head links; /* To link the tports */
+	enum fdls_tgt_state_e state;
+	uint32_t flags;
+	uint32_t fcid;
+	uint64_t wwpn;
+	uint64_t wwnn;
+	uint16_t active_oxid;
+	uint16_t tgt_flags;
+	atomic_t in_flight; /* io counter */
+	uint16_t max_payload_size;
+	uint16_t r_a_tov;
+	uint16_t e_d_tov;
+	uint16_t lun0_delay;
+	int max_concur_seqs;
+	uint32_t fcp_csp;
+	struct timer_list retry_timer;
+	int del_timer_inprogress;
+	int retry_counter;
+	int timer_pending;
+	unsigned int num_pending_cmds;
+	int nexus_restart_count;
+	int exch_reset_in_progress;
+	void *iport;
+	struct work_struct tport_del_work;
+	struct completion *tport_del_done;
+	struct fc_rport *rport;
+	char str_wwpn[20];
+	char str_wwnn[20];
+};
+
+/* OXID pool related structures */
+struct reclaim_entry_s {
+	struct list_head links;
+	/* oxid that needs to be freed after 2*r_a_tov */
+	uint16_t oxid_idx;
+	/* in jiffies; used to compute the waiting time */
+	unsigned long expires;
+	unsigned long *bitmap;
+};
+
+/* used for allocating oxids for fabric and fdmi requests */
+struct fnic_oxid_pool_s {
+	DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ);
+	int sz;	/* size of the pool or block */
+	int next_idx;	/* used for cycling through the oxid pool */
+
+	/* retry schedule free */
+	DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ);
+	struct delayed_work schedule_oxid_free_retry;
+
+	/* List of oxids that need to be freed and reclaimed.
+	 * This list is shared by all the oxid pools
+	 */
+	struct list_head oxid_reclaim_list;
+	/* Work associated with reclaim list */
+	struct delayed_work oxid_reclaim_work;
+};
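An aborted exchange's OXID cannot be recycled immediately: the index is parked on oxid_reclaim_list and only returned to the bitmap after 2 * R_A_TOV (OXID_RECLAIM_TOV). A hedged sketch of queueing one reclaim entry against the pool above; the helper is illustrative only, and it assumes the struct fnic_iport_s defined later in this header plus an already-allocated entry:

static void sketch_schedule_oxid_reclaim(struct fnic_iport_s *iport,
					 struct reclaim_entry_s *entry,
					 uint16_t oxid_idx)
{
	/* Park the index until 2 * R_A_TOV has elapsed. */
	entry->oxid_idx = oxid_idx;
	entry->expires = jiffies + msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
	list_add_tail(&entry->links, &iport->oxid_pool.oxid_reclaim_list);

	/* Let the reclaim worker free it once the quarantine expires. */
	schedule_delayed_work(&iport->oxid_pool.oxid_reclaim_work,
			      msecs_to_jiffies(OXID_RECLAIM_TOV(iport)));
}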
+/* iport */
+enum fnic_iport_state_e {
+	FNIC_IPORT_STATE_INIT = 0,
+	FNIC_IPORT_STATE_LINK_WAIT,
+	FNIC_IPORT_STATE_FIP,
+	FNIC_IPORT_STATE_FABRIC_DISC,
+	FNIC_IPORT_STATE_READY
+};
+
+struct fnic_iport_s {
+	enum fnic_iport_state_e state;
+	struct fnic *fnic;
+	uint64_t boot_time;
+	uint32_t flags;
+	int usefip;
+	uint8_t hwmac[6];		/* HW MAC Addr */
+	uint8_t fpma[6];		/* Fabric Provided MA */
+	uint8_t fcfmac[6];		/* MAC addr of Fabric */
+	uint16_t vlan_id;
+	uint32_t fcid;
+
+	/* oxid pool */
+	struct fnic_oxid_pool_s oxid_pool;
+
+	/*
+	 * Fabric requests are serialized; only one request at a time.
+	 * Track the oxid here for sending an abort.
+	 */
+	uint16_t active_oxid_fabric_req;
+	/* fdmi only */
+	uint16_t active_oxid_fdmi_plogi;
+	uint16_t active_oxid_fdmi_rhba;
+	uint16_t active_oxid_fdmi_rpa;
+
+	struct fnic_fip_fcf_s selected_fcf;
+	struct fnic_fdls_fip_s fip;
+	struct fnic_fdls_fabric_s fabric;
+	struct list_head tport_list;
+	struct list_head tport_list_pending_del;
+	/* list of tports for which we are yet to send PLOGO */
+	struct list_head inprocess_tport_list;
+	struct list_head deleted_tport_list;
+	struct work_struct tport_event_work;
+	uint32_t e_d_tov; /* msec */
+	uint32_t r_a_tov; /* msec */
+	uint32_t link_supported_speeds;
+	uint32_t max_flogi_retries;
+	uint32_t max_plogi_retries;
+	uint32_t plogi_timeout;
+	uint32_t service_params;
+	uint64_t wwpn;
+	uint64_t wwnn;
+	uint16_t max_payload_size;
+	spinlock_t deleted_tport_lst_lock;
+	struct completion *flogi_reg_done;
+	struct fnic_iport_stats iport_stats;
+	char str_wwpn[20];
+	char str_wwnn[20];
+};
+
+struct rport_dd_data_s {
+	struct fnic_tport_s *tport;
+	struct fnic_iport_s *iport;
+};
+
+enum fnic_recv_frame_type_e {
+	FNIC_FABRIC_FLOGI_RSP = 1,
+	FNIC_FABRIC_PLOGI_RSP,
+	FNIC_FABRIC_RPN_RSP,
+	FNIC_FABRIC_RFT_RSP,
+	FNIC_FABRIC_RFF_RSP,
+	FNIC_FABRIC_SCR_RSP,
+	FNIC_FABRIC_GPN_FT_RSP,
+	FNIC_FABRIC_BLS_ABTS_RSP,
+	FNIC_FDMI_PLOGI_RSP,
+	FNIC_FDMI_REG_HBA_RSP,
+	FNIC_FDMI_RPA_RSP,
+	FNIC_FDMI_BLS_ABTS_RSP,
+	FNIC_FABRIC_LOGO_RSP,
+
+	/* responses to target requests */
+	FNIC_TPORT_PLOGI_RSP,
+	FNIC_TPORT_PRLI_RSP,
+	FNIC_TPORT_ADISC_RSP,
+	FNIC_TPORT_BLS_ABTS_RSP,
+	FNIC_TPORT_LOGO_RSP,
+
+	/* unsolicited requests */
+	FNIC_BLS_ABTS_REQ,
+	FNIC_ELS_PLOGI_REQ,
+	FNIC_ELS_RSCN_REQ,
+	FNIC_ELS_LOGO_REQ,
+	FNIC_ELS_ECHO_REQ,
+	FNIC_ELS_ADISC,
+	FNIC_ELS_RLS,
+	FNIC_ELS_RRQ,
+	FNIC_ELS_UNSUPPORTED_REQ,
+};
+
+enum fnic_port_speeds {
+	DCEM_PORTSPEED_NONE = 0,
+	DCEM_PORTSPEED_1G = 1000,
+	DCEM_PORTSPEED_2G = 2000,
+	DCEM_PORTSPEED_4G = 4000,
+	DCEM_PORTSPEED_8G = 8000,
+	DCEM_PORTSPEED_10G = 10000,
+	DCEM_PORTSPEED_16G = 16000,
+	DCEM_PORTSPEED_20G = 20000,
+	DCEM_PORTSPEED_25G = 25000,
+	DCEM_PORTSPEED_32G = 32000,
+	DCEM_PORTSPEED_40G = 40000,
+	DCEM_PORTSPEED_4x10G = 41000,
+	DCEM_PORTSPEED_50G = 50000,
+	DCEM_PORTSPEED_64G = 64000,
+	DCEM_PORTSPEED_100G = 100000,
+	DCEM_PORTSPEED_128G = 128000,
+};
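struct rport_dd_data_s is the cookie the driver stores in each fc_rport's dd_data (sized via dd_fcrport_size in the transport template and filled in by fnic_fdls_add_tport() earlier in this patch). A sketch of the reverse lookup a SCSI-side path could use; the helper name is hypothetical:

static struct fnic_tport_s *sketch_tport_from_cmd(struct scsi_cmnd *sc)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct rport_dd_data_s *rdd_data;

	if (!rport)
		return NULL;

	/* dd_data was allocated by the fc transport at rport add time. */
	rdd_data = rport->dd_data;
	return rdd_data ? rdd_data->tport : NULL;
}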
+/* Function Declarations */
+/* fdls_disc.c */
+void fnic_fdls_disc_init(struct fnic_iport_s *iport);
+void fnic_fdls_disc_start(struct fnic_iport_s *iport);
+void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
+			  int len, int fchdr_offset);
+void fnic_fdls_link_down(struct fnic_iport_s *iport);
+int fdls_init_frame_pool(struct fnic_iport_s *iport);
+uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport);
+uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
+			 uint16_t *active_oxid);
+void fdls_free_oxid(struct fnic_iport_s *iport,
+		    uint16_t oxid, uint16_t *active_oxid);
+void fdls_tgt_logout(struct fnic_iport_s *iport,
+		     struct fnic_tport_s *tport);
+void fnic_del_fabric_timer_sync(struct fnic *fnic);
+void fnic_del_tport_timer_sync(struct fnic *fnic,
+			       struct fnic_tport_s *tport);
+void fdls_send_fabric_logo(struct fnic_iport_s *iport);
+int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
+					  struct fc_frame_header *fchdr);
+void fdls_send_tport_abts(struct fnic_iport_s *iport,
+			  struct fnic_tport_s *tport);
+bool fdls_delete_tport(struct fnic_iport_s *iport,
+		       struct fnic_tport_s *tport);
+void fdls_fdmi_timer_callback(struct timer_list *t);
+
+/* fnic_fcs.c */
+void fnic_fdls_init(struct fnic *fnic, int usefip);
+void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
+			  int frame_size);
+int fnic_send_fip_frame(struct fnic_iport_s *iport,
+			void *frame, int frame_size);
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+			       uint8_t *fcid);
+void fnic_fdls_add_tport(struct fnic_iport_s *iport,
+			 struct fnic_tport_s *tport, unsigned long flags);
+void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
+			    struct fnic_tport_s *tport,
+			    unsigned long flags);
+
+/* fip.c */
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+void fnic_common_fip_cleanup(struct fnic *fnic);
+int fdls_fip_recv_frame(struct fnic *fnic, void *frame);
+void fnic_handle_fcs_ka_timer(struct timer_list *t);
+void fnic_handle_enode_ka_timer(struct timer_list *t);
+void fnic_handle_vn_ka_timer(struct timer_list *t);
+void fnic_handle_fip_timer(struct timer_list *t);
+extern void fdls_fabric_timer_callback(struct timer_list *t);
+
+/* fnic_scsi.c */
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
+void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
+int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+			      void *fp);
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+					     uint32_t fcid);
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+					     uint64_t wwpn);
+
+#endif /* _FNIC_FDLS_H_ */
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
deleted file mode 100644
index 79f53029737b..000000000000
--- a/drivers/scsi/fnic/fnic_fip.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- */
-
-#ifndef _FNIC_FIP_H_
-#define _FNIC_FIP_H_
-
-
-#define FCOE_CTLR_START_DELAY    2000	/* ms after first adv. to choose FCF */
-#define FCOE_CTLR_FIPVLAN_TOV    2000	/* ms after FIP VLAN disc */
-#define FCOE_CTLR_MAX_SOL        8
-
-#define FINC_MAX_FLOGI_REJECTS   8
-
-struct vlan {
-	__be16 vid;
-	__be16 type;
-};
-
-/*
- * VLAN entry.
- */
-struct fcoe_vlan {
-	struct list_head list;
-	u16 vid;		/* vlan ID */
-	u16 sol_count;		/* no. 
of sols sent */ - u16 state; /* state */ -}; - -enum fip_vlan_state { - FIP_VLAN_AVAIL = 0, /* don't do anything */ - FIP_VLAN_SENT = 1, /* sent */ - FIP_VLAN_USED = 2, /* succeed */ - FIP_VLAN_FAILED = 3, /* failed to response */ -}; - -struct fip_vlan { - struct ethhdr eth; - struct fip_header fip; - struct { - struct fip_mac_desc mac; - struct fip_wwn_desc wwnn; - } desc; -}; - -#endif /* __FINC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index 5895ead20e14..0d974e040ab7 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h @@ -7,6 +7,7 @@ #define _FNIC_IO_H_ #include <scsi/fc/fc_fcp.h> +#include "fnic_fdls.h" #define FNIC_DFLT_SG_DESC_CNT 32 #define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */ @@ -41,6 +42,8 @@ enum fnic_ioreq_state { }; struct fnic_io_req { + struct fnic_iport_s *iport; + struct fnic_tport_s *tport; struct host_sg_desc *sgl_list; /* sgl list */ void *sgl_list_alloc; /* sgl list address used for free */ dma_addr_t sense_buf_pa; /* dma address for sense buffer*/ @@ -55,15 +58,4 @@ struct fnic_io_req { unsigned int tag; struct scsi_cmnd *sc; /* midlayer's cmd pointer */ }; - -enum fnic_port_speeds { - DCEM_PORTSPEED_NONE = 0, - DCEM_PORTSPEED_1G = 1000, - DCEM_PORTSPEED_10G = 10000, - DCEM_PORTSPEED_20G = 20000, - DCEM_PORTSPEED_25G = 25000, - DCEM_PORTSPEED_40G = 40000, - DCEM_PORTSPEED_4x10G = 41000, - DCEM_PORTSPEED_100G = 100000, -}; #endif /* _FNIC_IO_H_ */ diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index ff85441c6cea..7ed50b11afa6 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -7,7 +7,7 @@ #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> -#include <scsi/libfc.h> +#include <scsi/scsi_transport_fc.h> #include <scsi/fc_frame.h> #include "vnic_dev.h" #include "vnic_intr.h" @@ -222,7 +222,7 @@ int fnic_request_intr(struct fnic *fnic) fnic->msix[i].devname, fnic->msix[i].devid); if (err) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "request_irq failed with error: %d\n", err); fnic_free_intr(fnic); @@ -250,10 +250,10 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs * (last INTR is used for WQ/RQ errors and notification area) */ - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "rq-array size: %d wq-array size: %d copy-wq array size: %d\n", n, m, o); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n", fnic->rq_count, fnic->raw_wq_count, fnic->wq_copy_count, fnic->cq_count); @@ -265,17 +265,17 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "allocated %d MSI-X vectors\n", vec_count); if (vec_count > 0) { if (vec_count < vecs) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "interrupts number mismatch: vec_count: %d vecs: %d\n", vec_count, vecs); if (vec_count < min_irqs) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "no interrupts for copy wq\n"); 
return 1; } @@ -287,7 +287,7 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) fnic->wq_copy_count = vec_count - n - m - 1; fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count; if (fnic->cq_count != vec_count - 1) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "CQ count: %d does not match MSI-X vector count: %d\n", fnic->cq_count, vec_count); fnic->cq_count = vec_count - 1; @@ -295,23 +295,23 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) fnic->intr_count = vec_count; fnic->err_intr_offset = fnic->rq_count + fnic->wq_count; - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "rq_count: %d raw_wq_count: %d copy_wq_base: %d\n", fnic->rq_count, fnic->raw_wq_count, fnic->copy_wq_base); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "wq_copy_count: %d wq_count: %d cq_count: %d\n", fnic->wq_copy_count, fnic->wq_count, fnic->cq_count); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "intr_count: %d err_intr_offset: %u", fnic->intr_count, fnic->err_intr_offset); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "fnic using MSI-X\n"); return 0; } @@ -351,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->intr_count = 1; fnic->err_intr_offset = 0; - FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Using MSI Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); @@ -377,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->cq_count = 3; fnic->intr_count = 3; - FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Using Legacy Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 29eead383eb9..9a357ff42085 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -16,21 +16,20 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/if_ether.h> -#include <linux/blk-mq-pci.h> #include <scsi/fc/fc_fip.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_tcq.h> -#include <scsi/libfc.h> #include <scsi/fc_frame.h> #include "vnic_dev.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" -#include "fnic_fip.h" #include "fnic.h" +#include "fnic_fdls.h" +#include "fdls_fc.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -39,12 +38,18 @@ static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; static struct kmem_cache *fnic_io_req_cache; +static struct kmem_cache *fdls_frame_cache; +static struct kmem_cache *fdls_frame_elem_cache; static LIST_HEAD(fnic_list); static DEFINE_SPINLOCK(fnic_list_lock); static DEFINE_IDA(fnic_ida); +struct work_struct reset_fnic_work; +LIST_HEAD(reset_fnic_list); +DEFINE_SPINLOCK(reset_fnic_list_lock); + /* Supported devices by fnic module */ -static struct pci_device_id fnic_id_table[] = { +static const struct pci_device_id fnic_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) }, { 0, } }; @@ -60,6 +65,14 @@ unsigned int fnic_log_level; module_param(fnic_log_level, int, 
S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); +unsigned int fnic_fdmi_support = 1; +module_param(fnic_fdmi_support, int, 0644); +MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support"); + +static unsigned int fnic_tgt_id_binding = 1; +module_param(fnic_tgt_id_binding, uint, 0644); +MODULE_PARM_DESC(fnic_tgt_id_binding, + "Target ID binding (0 for none. 1 for binding by WWPN (default))"); unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS; module_param(io_completions, int, S_IRUGO|S_IWUSR); @@ -79,15 +92,15 @@ static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); -static struct libfc_function_template fnic_transport_template = { - .frame_send = fnic_send, - .lport_set_port_id = fnic_set_port_id, - .fcp_abort_io = fnic_empty_scsi_cleanup, - .fcp_cleanup = fnic_empty_scsi_cleanup, - .exch_mgr_reset = fnic_exch_mgr_reset -}; +unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON; +module_param(pc_rscn_handling_feature_flag, uint, 0644); +MODULE_PARM_DESC(pc_rscn_handling_feature_flag, + "PCRSCN handling (0 for none. 1 to handle PCRSCN (default))"); -static int fnic_slave_alloc(struct scsi_device *sdev) +struct workqueue_struct *reset_fnic_work_queue; +struct workqueue_struct *fnic_fip_queue; + +static int fnic_sdev_init(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); @@ -105,8 +118,8 @@ static const struct scsi_host_template fnic_host_template = { .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = fnic_abort_cmd, .eh_device_reset_handler = fnic_device_reset, - .eh_host_reset_handler = fnic_host_reset, - .slave_alloc = fnic_slave_alloc, + .eh_host_reset_handler = fnic_eh_host_reset_handler, + .sdev_init = fnic_sdev_init, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, @@ -146,7 +159,7 @@ static struct fc_function_template fnic_fc_functions = { .get_host_speed = fnic_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, - .get_host_port_state = fc_get_host_port_state, + .get_host_port_state = fnic_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .show_rport_maxframe_size = 1, @@ -157,54 +170,88 @@ static struct fc_function_template fnic_fc_functions = { .show_starget_port_id = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, - .issue_fc_host_lip = fnic_reset, + .issue_fc_host_lip = fnic_issue_fc_host_lip, .get_fc_host_stats = fnic_get_stats, .reset_fc_host_stats = fnic_reset_host_stats, - .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .dd_fcrport_size = sizeof(struct rport_dd_data_s), .terminate_rport_io = fnic_terminate_rport_io, - .bsg_request = fc_lport_bsg_request, + .bsg_request = NULL, }; static void fnic_get_host_speed(struct Scsi_Host *shost) { - struct fc_lport *lp = shost_priv(shost); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); u32 port_speed = vnic_dev_port_speed(fnic->vdev); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "port_speed: %d Mbps", port_speed); + atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed); /* Add in other values as they get defined in fw */ switch (port_speed) { + case DCEM_PORTSPEED_1G: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case DCEM_PORTSPEED_2G: + 
fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + break; + case DCEM_PORTSPEED_4G: + fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + break; + case DCEM_PORTSPEED_8G: + fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + break; case DCEM_PORTSPEED_10G: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case DCEM_PORTSPEED_16G: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; case DCEM_PORTSPEED_20G: fc_host_speed(shost) = FC_PORTSPEED_20GBIT; break; case DCEM_PORTSPEED_25G: fc_host_speed(shost) = FC_PORTSPEED_25GBIT; break; + case DCEM_PORTSPEED_32G: + fc_host_speed(shost) = FC_PORTSPEED_32GBIT; + break; case DCEM_PORTSPEED_40G: case DCEM_PORTSPEED_4x10G: fc_host_speed(shost) = FC_PORTSPEED_40GBIT; break; + case DCEM_PORTSPEED_50G: + fc_host_speed(shost) = FC_PORTSPEED_50GBIT; + break; + case DCEM_PORTSPEED_64G: + fc_host_speed(shost) = FC_PORTSPEED_64GBIT; + break; case DCEM_PORTSPEED_100G: fc_host_speed(shost) = FC_PORTSPEED_100GBIT; break; + case DCEM_PORTSPEED_128G: + fc_host_speed(shost) = FC_PORTSPEED_128GBIT; + break; default: + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unknown FC speed: %d Mbps", port_speed); fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } +/* Placeholder function */ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) { int ret; - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); - struct fc_host_statistics *stats = &lp->host_stats; + struct fnic *fnic = *((struct fnic **) shost_priv(host)); + struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats; struct vnic_stats *vs; unsigned long flags; - if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) + if (time_before + (jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) return stats; fnic->stats_time = jiffies; @@ -213,24 +260,22 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (ret) { - FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fnic: Get vnic stats failed" - " 0x%x", ret); + FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic: Get vnic stats failed: 0x%x", ret); return stats; } vs = fnic->stats; stats->tx_frames = vs->tx.tx_unicast_frames_ok; - stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; + stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; stats->rx_frames = vs->rx.rx_unicast_frames_ok; - stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; + stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; stats->invalid_crc_count = vs->rx.rx_crc_errors; stats->seconds_since_last_reset = - (jiffies - fnic->stats_reset_time) / HZ; + (jiffies - fnic->stats_reset_time) / HZ; stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); - return stats; } @@ -311,8 +356,7 @@ void fnic_dump_fchost_stats(struct Scsi_Host *host, static void fnic_reset_host_stats(struct Scsi_Host *host) { int ret; - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(host)); struct fc_host_statistics *stats; unsigned long flags; @@ -325,7 +369,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host) spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (ret) { - FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, 
"fnic: Reset vnic stats failed" " 0x%x", ret); return; @@ -344,25 +388,19 @@ void fnic_log_q_error(struct fnic *fnic) for (i = 0; i < fnic->raw_wq_count; i++) { error_status = ioread32(&fnic->wq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "WQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "WQ[%d] error_status %d\n", i, error_status); } for (i = 0; i < fnic->rq_count; i++) { error_status = ioread32(&fnic->rq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "RQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "RQ[%d] error_status %d\n", i, error_status); } for (i = 0; i < fnic->wq_copy_count; i++) { error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "CWQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "CWQ[%d] error_status %d\n", i, error_status); } } @@ -396,8 +434,7 @@ static int fnic_notify_set(struct fnic *fnic) err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base); break; default: - shost_printk(KERN_ERR, fnic->lport->host, - "Interrupt mode should be set up" + dev_err(&fnic->pdev->dev, "Interrupt mode should be set up" " before devcmd notify set %d\n", vnic_dev_get_intr_mode(fnic->vdev)); err = -1; @@ -416,13 +453,6 @@ static void fnic_notify_timer(struct timer_list *t) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } -static void fnic_fip_notify_timer(struct timer_list *t) -{ - struct fnic *fnic = from_timer(fnic, t, fip_timer); - - fnic_handle_fip_timer(fnic); -} - static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -522,6 +552,8 @@ static int fnic_cleanup(struct fnic *fnic) vnic_intr_clean(&fnic->intr[i]); mempool_destroy(fnic->io_req_pool); + mempool_destroy(fnic->frame_pool); + mempool_destroy(fnic->frame_elem_pool); for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) mempool_destroy(fnic->io_sgl_pool[i]); @@ -534,25 +566,36 @@ static void fnic_iounmap(struct fnic *fnic) iounmap(fnic->bar0.vaddr); } -/** - * fnic_get_mac() - get assigned data MAC address for FIP code. - * @lport: local port. 
- */ -static u8 *fnic_get_mac(struct fc_lport *lport) +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +{ + vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} + +static void fnic_scsi_init(struct fnic *fnic) { - struct fnic *fnic = lport_priv(lport); + struct Scsi_Host *host = fnic->host; + + snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, + host->host_no); - return fnic->data_src_addr; + host->transportt = fnic_fc_transport; } -static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +static void fnic_free_ioreq_tables_mq(struct fnic *fnic) { - vnic_dev_set_default_vlan(fnic->vdev, vlan_id); + int hwq; + + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) + kfree(fnic->sw_copy_wq[hwq].io_req_table); } static int fnic_scsi_drv_init(struct fnic *fnic) { - struct Scsi_Host *host = fnic->lport->host; + struct Scsi_Host *host = fnic->host; + int err; + struct pci_dev *pdev = fnic->pdev; + struct fnic_iport_s *iport = &fnic->iport; + int hwq; /* Configure maximum outstanding IO reqs*/ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) @@ -563,103 +606,160 @@ static int fnic_scsi_drv_init(struct fnic *fnic) fnic->fnic_max_tag_id = host->can_queue; host->max_lun = fnic->config.luns_per_tgt; host->max_id = FNIC_MAX_FCP_TARGET; - host->max_cmd_len = FCOE_MAX_CMD_LEN; + host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN; host->nr_hw_queues = fnic->wq_copy_count; - shost_printk(KERN_INFO, host, - "fnic: can_queue: %d max_lun: %llu", + dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", host->can_queue, host->max_lun); - shost_printk(KERN_INFO, host, - "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", host->max_id, host->max_cmd_len, host->nr_hw_queues); + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { + fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; + fnic->sw_copy_wq[hwq].io_req_table = + kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * + sizeof(struct fnic_io_req *), GFP_KERNEL); + + if (!fnic->sw_copy_wq[hwq].io_req_table) { + fnic_free_ioreq_tables_mq(fnic); + return -ENOMEM; + } + } + + dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", + fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); + + fnic_scsi_init(fnic); + + err = scsi_add_host(fnic->host, &pdev->dev); + if (err) { + dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n"); + return err; + } + fc_host_maxframe_size(fnic->host) = iport->max_payload_size; + fc_host_dev_loss_tmo(fnic->host) = + fnic->config.port_down_timeout / 1000; + sprintf(fc_host_symbolic_name(fnic->host), + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT; + fc_host_node_name(fnic->host) = iport->wwnn; + fc_host_port_name(fnic->host) = iport->wwpn; + fc_host_supported_classes(fnic->host) = FC_COS_CLASS3; + memset(fc_host_supported_fc4s(fnic->host), 0, + sizeof(fc_host_supported_fc4s(fnic->host))); + fc_host_supported_fc4s(fnic->host)[2] = 1; + fc_host_supported_fc4s(fnic->host)[7] = 1; + fc_host_supported_speeds(fnic->host) = 0; + fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT; + + dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data); + if (fnic->host->shost_data != NULL) { + if (fnic_tgt_id_binding == 0) { + dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n"); + fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE; + } else { + dev_info(&fnic->pdev->dev, "Setting 
target binding to WWPN\n"); + fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN; + } + } + + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) { + scsi_remove_host(fnic->host); + return -ENOMEM; + } + return 0; } void fnic_mq_map_queues_cpus(struct Scsi_Host *host) { - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(host)); struct pci_dev *l_pdev = fnic->pdev; int intr_mode = fnic->config.intr_mode; struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT]; if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) { - FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "intr_mode is not msix\n"); return; } - FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "qmap->nr_queues: %d\n", qmap->nr_queues); if (l_pdev == NULL) { - FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "l_pdev is null\n"); return; } - blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET); + blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET); } static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct Scsi_Host *host; - struct fc_lport *lp; + struct Scsi_Host *host = NULL; struct fnic *fnic; mempool_t *pool; + struct fnic_iport_s *iport; int err = 0; int fnic_id = 0; int i; unsigned long flags; - int hwq; + char *desc, *subsys_desc; + int len; /* - * Allocate SCSI Host and set up association between host, - * local port, and fnic + * Allocate fnic */ - lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); - if (!lp) { - printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); + fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL); + if (!fnic) { err = -ENOMEM; - goto err_out; + goto err_out_fnic_alloc; } - host = lp->host; - fnic = lport_priv(lp); + iport = &fnic->iport; fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL); if (fnic_id < 0) { - pr_err("Unable to alloc fnic ID\n"); + dev_err(&pdev->dev, "Unable to alloc fnic ID\n"); err = fnic_id; goto err_out_ida_alloc; } - fnic->lport = lp; - fnic->ctlr.lp = lp; - fnic->link_events = 0; - fnic->pdev = pdev; - snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, - host->host_no); - - host->transportt = fnic_fc_transport; + fnic->pdev = pdev; fnic->fnic_num = fnic_id; - fnic_stats_debugfs_init(fnic); + + /* Find model name from PCIe subsys ID */ + if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) { + dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc); + + /* Update FDMI model */ + fnic->subsys_desc_len = strlen(subsys_desc); + len = ARRAY_SIZE(fnic->subsys_desc); + if (fnic->subsys_desc_len > len) + fnic->subsys_desc_len = len; + memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len); + dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc); + } else { + fnic->subsys_desc_len = 0; + dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown", + pdev->subsystem_device); + } err = pci_enable_device(pdev); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot enable PCI device, aborting.\n"); - goto err_out_free_hba; + dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n"); + goto err_out_pci_enable_device; } err = pci_request_regions(pdev, DRV_NAME); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot 
enable PCI resources, aborting\n"); - goto err_out_disable_device; + dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n"); + goto err_out_pci_request_regions; } pci_set_master(pdev); @@ -672,19 +772,17 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "No usable DMA configuration " + dev_err(&fnic->pdev->dev, "No usable DMA configuration " "aborting\n"); - goto err_out_release_regions; + goto err_out_set_dma_mask; } } /* Map vNIC resources from BAR0 */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - shost_printk(KERN_ERR, fnic->lport->host, - "BAR0 not memory-map'able, aborting.\n"); + dev_err(&fnic->pdev->dev, "BAR0 not memory-map'able, aborting.\n"); err = -ENODEV; - goto err_out_release_regions; + goto err_out_map_bar; } fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); @@ -692,61 +790,79 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fnic->bar0.len = pci_resource_len(pdev, 0); if (!fnic->bar0.vaddr) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot memory-map BAR0 res hdr, " + dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, " "aborting.\n"); err = -ENODEV; - goto err_out_release_regions; + goto err_out_fnic_map_bar; } fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); if (!fnic->vdev) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC registration failed, " + dev_err(&fnic->pdev->dev, "vNIC registration failed, " "aborting.\n"); err = -ENODEV; - goto err_out_iounmap; + goto err_out_dev_register; } err = vnic_dev_cmd_init(fnic->vdev); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vnic_dev_cmd_init() returns %d, aborting\n", + dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n", err); - goto err_out_vnic_unregister; + goto err_out_dev_cmd_init; } err = fnic_dev_wait(fnic->vdev, vnic_dev_open, vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC dev open failed, aborting.\n"); - goto err_out_dev_cmd_deinit; + dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n"); + goto err_out_dev_open; } err = vnic_dev_init(fnic->vdev, 0); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC dev init failed, aborting.\n"); - goto err_out_dev_close; + dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n"); + goto err_out_dev_init; } - err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC get MAC addr failed \n"); - goto err_out_dev_close; + dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n"); + goto err_out_dev_mac_addr; } /* set data_src for point-to-point mode and to keep it non-zero */ - memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); + memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN); /* Get vNIC configuration */ err = fnic_get_vnic_config(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Get vNIC configuration failed, " + dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, " "aborting.\n"); - goto err_out_dev_close; + goto err_out_fnic_get_config; + } + + switch (fnic->config.flags & 0xff0) { + case VFCF_FC_INITIATOR: + { + host = + scsi_host_alloc(&fnic_host_template, + sizeof(struct fnic *)); + if (!host) { + dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n"); + err = 
-ENOMEM; + goto err_out_scsi_host_alloc; + } + *((struct fnic **) shost_priv(host)) = fnic; + + fnic->host = host; + fnic->role = FNIC_ROLE_FCP_INITIATOR; + dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n", + fnic->fnic_num); + } + break; + default: + dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num); + err = -EINVAL; + goto err_out_fnic_role; } /* Setup PCI resources */ @@ -756,29 +872,18 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = fnic_set_intr_mode(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to set intr mode, " + dev_err(&fnic->pdev->dev, "Failed to set intr mode, " "aborting.\n"); - goto err_out_dev_close; + goto err_out_fnic_set_intr_mode; } err = fnic_alloc_vnic_resources(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to alloc vNIC resources, " + dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, " "aborting.\n"); - goto err_out_clear_intr; - } - - fnic_scsi_drv_init(fnic); - - for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { - fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; - fnic->sw_copy_wq[hwq].io_req_table = - kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * - sizeof(struct fnic_io_req *), GFP_KERNEL); + goto err_out_fnic_alloc_vnic_res; } - shost_printk(KERN_INFO, fnic->lport->host, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", + dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); /* initialize all fnic locks */ @@ -794,51 +899,56 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fnic->fw_ack_index[i] = -1; } - err = -ENOMEM; - fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); - if (!fnic->io_req_pool) - goto err_out_free_resources; - pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); - if (!pool) - goto err_out_free_ioreq_pool; + if (!pool) { + err = -ENOMEM; + goto err_out_free_resources; + } fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); - if (!pool) + if (!pool) { + err = -ENOMEM; goto err_out_free_dflt_pool; + } fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; + pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache); + if (!pool) { + err = -ENOMEM; + goto err_out_fdls_frame_pool; + } + fnic->frame_pool = pool; + + pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM, + fdls_frame_elem_cache); + if (!pool) { + err = -ENOMEM; + goto err_out_fdls_frame_elem_pool; + } + fnic->frame_elem_pool = pool; + /* setup vlan config, hw inserts vlan header */ fnic->vlan_hw_insert = 1; fnic->vlan_id = 0; - /* Initialize the FIP fcoe_ctrl struct */ - fnic->ctlr.send = fnic_eth_send; - fnic->ctlr.update_mac = fnic_update_mac; - fnic->ctlr.get_src_addr = fnic_get_mac; if (fnic->config.flags & VFCF_FIP_CAPABLE) { - shost_printk(KERN_INFO, fnic->lport->host, - "firmware supports FIP\n"); + dev_info(&fnic->pdev->dev, "firmware supports FIP\n"); /* enable directed and multicast */ vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); - vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); - fnic->set_vlan = fnic_set_vlan; - fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); - timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0); + vnic_dev_add_addr(fnic->vdev, iport->hwmac); spin_lock_init(&fnic->vlans_lock); INIT_WORK(&fnic->fip_frame_work, 
fnic_handle_fip_frame); - INIT_WORK(&fnic->event_work, fnic_handle_event); - INIT_WORK(&fnic->flush_work, fnic_flush_tx); - skb_queue_head_init(&fnic->fip_frame_queue); - INIT_LIST_HEAD(&fnic->evlist); - INIT_LIST_HEAD(&fnic->vlans); + INIT_LIST_HEAD(&fnic->fip_frame_queue); + INIT_LIST_HEAD(&fnic->vlan_list); + timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0); + timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0); + timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0); + timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0); + fnic->set_vlan = fnic_set_vlan; } else { - shost_printk(KERN_INFO, fnic->lport->host, - "firmware uses non-FIP mode\n"); - fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); - fnic->ctlr.state = FIP_ST_NON_FIP; + dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n"); } fnic->state = FNIC_IN_FC_MODE; @@ -851,9 +961,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup notification buffer area */ err = fnic_notify_set(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to alloc notify buffer, aborting.\n"); - goto err_out_free_max_pool; + dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n"); + goto err_out_fnic_notify_set; } /* Setup notify timer when using MSI interrupts */ @@ -864,13 +973,62 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < fnic->rq_count; i++) { err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic_alloc_rq_frame can't alloc " + dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc " "frame\n"); - goto err_out_rq_buf; + goto err_out_alloc_rq_buf; } } + init_completion(&fnic->reset_completion_wait); + + /* Start local port initialization */ + iport->max_flogi_retries = fnic->config.flogi_retries; + iport->max_plogi_retries = fnic->config.plogi_retries; + iport->plogi_timeout = fnic->config.plogi_timeout; + iport->service_params = + (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS | + FNIC_FCP_SP_CONF_CMPL); + if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + iport->service_params |= FNIC_FCP_SP_RETRY; + + iport->boot_time = jiffies; + iport->e_d_tov = fnic->config.ed_tov; + iport->r_a_tov = fnic->config.ra_tov; + iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT; + iport->wwpn = fnic->config.port_wwn; + iport->wwnn = fnic->config.node_wwn; + + iport->max_payload_size = fnic->config.maxdatafieldsize; + + if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) || + (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) || + ((iport->max_payload_size % 4) != 0)) { + iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN; + } + + iport->flags |= FNIC_FIRST_LINK_UP; + + timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback, + 0); + + fnic->stats_reset_time = jiffies; + + INIT_WORK(&fnic->link_work, fnic_handle_link); + INIT_WORK(&fnic->frame_work, fnic_handle_frame); + INIT_WORK(&fnic->tport_work, fnic_tport_event_handler); + INIT_WORK(&fnic->flush_work, fnic_flush_tx); + + INIT_LIST_HEAD(&fnic->frame_queue); + INIT_LIST_HEAD(&fnic->tx_queue); + INIT_LIST_HEAD(&fnic->tport_event_list); + + INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry, + fdls_schedule_oxid_free_retry_work); + + /* Initialize the oxid reclaim list and work struct */ + INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list); + INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler); + /* Enable all queues */ for 
(i = 0; i < fnic->raw_wq_count; i++) vnic_wq_enable(&fnic->wq[i]); @@ -881,179 +1039,131 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < fnic->wq_copy_count; i++) vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); - err = fnic_request_intr(fnic); - if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Unable to request irq.\n"); - goto err_out_request_intr; - } + vnic_dev_enable(fnic->vdev); - /* - * Initialization done with PCI system, hardware, firmware. - * Add host to SCSI - */ - err = scsi_add_host(lp->host, &pdev->dev); + err = fnic_request_intr(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic: scsi_add_host failed...exiting\n"); - goto err_out_scsi_add_host; + dev_err(&fnic->pdev->dev, "Unable to request irq.\n"); + goto err_out_fnic_request_intr; } + fnic_notify_timer_start(fnic); - /* Start local port initiatialization */ - - lp->link_up = 0; - - lp->max_retry_count = fnic->config.flogi_retries; - lp->max_rport_retry_count = fnic->config.plogi_retries; - lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | - FCP_SPPF_CONF_COMPL); - if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) - lp->service_params |= FCP_SPPF_RETRY; - - lp->boot_time = jiffies; - lp->e_d_tov = fnic->config.ed_tov; - lp->r_a_tov = fnic->config.ra_tov; - lp->link_supported_speeds = FC_PORTSPEED_10GBIT; - fc_set_wwnn(lp, fnic->config.node_wwn); - fc_set_wwpn(lp, fnic->config.port_wwn); - - fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); - - if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, - FCPIO_HOST_EXCH_RANGE_END, NULL)) { - err = -ENOMEM; - goto err_out_fc_exch_mgr_alloc; - } - - fc_lport_init_stats(lp); - fnic->stats_reset_time = jiffies; + fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE)); - fc_lport_config(lp); + err = fnic_scsi_drv_init(fnic); + if (err) + goto err_out_scsi_drv_init; - if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + - sizeof(struct fc_frame_header))) { - err = -EINVAL; - goto err_out_free_exch_mgr; + err = fnic_stats_debugfs_init(fnic); + if (err) { + dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n"); + goto err_out_free_stats_debugfs; } - fc_host_maxframe_size(lp->host) = lp->mfs; - fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; - sprintf(fc_host_symbolic_name(lp->host), - DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_unmask(&fnic->intr[i]); spin_lock_irqsave(&fnic_list_lock, flags); list_add_tail(&fnic->list, &fnic_list); spin_unlock_irqrestore(&fnic_list_lock, flags); - INIT_WORK(&fnic->link_work, fnic_handle_link); - INIT_WORK(&fnic->frame_work, fnic_handle_frame); - skb_queue_head_init(&fnic->frame_queue); - skb_queue_head_init(&fnic->tx_queue); - - fc_fabric_login(lp); - - vnic_dev_enable(fnic->vdev); - - for (i = 0; i < fnic->intr_count; i++) - vnic_intr_unmask(&fnic->intr[i]); - - fnic_notify_timer_start(fnic); - return 0; -err_out_free_exch_mgr: - fc_exch_mgr_free(lp); -err_out_fc_exch_mgr_alloc: - fc_remove_host(lp->host); - scsi_remove_host(lp->host); -err_out_scsi_add_host: +err_out_free_stats_debugfs: + fnic_stats_debugfs_remove(fnic); + fnic_free_ioreq_tables_mq(fnic); + scsi_remove_host(fnic->host); +err_out_scsi_drv_init: fnic_free_intr(fnic); -err_out_request_intr: - for (i = 0; i < fnic->rq_count; i++) +err_out_fnic_request_intr: +err_out_alloc_rq_buf: + for (i = 0; i < fnic->rq_count; i++) { + if (ioread32(&fnic->rq[i].ctrl->enable)) + 
vnic_rq_disable(&fnic->rq[i]); vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); -err_out_rq_buf: + } vnic_dev_notify_unset(fnic->vdev); -err_out_free_max_pool: +err_out_fnic_notify_set: + mempool_destroy(fnic->frame_elem_pool); +err_out_fdls_frame_elem_pool: + mempool_destroy(fnic->frame_pool); +err_out_fdls_frame_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); err_out_free_dflt_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); -err_out_free_ioreq_pool: - mempool_destroy(fnic->io_req_pool); err_out_free_resources: - for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) - kfree(fnic->sw_copy_wq[hwq].io_req_table); fnic_free_vnic_resources(fnic); -err_out_clear_intr: +err_out_fnic_alloc_vnic_res: fnic_clear_intr_mode(fnic); -err_out_dev_close: +err_out_fnic_set_intr_mode: + scsi_host_put(fnic->host); +err_out_fnic_role: +err_out_scsi_host_alloc: +err_out_fnic_get_config: +err_out_dev_mac_addr: +err_out_dev_init: vnic_dev_close(fnic->vdev); -err_out_dev_cmd_deinit: -err_out_vnic_unregister: +err_out_dev_open: +err_out_dev_cmd_init: vnic_dev_unregister(fnic->vdev); -err_out_iounmap: +err_out_dev_register: fnic_iounmap(fnic); -err_out_release_regions: +err_out_fnic_map_bar: +err_out_map_bar: +err_out_set_dma_mask: pci_release_regions(pdev); -err_out_disable_device: +err_out_pci_request_regions: pci_disable_device(pdev); -err_out_free_hba: - fnic_stats_debugfs_remove(fnic); +err_out_pci_enable_device: ida_free(&fnic_ida, fnic->fnic_num); err_out_ida_alloc: - scsi_host_put(lp->host); -err_out: + kfree(fnic); +err_out_fnic_alloc: return err; } static void fnic_remove(struct pci_dev *pdev) { struct fnic *fnic = pci_get_drvdata(pdev); - struct fc_lport *lp = fnic->lport; unsigned long flags; - int hwq; /* - * Mark state so that the workqueue thread stops forwarding - * received frames and link events to the local port. ISR and - * other threads that can queue work items will also stop - * creating work items on the fnic workqueue + * Sometimes when probe() fails and does not exit with an error code, + * remove() gets called with 'drvdata' not set. Avoid a crash by + * adding a defensive check. */ + if (!fnic) + return; + spin_lock_irqsave(&fnic->fnic_lock, flags); fnic->stop_rx_link_events = 1; spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) - del_timer_sync(&fnic->notify_timer); - /* * Flush the fnic event queue. After this call, there should * be no event queued for this fnic device in the workqueue */ flush_workqueue(fnic_event_queue); - skb_queue_purge(&fnic->frame_queue); - skb_queue_purge(&fnic->tx_queue); + + fnic_scsi_unload(fnic); + + if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) + timer_delete_sync(&fnic->notify_timer); if (fnic->config.flags & VFCF_FIP_CAPABLE) { - del_timer_sync(&fnic->fip_timer); - skb_queue_purge(&fnic->fip_frame_queue); + timer_delete_sync(&fnic->retry_fip_timer); + timer_delete_sync(&fnic->fcs_ka_timer); + timer_delete_sync(&fnic->enode_ka_timer); + timer_delete_sync(&fnic->vn_ka_timer); + + fnic_free_txq(&fnic->fip_frame_queue); fnic_fcoe_reset_vlans(fnic); - fnic_fcoe_evlist_free(fnic); } - /* - * Log off the fabric. This stops all remote ports, dns port, - * logs off the fabric.
This flushes all rport, disc, lport work - * before returning - */ - fc_fabric_logoff(fnic->lport); - - spin_lock_irqsave(&fnic->fnic_lock, flags); - fnic->in_remove = 1; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) + timer_delete_sync(&fnic->iport.fabric.fdmi_timer); - fcoe_ctlr_destroy(&fnic->ctlr); - fc_lport_destroy(lp); fnic_stats_debugfs_remove(fnic); /* @@ -1063,18 +1173,13 @@ static void fnic_remove(struct pci_dev *pdev) */ fnic_cleanup(fnic); - BUG_ON(!skb_queue_empty(&fnic->frame_queue)); - BUG_ON(!skb_queue_empty(&fnic->tx_queue)); - spin_lock_irqsave(&fnic_list_lock, flags); list_del(&fnic->list); spin_unlock_irqrestore(&fnic_list_lock, flags); - fc_remove_host(fnic->lport->host); - scsi_remove_host(fnic->lport->host); - for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) - kfree(fnic->sw_copy_wq[hwq].io_req_table); - fc_exch_mgr_free(fnic->lport); + fnic_free_txq(&fnic->frame_queue); + fnic_free_txq(&fnic->tx_queue); + vnic_dev_notify_unset(fnic->vdev); fnic_free_intr(fnic); fnic_free_vnic_resources(fnic); @@ -1084,8 +1189,11 @@ static void fnic_remove(struct pci_dev *pdev) fnic_iounmap(fnic); pci_release_regions(pdev); pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); ida_free(&fnic_ida, fnic->fnic_num); - scsi_host_put(lp->host); + fnic_scsi_unload_cleanup(fnic); + scsi_host_put(fnic->host); + kfree(fnic); } static struct pci_driver fnic_driver = { @@ -1161,20 +1269,53 @@ static int __init fnic_init_module(void) goto err_create_fnic_ioreq_slab; } - fnic_event_queue = create_singlethread_workqueue("fnic_event_wq"); + fdls_frame_cache = kmem_cache_create("fdls_frames", + FNIC_FCOE_FRAME_MAXSZ, + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fdls_frame_cache) { + pr_err("fnic fdls frame cache create failed\n"); + err = -ENOMEM; + goto err_create_fdls_frame_cache; + } + + fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem", + sizeof(struct fnic_frame_list), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fdls_frame_elem_cache) { + pr_err("fnic fdls frame elem cache create failed\n"); + err = -ENOMEM; + goto err_create_fdls_frame_cache_elem; + } + + fnic_event_queue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq"); if (!fnic_event_queue) { printk(KERN_ERR PFX "fnic work queue create failed\n"); err = -ENOMEM; goto err_create_fnic_workq; } - fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); + fnic_fip_queue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_fip_q"); if (!fnic_fip_queue) { printk(KERN_ERR PFX "fnic FIP work queue create failed\n"); err = -ENOMEM; goto err_create_fip_workq; } + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) { + reset_fnic_work_queue = + create_singlethread_workqueue("reset_fnic_work_queue"); + if (!reset_fnic_work_queue) { + pr_err("reset fnic work queue create failed\n"); + err = -ENOMEM; + goto err_create_reset_fnic_workq; + } + spin_lock_init(&reset_fnic_list_lock); + INIT_LIST_HEAD(&reset_fnic_list); + INIT_WORK(&reset_fnic_work, fnic_reset_work_handler); + } + fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); if (!fnic_fc_transport) { printk(KERN_ERR PFX "fc_attach_transport error\n"); @@ -1195,8 +1336,15 @@ err_pci_register: err_fc_transport: destroy_workqueue(fnic_fip_queue); err_create_fip_workq: + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) + destroy_workqueue(reset_fnic_work_queue); +err_create_reset_fnic_workq: destroy_workqueue(fnic_event_queue); err_create_fnic_workq: + 
kmem_cache_destroy(fdls_frame_elem_cache); +err_create_fdls_frame_cache_elem: + kmem_cache_destroy(fdls_frame_cache); +err_create_fdls_frame_cache: kmem_cache_destroy(fnic_io_req_cache); err_create_fnic_ioreq_slab: kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); @@ -1213,11 +1361,17 @@ static void __exit fnic_cleanup_module(void) { pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); + + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) + destroy_workqueue(reset_fnic_work_queue); + if (fnic_fip_queue) destroy_workqueue(fnic_fip_queue); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); + kmem_cache_destroy(fdls_frame_cache); fc_release_transport(fnic_fc_transport); fnic_trace_free(); fnic_fc_trace_free(); diff --git a/drivers/scsi/fnic/fnic_pci_subsys_devid.c b/drivers/scsi/fnic/fnic_pci_subsys_devid.c new file mode 100644 index 000000000000..36a2c1268422 --- /dev/null +++ b/drivers/scsi/fnic/fnic_pci_subsys_devid.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/mempool.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/kthread.h> +#include <linux/if_ether.h> +#include "fnic.h" + +static struct fnic_pcie_device fnic_pcie_device_table[] = { + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA, + "VIC 1280"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI, + "VIC 1240"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE, + "VIC 1285"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE, + "VIC 1227T"}, + + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA, + "VIC 1340"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW, + "VIC 1380"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN, + "C3260-SIOC"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE, + "VIC 1385"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2, + "C3260-SIOC"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT, + "VIC 1387"}, + + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY, + "VIC 1457"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE, + "VIC 1485"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA, + "VIC 1495"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", 
PCI_SUBDEVICE_ID_CISCO_BEAUMONT, + "VIC 1497"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE, + "VIC 1467"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON, + "VIC 1477"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"}, + + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN, + "VIC 15420"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW, + "VIC 15411"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU, + "VIC 15238"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA, + "VIC 15422"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH, + "VIC 15230"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA, + "VIC 15427"}, + + {0,} +}; + +int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, + char **subsys_desc) +{ + unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC; + int max = ARRAY_SIZE(fnic_pcie_device_table); + struct fnic_pcie_device *t = fnic_pcie_device_table; + int index = 0; + + if (pdev->device != device) + return 1; + + while (t->device != 0) { + if (memcmp + ((char *) &pdev->subsystem_device, + (char *) &t->subsystem_device, sizeof(short)) == 0) + break; + t++; + index++; + } + + if (index >= max - 1) { + *desc = NULL; + *subsys_desc = NULL; + return 1; + } + + *desc = fnic_pcie_device_table[index].desc; + *subsys_desc = fnic_pcie_device_table[index].subsys_desc; + return 0; +} diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c index 33dd27f6f24e..763475587b7f 100644 --- a/drivers/scsi/fnic/fnic_res.c +++ b/drivers/scsi/fnic/fnic_res.c @@ -30,9 +30,7 @@ int fnic_get_vnic_config(struct fnic *fnic) offsetof(struct vnic_fc_config, m), \ sizeof(c->m), &c->m); \ if (err) { \ - shost_printk(KERN_ERR, fnic->lport->host, \ - "Error getting %s, %d\n", #m, \ - err); \ + dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \ return err; \ } \ } while (0); @@ -60,6 +58,11 @@ int fnic_get_vnic_config(struct fnic *fnic) GET_CONFIG(intr_mode); GET_CONFIG(wq_copy_count); + if ((c->flags & (VFCF_FC_INITIATOR)) == 0) { + dev_info(&fnic->pdev->dev, "vNIC role not defined (def role: FC Init)\n"); + c->flags |= VFCF_FC_INITIATOR; + } + c->wq_enet_desc_count = min_t(u32, VNIC_FNIC_WQ_DESCS_MAX, max_t(u32, VNIC_FNIC_WQ_DESCS_MIN, @@ -139,40 +142,28 @@ int fnic_get_vnic_config(struct fnic *fnic) c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC MAC addr %pM " - "wq/wq_copy/rq %d/%d/%d\n", - fnic->ctlr.ctl_src_addr, + dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n", + fnic->data_src_addr, c->wq_enet_desc_count, c->wq_copy_desc_count, c->rq_desc_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC node wwn %llx port wwn %llx\n", + dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n", 
c->node_wwn, c->port_wwn); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC ed_tov %d ra_tov %d\n", + dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n", c->ed_tov, c->ra_tov); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC mtu %d intr timer %d\n", + dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n", c->maxdatafieldsize, c->intr_timer); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC flags 0x%x luns per tgt %d\n", + dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n", c->flags, c->luns_per_tgt); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC flogi_retries %d flogi timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n", c->flogi_retries, c->flogi_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC plogi retries %d plogi timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n", c->plogi_retries, c->plogi_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC io throttle count %d link dn timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n", c->io_throttle_count, c->link_down_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC port dn io retries %d port dn timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n", c->port_down_io_retries, c->port_down_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC wq_copy_count: %d\n", c->wq_copy_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC intr mode: %d\n", c->intr_mode); + dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count); + dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode); return 0; } @@ -206,18 +197,12 @@ void fnic_get_res_counts(struct fnic *fnic) fnic->intr_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_INTR_CTRL); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources wq_count: %d\n", fnic->wq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources rq_count: %d\n", fnic->rq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources cq_count: %d\n", fnic->cq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources intr_count: %d\n", fnic->intr_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count); } void fnic_free_vnic_resources(struct fnic *fnic) @@ -253,19 +238,17 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) intr_mode = vnic_dev_get_intr_mode(fnic->vdev); - shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n", + dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n", intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
"MSI-X" : "unknown"); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d", + dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d", fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count, fnic->rq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n", + dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n", fnic->cq_count, fnic->intr_count, fnic->config.wq_copy_desc_count); @@ -340,8 +323,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) RES_TYPE_INTR_PBA_LEGACY, 0); if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to hook legacy pba resource\n"); + dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n"); err = -ENODEV; goto err_out_cleanup; } @@ -444,8 +426,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) /* init the stats memory by making the first call here */ err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vnic_dev_stats_dump failed - x%x\n", err); + dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err); goto err_out_cleanup; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 2ba61dba4569..7133b254cbe4 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -23,11 +23,13 @@ #include <scsi/scsi_tcq.h> #include <scsi/fc/fc_els.h> #include <scsi/fc/fc_fcoe.h> -#include <scsi/libfc.h> #include <scsi/fc_frame.h> +#include <scsi/scsi_transport_fc.h> #include "fnic_io.h" #include "fnic.h" +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id); + const char *fnic_state_str[] = { [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE", [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE", @@ -65,6 +67,18 @@ static const char *fcpio_status_str[] = { [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND", }; +enum terminate_io_return { + TERM_SUCCESS = 0, + TERM_NO_SC = 1, + TERM_IO_REQ_NOT_FOUND, + TERM_ANOTHER_PORT, + TERM_GSTATE, + TERM_IO_BLOCKED, + TERM_OUT_OF_WQ_DESC, + TERM_TIMED_OUT, + TERM_MISC, +}; + const char *fnic_state_to_str(unsigned int state) { if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) @@ -90,8 +104,6 @@ static const char *fnic_fcpio_status_to_str(unsigned int status) return fcpio_status_str[status]; } -static void fnic_cleanup_io(struct fnic *fnic); - /* * Unmap the data buffer and sense buffer for an io_req, * also unmap and free the device-private scatter/gather list. 
@@ -114,6 +126,65 @@ static void fnic_release_ioreq_buf(struct fnic *fnic, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); } +static bool +fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2) +{ + u32 *portid = data1; + unsigned int *count = data2; + struct fnic_io_req *io_req = fnic_priv(sc)->io_req; + + if (!io_req || (*portid && (io_req->port_id != *portid))) + return true; + + *count += 1; + return true; +} + +unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid) +{ + unsigned int count = 0; + + fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter, + &portid, &count); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "portid = 0x%x count = %u\n", portid, count); + return count; +} + +unsigned int fnic_count_all_ioreqs(struct fnic *fnic) +{ + return fnic_count_ioreqs(fnic, 0); +} + +static bool +fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2) +{ + struct scsi_device *scsi_device = data1; + unsigned int *count = data2; + + if (sc->device != scsi_device || !fnic_priv(sc)->io_req) + return true; + + *count += 1; + return true; +} + +unsigned int +fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device) +{ + unsigned int count = 0; + + fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter, + scsi_device, &count); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "lun = %p count = %u\n", scsi_device, count); + return count; +} + /* Free up Copy Wq descriptors. Called with copy_wq lock held */ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq) { @@ -179,12 +250,11 @@ int fnic_fw_reset_handler(struct fnic *fnic) struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; int ret = 0; unsigned long flags; + unsigned int ioreq_count; /* indicate fwreset to io path */ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); - - skb_queue_purge(&fnic->frame_queue); - skb_queue_purge(&fnic->tx_queue); + ioreq_count = fnic_count_all_ioreqs(fnic); /* wait for io cmpl */ while (atomic_read(&fnic->in_flight)) @@ -198,6 +268,8 @@ int fnic_fw_reset_handler(struct fnic *fnic) if (!vnic_wq_copy_desc_avail(wq)) ret = -EAGAIN; else { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ioreq_count: %u\n", ioreq_count); fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > @@ -211,11 +283,11 @@ int fnic_fw_reset_handler(struct fnic *fnic) if (!ret) { atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "Issued fw reset\n"); } else { fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Failed to issue fw reset\n"); } @@ -231,10 +303,10 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) { struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; enum fcpio_flogi_reg_format_type format; - struct fc_lport *lp = fnic->lport; u8 gw_mac[ETH_ALEN]; int ret = 0; unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); @@ -246,28 +318,23 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) goto flogi_reg_ioreq_end; } - if (fnic->ctlr.map_dest) { - eth_broadcast_addr(gw_mac); - format = FCPIO_FLOGI_REG_DEF_DEST; - } else { - memcpy(gw_mac, fnic->ctlr.dest_addr, 
ETH_ALEN); - format = FCPIO_FLOGI_REG_GW_DEST; - } + memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN); + format = FCPIO_FLOGI_REG_GW_DEST; - if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { + if (fnic->config.flags & VFCF_FIP_CAPABLE) { fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, fc_id, gw_mac, - fnic->data_src_addr, - lp->r_a_tov, lp->e_d_tov); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", - fc_id, fnic->data_src_addr, gw_mac); + fnic->iport.fpma, + iport->r_a_tov, iport->e_d_tov); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n", + fc_id, fnic->iport.fpma, gw_mac); } else { fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, format, fc_id, gw_mac); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "FLOGI reg issued fcid 0x%x map %d dest 0x%p\n", - fc_id, fnic->ctlr.map_dest, gw_mac); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI reg issued fcid 0x%x dest %p\n", + fc_id, gw_mac); } atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); @@ -295,13 +362,17 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, { struct scatterlist *sg; struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); - struct fc_rport_libfc_priv *rp = rport->dd_data; struct host_sg_desc *desc; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned int i; int flags; u8 exch_flags; struct scsi_lun fc_lun; + struct fnic_tport_s *tport; + struct rport_dd_data_s *rdd_data; + + rdd_data = rport->dd_data; + tport = rdd_data->tport; if (sg_count) { /* For each SGE, create a device desc entry */ @@ -342,7 +413,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, free_wq_copy_descs(fnic, wq, hwq); if (unlikely(!vnic_wq_copy_desc_avail(wq))) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "fnic_queue_wq_copy_desc failure - no descriptors\n"); atomic64_inc(&misc_stats->io_cpwq_alloc_failures); return SCSI_MLQUEUE_HOST_BUSY; @@ -356,7 +427,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, exch_flags = 0; if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && - (rp->flags & FC_RP_FLAGS_RETRY)) + (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY)) exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag, @@ -371,8 +442,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, sc->cmnd, sc->cmd_len, scsi_bufflen(sc), fc_lun.scsi_lun, io_req->port_id, - rport->maxframe_size, rp->r_a_tov, - rp->e_d_tov); + tport->max_payload_size, + tport->r_a_tov, tport->e_d_tov); atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > @@ -388,10 +459,10 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) struct request *const rq = scsi_cmd_to_rq(sc); uint32_t mqtag = 0; void (*done)(struct scsi_cmnd *) = scsi_done; - struct fc_lport *lp = shost_priv(sc->device->host); struct fc_rport *rport; struct fnic_io_req *io_req = NULL; - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host)); + struct fnic_iport_s *iport = NULL; struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct vnic_wq_copy *wq; int ret = 1; @@ -400,32 +471,14 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) unsigned long flags = 0; unsigned long ptr; int io_lock_acquired = 0; - struct 
fc_rport_libfc_priv *rp; uint16_t hwq = 0; - - mqtag = blk_mq_unique_tag(rq); - spin_lock_irqsave(&fnic->fnic_lock, flags); - - if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", - fnic->state_flags); - return SCSI_MLQUEUE_HOST_BUSY; - } - - if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", - fnic->state_flags); - return SCSI_MLQUEUE_HOST_BUSY; - } + struct fnic_tport_s *tport = NULL; + struct rport_dd_data_s *rdd_data; + uint16_t lun0_delay = 0; rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "returning DID_NO_CONNECT for IO as rport is NULL\n"); sc->result = DID_NO_CONNECT << 16; done(sc); @@ -434,50 +487,96 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) ret = fc_remote_port_chkready(rport); if (ret) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "rport is not ready\n"); - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); sc->result = ret; done(sc); return 0; } - rp = rport->dd_data; - if (!rp || rp->rp_state == RPORT_ST_DELETE) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "rport 0x%x removed, returning DID_NO_CONNECT\n", - rport->port_id); + mqtag = blk_mq_unique_tag(rq); + spin_lock_irqsave(&fnic->fnic_lock, flags); + iport = &fnic->iport; - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); - sc->result = DID_NO_CONNECT<<16; + if (iport->state != FNIC_IPORT_STATE_READY) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_NO_CONNECT for IO as iport state: %d\n", + iport->state); + sc->result = DID_NO_CONNECT << 16; done(sc); return 0; } - if (rp->rp_state != RPORT_ST_READY) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", - rport->port_id, rp->rp_state); + /* fc_remote_port_add() may have added the tport to + * fc_transport but dd_data not yet set + */ + rdd_data = rport->dd_data; + tport = rdd_data->tport; + if (!tport || (rdd_data->iport != iport)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "dd_data not yet set in SCSI for rport portid: 0x%x\n", + rport->port_id); + tport = fnic_find_tport_by_fcid(iport, rport->port_id); + if (!tport) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n", + rport->port_id); + sc->result = DID_BUS_BUSY << 16; + done(sc); + return 0; + } + + /* Re-assign same params as in fnic_fdls_add_tport */ + rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; + rport->supported_classes = + FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; + /* the dd_data is allocated by fctransport of size 
dd_fcrport_size */ + rdd_data = rport->dd_data; + rdd_data->tport = tport; + rdd_data->iport = iport; + tport->rport = rport; + tport->flags |= FNIC_FDLS_SCSI_REGISTERED; + } - sc->result = DID_IMM_RETRY << 16; + if ((tport->state != FDLS_TGT_STATE_READY) + && (tport->state != FDLS_TGT_STATE_ADISC)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_NO_CONNECT for IO as tport state: %d\n", + tport->state); + sc->result = DID_NO_CONNECT << 16; done(sc); return 0; } - if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + atomic_inc(&fnic->in_flight); + atomic_inc(&tport->in_flight); + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "state not ready: %d/link not up: %d Returning HOST_BUSY\n", - lp->state, lp->link_up); return SCSI_MLQUEUE_HOST_BUSY; } - atomic_inc(&fnic->in_flight); + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", + fnic->state_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + if (!tport->lun0_delay) { + lun0_delay = 1; + tport->lun0_delay++; + } spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; fnic_priv(sc)->flags = FNIC_NO_FLAGS; @@ -499,6 +598,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) goto out; } + io_req->tport = tport; /* Determine the type of scatter/gather list we need */ io_req->sgl_cnt = sg_count; io_req->sgl_type = FNIC_SGL_CACHE_DFLT; @@ -575,6 +675,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) mempool_free(io_req, fnic->io_req_pool); } atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); return ret; } else { atomic64_inc(&fnic_stats->io_stats.active_ios); @@ -602,6 +703,14 @@ out: spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); + + if (lun0_delay) { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "LUN0 delay\n"); + mdelay(LUN0_DELAY_TIME); + } + return ret; } @@ -625,7 +734,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, atomic64_inc(&reset_stats->fw_reset_completions); /* Clean up all outstanding io requests */ - fnic_cleanup_io(fnic); + fnic_cleanup_io(fnic, SCSI_NO_TAG); atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); @@ -637,44 +746,37 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { /* Check status of reset completion */ if (!hdr_status) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "reset cmpl success\n"); /* Ready to send flogi out */ fnic->state = FNIC_IN_ETH_MODE; } else { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "reset failed with header status: %s\n", fnic_fcpio_status_to_str(hdr_status)); - /* - * Unable to change to eth mode, cannot send out flogi - * Change state to fc mode, so that subsequent Flogi - * requests from libFC will cause more attempts to - * reset the firmware. 
Free the cached flogi - */ fnic->state = FNIC_IN_FC_MODE; atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } } else { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Unexpected state while processing reset completion: %s\n", fnic_state_to_str(fnic->state)); atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } - /* Thread removing device blocks till firmware reset is complete */ - if (fnic->remove_wait) - complete(fnic->remove_wait); + if (fnic->fw_reset_done) + complete(fnic->fw_reset_done); /* * If fnic is being removed, or fw reset failed * free the flogi frame. Else, send it out */ - if (fnic->remove_wait || ret) { + if (ret) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); - skb_queue_purge(&fnic->tx_queue); + fnic_free_txq(&fnic->tx_queue); goto reset_cmpl_handler_end; } @@ -710,19 +812,19 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, /* Check flogi registration completion status */ if (!hdr_status) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "flog reg succeeded\n"); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "FLOGI reg succeeded\n"); fnic->state = FNIC_IN_FC_MODE; } else { FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, fnic->fnic_num, - "fnic flogi reg :failed %s\n", + fnic->host, fnic->fnic_num, + "fnic flogi reg failed: %s\n", fnic_fcpio_status_to_str(hdr_status)); fnic->state = FNIC_IN_ETH_MODE; ret = -1; } } else { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Unexpected fnic state %s while" " processing flogi reg completion\n", fnic_state_to_str(fnic->state)); @@ -795,7 +897,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags); FNIC_TRACE(fnic_fcpio_ack_handler, - fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], + fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], ox_id_tag[4], ox_id_tag[5]); } @@ -833,36 +935,36 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind hwq = blk_mq_unique_tag_to_hwq(mqtag); if (hwq != cq_index) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s icmnd completion on the wrong queue\n", fnic_fcpio_status_to_str(hdr_status)); } if (tag >= fnic->fnic_max_tag_id) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s Out of range tag\n", fnic_fcpio_status_to_str(hdr_status)); return; } spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - sc = scsi_host_find_tag(fnic->lport->host, id); + sc = scsi_host_find_tag(fnic->host, id); WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "icmnd_cmpl sc is null - " "hdr status = %s tag = 0x%x desc = 0x%p\n", fnic_fcpio_status_to_str(hdr_status), id, desc); 
FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, - fnic->lport->host->host_no, id, + fnic->host->host_no, id, ((u64)icmnd_cmpl->_resvd0[1] << 16 | (u64)icmnd_cmpl->_resvd0[0]), ((u64)hdr_status << 16 | @@ -885,7 +987,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind atomic64_inc(&fnic_stats->io_stats.ioreq_null); fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "icmnd_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", fnic_fcpio_status_to_str(hdr_status), id, sc); @@ -912,7 +1014,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind if(FCPIO_ABORTED == hdr_status) fnic_priv(sc)->flags |= FNIC_IO_ABORTED; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "icmnd_cmpl abts pending " "hdr status = %s tag = 0x%x sc = 0x%p " "scsi_status = %x residual = %d\n", @@ -943,6 +1045,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) atomic64_inc(&fnic_stats->misc_stats.queue_fulls); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "xfer_len: %llu", xfer_len); break; case FCPIO_TIMEOUT: /* request was timed out */ @@ -1004,7 +1109,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind if (hdr_status != FCPIO_SUCCESS) { atomic64_inc(&fnic_stats->io_stats.io_failures); - shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n", fnic_fcpio_status_to_str(hdr_status)); } @@ -1024,13 +1129,13 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind desc, cmd_trace, fnic_flags_and_state(sc)); if (sc->sc_data_direction == DMA_FROM_DEVICE) { - fnic->lport->host_stats.fcp_input_requests++; + fnic_stats->host_stats.fcp_input_requests++; fnic->fcp_input_bytes += xfer_len; } else if (sc->sc_data_direction == DMA_TO_DEVICE) { - fnic->lport->host_stats.fcp_output_requests++; + fnic_stats->host_stats.fcp_output_requests++; fnic->fcp_output_bytes += xfer_len; } else - fnic->lport->host_stats.fcp_control_requests++; + fnic_stats->host_stats.fcp_control_requests++; /* Call SCSI completion function to complete the IO */ scsi_done(sc); @@ -1097,27 +1202,27 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK); if (hwq != cq_index) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s ITMF completion on the wrong queue\n", fnic_fcpio_status_to_str(hdr_status)); } if (tag > fnic->fnic_max_tag_id) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s Tag out of range\n", fnic_fcpio_status_to_str(hdr_status)); return; } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, 
fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s Tag out of range\n", fnic_fcpio_status_to_str(hdr_status)); return; @@ -1133,14 +1238,14 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if (io_req) sc = io_req->sc; } else { - sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK); } WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", fnic_fcpio_status_to_str(hdr_status), tag); return; @@ -1152,7 +1257,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde atomic64_inc(&fnic_stats->io_stats.ioreq_null); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "itmf_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", fnic_fcpio_status_to_str(hdr_status), tag, sc); @@ -1163,7 +1268,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { /* Abort and terminate completion of device reset req */ /* REVISIT : Add asserts about various flags */ - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1175,7 +1280,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } else if (id & FNIC_TAG_ABORT) { /* Completion of abort cmd */ - shost_printk(KERN_DEBUG, fnic->lport->host, + shost_printk(KERN_DEBUG, fnic->host, "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1190,7 +1295,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde &term_stats->terminate_fw_timeouts); break; case FCPIO_ITMF_REJECTED: - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "abort reject recd. id %d\n", (int)(id & FNIC_TAG_MASK)); break; @@ -1225,7 +1330,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "abts cmpl recd. 
id %d status %s\n", (int)(id & FNIC_TAG_MASK), fnic_fcpio_status_to_str(hdr_status)); @@ -1238,11 +1343,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if (io_req->abts_done) { complete(io_req->abts_done); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_INFO, fnic->lport->host, + shost_printk(KERN_INFO, fnic->host, "hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n", hwq, mqtag, tag); } else { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1273,7 +1378,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde } } else if (id & FNIC_TAG_DEV_RST) { /* Completion of device reset */ - shost_printk(KERN_INFO, fnic->lport->host, + shost_printk(KERN_INFO, fnic->host, "hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1285,7 +1390,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde sc->device->host->host_no, id, sc, jiffies_to_msecs(jiffies - start_time), desc, 0, fnic_flags_and_state(sc)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1298,7 +1403,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde sc->device->host->host_no, id, sc, jiffies_to_msecs(jiffies - start_time), desc, 0, fnic_flags_and_state(sc)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "dev reset cmpl recd after time out. 
" "id %d status %s\n", (int)(id & FNIC_TAG_MASK), @@ -1307,7 +1412,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde } fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1316,7 +1421,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } else { - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n", __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); @@ -1371,7 +1476,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, break; default: - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "firmware completion type %d\n", desc->hdr.type); break; @@ -1414,8 +1519,8 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) struct request *const rq = scsi_cmd_to_rq(sc); struct fnic *fnic = data; struct fnic_io_req *io_req; - unsigned long flags = 0; unsigned long start_time = 0; + unsigned long flags; struct fnic_stats *fnic_stats = &fnic->fnic_stats; uint16_t hwq = 0; int tag; @@ -1432,14 +1537,14 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) io_req = fnic_priv(sc)->io_req; if (!io_req) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n", hwq, mqtag, tag, fnic_priv(sc)->flags); return true; } if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && - !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { + !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { /* * We will be here only when FW completes reset * without sending completions for outstanding ios. 
@@ -1449,6 +1554,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) complete(io_req->dr_done); else if (io_req && io_req->abts_done) complete(io_req->abts_done); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { @@ -1458,19 +1564,19 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) fnic_priv(sc)->io_req = NULL; io_req->sc = NULL; + start_time = io_req->start_time; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); /* * If there is a scsi_cmnd associated with this io_req, then * free the corresponding state */ - start_time = io_req->start_time; fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); sc->result = DID_TRANSPORT_DISRUPTED << 16; - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", mqtag, tag, sc, (jiffies - start_time)); if (atomic64_read(&fnic->io_cmpl_skip)) @@ -1479,23 +1585,60 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) atomic64_inc(&fnic_stats->io_stats.io_completions); FNIC_TRACE(fnic_cleanup_io, - sc->device->host->host_no, tag, sc, - jiffies_to_msecs(jiffies - start_time), - 0, ((u64)sc->cmnd[0] << 32 | - (u64)sc->cmnd[2] << 24 | - (u64)sc->cmnd[3] << 16 | - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), - fnic_flags_and_state(sc)); - + sc->device->host->host_no, tag, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64) sc->cmnd[0] << 32 | + (u64) sc->cmnd[2] << 24 | + (u64) sc->cmnd[3] << 16 | + (u64) sc->cmnd[4] << 8 | sc->cmnd[5]), + (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)-> + state)); + + /* Complete the command to SCSI */ scsi_done(sc); - return true; } -static void fnic_cleanup_io(struct fnic *fnic) +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) { - scsi_host_busy_iter(fnic->lport->host, - fnic_cleanup_io_iter, fnic); + unsigned int io_count = 0; + unsigned long flags; + struct fnic_io_req *io_req = NULL; + struct scsi_cmnd *sc = NULL; + + io_count = fnic_count_all_ioreqs(fnic); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Outstanding ioreq count: %d active io count: %lld Waiting\n", + io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + scsi_host_busy_iter(fnic->host, + fnic_cleanup_io_iter, fnic); + + /* with sg3utils device reset, SC needs to be retrieved from ioreq */ + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id]; + if (io_req) { + sc = io_req->sc; + if (sc) { + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + if (io_req && io_req->dr_done) + complete(io_req->dr_done); + } + } + } + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + + while ((io_count = fnic_count_all_ioreqs(fnic))) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Outstanding ioreq count: %d active io count: %lld Waiting\n", + io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + schedule_timeout(msecs_to_jiffies(100)); + } } void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, @@ -1516,7 +1659,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, if (id >= fnic->fnic_max_tag_id) return; - sc = scsi_host_find_tag(fnic->lport->host, id); 
+ sc = scsi_host_find_tag(fnic->host, id); if (!sc) return; @@ -1545,7 +1688,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, wq_copy_cleanup_scsi_cmd: sc->result = DID_NO_CONNECT << 16; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "wq_copy_cleanup_handler:" + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:" " DID_NO_CONNECT\n"); FNIC_TRACE(fnic_wq_copy_cleanup_handler, @@ -1567,10 +1710,13 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq]; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; + struct fnic_tport_s *tport = io_req->tport; spin_lock_irqsave(&fnic->fnic_lock, flags); if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); spin_unlock_irqrestore(&fnic->fnic_lock, flags); return 1; } else @@ -1585,7 +1731,8 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, if (!vnic_wq_copy_desc_avail(wq)) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + atomic_dec(&tport->in_flight); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic_queue_abort_io_req: failure: no descriptors\n"); atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); return 1; @@ -1619,20 +1766,24 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) struct fnic *fnic = iter_data->fnic; int abt_tag = 0; struct fnic_io_req *io_req; - unsigned long flags; struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct scsi_lun fc_lun; enum fnic_ioreq_state old_ioreq_state; uint16_t hwq = 0; + unsigned long flags; abt_tag = blk_mq_unique_tag(rq); hwq = blk_mq_unique_tag_to_hwq(abt_tag); - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + if (!sc) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq); + return true; + } + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); io_req = fnic_priv(sc)->io_req; - if (!io_req || io_req->port_id != iter_data->port_id) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; @@ -1640,7 +1791,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n", hwq, abt_tag, fnic_priv(sc)->flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); @@ -1655,37 +1806,40 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } + if (io_req->abts_done) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic_rport_exch_reset: io_req->abts_done is set " - "state is %s\n", + shost_printk(KERN_ERR, fnic->host, + "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n", fnic_ioreq_state_to_str(fnic_priv(sc)->state)); } if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { - shost_printk(KERN_ERR, fnic->lport->host, - "rport_exch_reset " - "IO not yet issued %p tag 0x%x flags " - "%x state %d\n", - sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state); + 
shost_printk(KERN_ERR, fnic->host, + "rport_exch_reset IO not yet issued %p abt_tag 0x%x", + sc, abt_tag); + shost_printk(KERN_ERR, fnic->host, + "flags %x state %d\n", fnic_priv(sc)->flags, + fnic_priv(sc)->state); } old_ioreq_state = fnic_priv(sc)->state; fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { atomic64_inc(&reset_stats->device_reset_terminates); abt_tag |= FNIC_TAG_DEV_RST; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "dev reset sc 0x%p\n", sc); } - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); - BUG_ON(io_req->abts_done); - - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc); + WARN_ON_ONCE(io_req->abts_done); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic_rport_reset_exch: Issuing abts\n"); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - /* Now queue the abort command to firmware */ + /* Queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, abt_tag, @@ -1698,7 +1852,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) * lun reset */ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n", hwq, abt_tag, fnic_priv(sc)->flags); if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) @@ -1714,11 +1868,14 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) atomic64_inc(&term_stats->terminates); iter_data->term_cnt++; } + return true; } -static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) +void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) { + unsigned int io_count = 0; + unsigned long flags; struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct fnic_rport_abort_io_iter_data iter_data = { .fnic = fnic, @@ -1726,53 +1883,115 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) .term_cnt = 0, }; - FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, fnic->fnic_num, - "fnic_rport_exch_reset called portid 0x%06x\n", - port_id); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic rport exchange reset for tport: 0x%06x\n", + port_id); if (fnic->in_remove) return; - scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, + io_count = fnic_count_ioreqs(fnic, port_id); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n", + port_id, io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + /* Bump in_flight counter to hold off fnic_fw_reset_handler. 
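/*
 * The guard being set up here is an inc-check-back-out pattern: bump
 * fnic->in_flight first so that a concurrent firmware reset (which waits
 * for in_flight to drain) cannot start mid-walk, then re-check the
 * IO_BLOCKED flag under fnic_lock and undo the increment if a reset
 * already won the race. The same discipline shows up in
 * fnic_queue_abort_io_req() above, which now drops both fnic->in_flight
 * and the new per-target tport->in_flight on every failure path. Reduced
 * to its essentials (same lock and flag names as the hunk):
 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	atomic_inc(&fnic->in_flight);		/* hold off fw reset */
	if (fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)) {
		atomic_dec(&fnic->in_flight);	/* lost the race; back out */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	/* ... walk and terminate the affected I/O ... */
	atomic_dec(&fnic->in_flight);		/* release the hold */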
*/ + atomic_inc(&fnic->in_flight); + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter, &iter_data); + + if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); + atomic_dec(&fnic->in_flight); + + while ((io_count = fnic_count_ioreqs(fnic, port_id))) + schedule_timeout(msecs_to_jiffies(1000)); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "rport: 0x%x remaining portid-io-count: %d ", + port_id, io_count); } void fnic_terminate_rport_io(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata; - struct fc_lport *lport; - struct fnic *fnic; + struct fnic_tport_s *tport; + struct rport_dd_data_s *rdd_data; + struct fnic_iport_s *iport = NULL; + struct fnic *fnic = NULL; if (!rport) { - printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); + pr_err("rport is NULL\n"); return; } - rdata = rport->dd_data; - if (!rdata) { - printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); - return; + rdd_data = rport->dd_data; + if (rdd_data) { + tport = rdd_data->tport; + if (!tport) { + pr_err( + "term rport io called after tport is deleted. Returning 0x%8x\n", + rport->port_id); + } else { + pr_err( + "term rport io called after tport is set 0x%8x\n", + rport->port_id); + pr_err( + "tport may be rediscovered\n"); + + iport = (struct fnic_iport_s *) tport->iport; + fnic = iport->fnic; + fnic_rport_exch_reset(fnic, rport->port_id); + } } - lport = rdata->local_port; +} - if (!lport) { - printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); - return; - } - fnic = lport_priv(lport); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", - rport->port_name, rport->node_name, rport, - rport->port_id); +/* + * FCP-SCSI specific handling for module unload + * + */ +void fnic_scsi_unload(struct fnic *fnic) +{ + unsigned long flags; - if (fnic->in_remove) - return; + /* + * Mark state so that the workqueue thread stops forwarding + * received frames and link events to the local port. 
ISR and + * other threads that can queue work items will also stop + * creating work items on the fnic workqueue + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) + fnic_scsi_fcpio_reset(fnic); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->in_remove = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_flush_tport_event_list(fnic); + fnic_delete_fcp_tports(fnic); +} - fnic_rport_exch_reset(fnic, rport->port_id); +void fnic_scsi_unload_cleanup(struct fnic *fnic) +{ + int hwq = 0; + + fc_remove_host(fnic->host); + scsi_remove_host(fnic->host); + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) + kfree(fnic->sw_copy_wq[hwq].io_req_table); } /* @@ -1783,10 +2002,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport) int fnic_abort_cmd(struct scsi_cmnd *sc) { struct request *const rq = scsi_cmd_to_rq(sc); - struct fc_lport *lp; + struct fnic_iport_s *iport; + struct fnic_tport_s *tport; struct fnic *fnic; struct fnic_io_req *io_req = NULL; struct fc_rport *rport; + struct rport_dd_data_s *rdd_data; unsigned long flags; unsigned long start_time = 0; int ret = SUCCESS; @@ -1806,11 +2027,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ - lp = shost_priv(sc->device->host); - - fnic = lport_priv(lp); + fnic = *((struct fnic **) shost_priv(sc->device->host)); spin_lock_irqsave(&fnic->fnic_lock, flags); + iport = &fnic->iport; + fnic_stats = &fnic->fnic_stats; abts_stats = &fnic->fnic_stats.abts_stats; term_stats = &fnic->fnic_stats.term_stats; @@ -1821,7 +2042,44 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) fnic_priv(sc)->flags = FNIC_NO_FLAGS; - if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + rdd_data = rport->dd_data; + tport = rdd_data->tport; + + if (!tport) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Abort cmd called after tport delete! 
rport fcid: 0x%x", + rport->port_id); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n", + sc->device->lun, hwq, mqtag, + sc->cmnd[0], fnic_priv(sc)->flags); + ret = FAILED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x", + rport->port_id, sc->device->lun, hwq, mqtag); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Op: 0x%x flags: 0x%x\n", + sc->cmnd[0], + fnic_priv(sc)->flags); + + if (iport->state != FNIC_IPORT_STATE_READY) { + atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport NOT in READY state"); + ret = FAILED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } + + if ((tport->state != FDLS_TGT_STATE_READY) && + (tport->state != FDLS_TGT_STATE_ADISC)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport state: %d\n", tport->state); ret = FAILED; spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto fnic_abort_cmd_end; @@ -1843,6 +2101,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); io_req = fnic_priv(sc)->io_req; if (!io_req) { + ret = FAILED; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto fnic_abort_cmd_end; } @@ -1870,7 +2129,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) else atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "CDB Opcode: 0x%02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time); /* @@ -1893,7 +2152,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (fc_remote_port_chkready(rport) == 0) task_req = FCPIO_ITMF_ABT_TASK; else { - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); task_req = FCPIO_ITMF_ABT_TASK_TERM; } @@ -1961,7 +2220,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Issuing host reset due to out of order IO\n"); ret = FAILED; @@ -2009,7 +2268,7 @@ fnic_abort_cmd_end: (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), fnic_flags_and_state(sc)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Returning from abort cmd type %x %s\n", task_req, (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); @@ -2027,6 +2286,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, unsigned long flags; uint16_t hwq = 0; uint32_t tag = 0; + struct fnic_tport_s *tport = io_req->tport; tag = io_req->tag; hwq = blk_mq_unique_tag_to_hwq(tag); @@ -2037,8 +2297,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, FNIC_FLAGS_IO_BLOCKED))) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); return FAILED; - } else + } else { atomic_inc(&fnic->in_flight); + atomic_inc(&tport->in_flight); + } spin_unlock_irqrestore(&fnic->fnic_lock, flags); spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); @@ -2047,7 +2309,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, free_wq_copy_descs(fnic, wq, hwq); if (!vnic_wq_copy_desc_avail(wq)) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "queue_dr_io_req failure - no descriptors\n"); atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); ret = -EAGAIN; @@ -2072,6 +2334,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, lr_io_req_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); return ret; } @@ -2114,7 +2377,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) * Found IO that is still pending with firmware and * belongs to the LUN that we are resetting */ - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Found IO in %s on lun\n", fnic_ioreq_state_to_str(fnic_priv(sc)->state)); @@ -2124,14 +2387,14 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) } if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "dev rst not pending sc 0x%p\n", sc); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } if (io_req->abts_done) - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "%s: io_req->abts_done is set state is %s\n", __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); old_ioreq_state = fnic_priv(sc)->state; @@ -2147,7 +2410,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) BUG_ON(io_req->abts_done); if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "dev rst sc 0x%p\n", sc); } @@ -2169,7 +2432,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) fnic_priv(sc)->state = old_ioreq_state; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); iter_data->ret = FAILED; - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%lx Abort could not be queued\n", hwq, abt_tag); return false; @@ -2248,7 +2511,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, iter_data.lr_sc = lr_sc; - scsi_host_busy_iter(fnic->lport->host, + scsi_host_busy_iter(fnic->host, fnic_pending_aborts_iter, &iter_data); if (iter_data.ret == FAILED) { ret = iter_data.ret; @@ -2261,7 +2524,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, ret = 1; clean_pending_aborts_end: - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "exit status: %d\n", ret); return 
ret; } @@ -2274,11 +2537,11 @@ clean_pending_aborts_end: int fnic_device_reset(struct scsi_cmnd *sc) { struct request *rq = scsi_cmd_to_rq(sc); - struct fc_lport *lp; struct fnic *fnic; struct fnic_io_req *io_req = NULL; struct fc_rport *rport; int status; + int count = 0; int ret = FAILED; unsigned long flags; unsigned long start_time = 0; @@ -2289,31 +2552,63 @@ int fnic_device_reset(struct scsi_cmnd *sc) DECLARE_COMPLETION_ONSTACK(tm_done); bool new_sc = 0; uint16_t hwq = 0; + struct fnic_iport_s *iport = NULL; + struct rport_dd_data_s *rdd_data; + struct fnic_tport_s *tport; + u32 old_soft_reset_count; + u32 old_link_down_cnt; + int exit_dr = 0; /* Wait for rport to unblock */ fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ - lp = shost_priv(sc->device->host); + fnic = *((struct fnic **) shost_priv(sc->device->host)); + iport = &fnic->iport; - fnic = lport_priv(lp); fnic_stats = &fnic->fnic_stats; - reset_stats = &fnic->fnic_stats.reset_stats; + reset_stats = &fnic_stats->reset_stats; atomic64_inc(&reset_stats->device_resets); rport = starget_to_rport(scsi_target(sc->device)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fcid: 0x%x lun: 0x%llx hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n", + + spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n", rport->port_id, sc->device->lun, hwq, mqtag, fnic_priv(sc)->flags); - if (lp->state != LPORT_ST_READY || !(lp->link_up)) + rdd_data = rport->dd_data; + tport = rdd_data->tport; + if (!tport) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n", + rport->port_id, sc->device->lun); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_device_reset_end; + } + + if (iport->state != FNIC_IPORT_STATE_READY) { + atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport NOT in READY state"); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_device_reset_end; + } + + if ((tport->state != FDLS_TGT_STATE_READY) && + (tport->state != FDLS_TGT_STATE_ADISC)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport state: %d\n", tport->state); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto fnic_device_reset_end; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); /* Check if remote port up */ if (fc_remote_port_chkready(rport)) { - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); goto fnic_device_reset_end; } @@ -2352,6 +2647,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) io_req->port_id = rport->port_id; io_req->tag = mqtag; fnic_priv(sc)->io_req = io_req; + io_req->tport = tport; io_req->sc = sc; if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) @@ -2366,7 +2662,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag); /* * issue the device reset, if enqueue failed, clean up the ioreq @@ -2383,6 +2679,11 @@ int fnic_device_reset(struct scsi_cmnd *sc) fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + 
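/*
 * With libfc removed, shost_priv() no longer yields a struct fc_lport;
 * fnic_device_reset() and the other entry points instead read a single
 * struct fnic pointer stored in the Scsi_Host private data, hence the
 * *(struct fnic **) double indirection. The probe-side counterpart this
 * implies is sketched below; it is inferred from the lookup pattern and
 * the template name is an assumption, as this hunk does not show it.
 */
	shost = scsi_host_alloc(&fnic_host_template, sizeof(struct fnic *));
	if (!shost)
		return -ENOMEM;
	/* store the backpointer that every entry point dereferences */
	*((struct fnic **) shost_priv(shost)) = fnic;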
spin_lock_irqsave(&fnic->fnic_lock, flags); + old_link_down_cnt = iport->fnic->link_down_cnt; + old_soft_reset_count = fnic->soft_reset_count; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + /* * Wait on the local completion for LUN reset. The io_req may be * freed while we wait since we hold no lock. @@ -2390,14 +2691,39 @@ int fnic_device_reset(struct scsi_cmnd *sc) wait_for_completion_timeout(&tm_done, msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + /* + * Wake up can be due to the following reasons: + * 1) The device reset completed from target. + * 2) Device reset timed out. + * 3) A link-down/host_reset may have happened in between. + * 4) The device reset was aborted and io_req->dr_done was called. + */ + + exit_dr = 0; + spin_lock_irqsave(&fnic->fnic_lock, flags); + if ((old_link_down_cnt != fnic->link_down_cnt) || + (fnic->reset_in_progress) || + (fnic->soft_reset_count != old_soft_reset_count) || + (iport->state != FNIC_IPORT_STATE_READY)) + exit_dr = 1; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); io_req = fnic_priv(sc)->io_req; if (!io_req) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc); goto fnic_device_reset_end; } + + if (exit_dr) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Host reset called for fnic. Exit device reset\n"); + io_req->dr_done = NULL; + goto fnic_device_reset_clean; + } io_req->dr_done = NULL; status = fnic_priv(sc)->lr_status; @@ -2408,53 +2734,11 @@ int fnic_device_reset(struct scsi_cmnd *sc) */ if (status == FCPIO_INVALID_CODE) { atomic64_inc(&reset_stats->device_reset_timeouts); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Device reset timed out\n"); fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); int_to_scsilun(sc->device->lun, &fc_lun); - /* - * Issue abort and terminate on device reset request. - * If q'ing of terminate fails, retry it after a delay. 
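/*
 * The snapshot taken above is a generation-counter idiom: because the
 * LUN-reset wait drops every lock, link_down_cnt and soft_reset_count are
 * recorded under fnic_lock before sleeping and compared after waking; any
 * change (or a reset in progress, or the iport leaving READY) means the
 * completion can no longer be trusted, and the path bails out via exit_dr.
 * The idiom in isolation (timeout value elided):
 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	old_link_down_cnt = fnic->link_down_cnt;
	old_soft_reset_count = fnic->soft_reset_count;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	wait_for_completion_timeout(&tm_done, timeout);	/* all locks dropped */

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	exit_dr = (old_link_down_cnt != fnic->link_down_cnt) ||
		  (old_soft_reset_count != fnic->soft_reset_count) ||
		  fnic->reset_in_progress ||
		  (iport->state != FNIC_IPORT_STATE_READY);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);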
- */ - while (1) { - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) { - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - break; - } - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - if (fnic_queue_abort_io_req(fnic, - mqtag | FNIC_TAG_DEV_RST, - FCPIO_ITMF_ABT_TASK_TERM, - fc_lun.scsi_lun, io_req, hwq)) { - wait_for_completion_timeout(&tm_done, - msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); - } else { - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; - fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; - io_req->abts_done = &tm_done; - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n", - mqtag, sc); - break; - } - } - while (1) { - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - wait_for_completion_timeout(&tm_done, - msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); - break; - } else { - io_req = fnic_priv(sc)->io_req; - io_req->abts_done = NULL; - goto fnic_device_reset_clean; - } - } + goto fnic_device_reset_clean; } else { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } @@ -2463,7 +2747,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) if (status != FCPIO_SUCCESS) { spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, fnic->fnic_num, + fnic->host, fnic->fnic_num, "Device reset completed - failed\n"); io_req = fnic_priv(sc)->io_req; goto fnic_device_reset_clean; @@ -2479,9 +2763,8 @@ int fnic_device_reset(struct scsi_cmnd *sc) if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); io_req = fnic_priv(sc)->io_req; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "Device reset failed" - " since could not abort all IOs\n"); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Device reset failed: Cannot abort all IOs\n"); goto fnic_device_reset_clean; } @@ -2507,6 +2790,15 @@ fnic_device_reset_clean: mempool_free(io_req, fnic->io_req_pool); } + /* + * If link-event is seen while LUN reset is issued we need + * to complete the LUN reset here + */ + if (!new_sc) { + sc->result = DID_RESET << 16; + scsi_done(sc); + } + fnic_device_reset_end: FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc, jiffies_to_msecs(jiffies - start_time), @@ -2520,7 +2812,18 @@ fnic_device_reset_end: mutex_unlock(&fnic->sgreset_mutex); } - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) { + if (count >= 2) { + ret = FAILED; + break; + } + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Cannot clean up all IOs for the LUN\n"); + schedule_timeout(msecs_to_jiffies(1000)); + count++; + } + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Returning from device reset %s\n", (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); @@ -2531,67 +2834,78 @@ fnic_device_reset_end: return ret; } -/* Clean up all IOs, clean up libFC local port */ -int fnic_reset(struct Scsi_Host *shost) +static void fnic_post_flogo_linkflap(struct fnic *fnic) +{ + unsigned long flags; + + fnic_fdls_link_status_change(fnic, 0); + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->link_status) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fdls_link_status_change(fnic, 1); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/* Logout from all the targets and simulate link flap */ +void fnic_reset(struct Scsi_Host *shost) { - struct fc_lport *lp; struct fnic *fnic; - int ret = 0; struct reset_stats *reset_stats; - lp = shost_priv(shost); - fnic = lport_priv(lp); + fnic = *((struct fnic **) shost_priv(shost)); reset_stats = &fnic->fnic_stats.reset_stats; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Issuing fnic reset\n"); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Issuing fnic reset\n"); atomic64_inc(&reset_stats->fnic_resets); + fnic_post_flogo_linkflap(fnic); - /* - * Reset local port, this will clean up libFC exchanges, - * reset remote port sessions, and if link is up, begin flogi - */ - ret = fc_lport_reset(lp); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Returning from fnic reset"); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Returning from fnic reset with: %s\n", - (ret == 0) ? "SUCCESS" : "FAILED"); + atomic64_inc(&reset_stats->fnic_reset_completions); +} - if (ret == 0) - atomic64_inc(&reset_stats->fnic_reset_completions); - else - atomic64_inc(&reset_stats->fnic_reset_failures); +int fnic_issue_fc_host_lip(struct Scsi_Host *shost) +{ + int ret = 0; + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FC host lip issued"); + ret = fnic_host_reset(shost); return ret; } -/* - * SCSI Error handling calls driver's eh_host_reset if all prior - * error handling levels return FAILED. If host reset completes - * successfully, and if link is up, then Fabric login begins. - * - * Host Reset is the highest level of error recovery. If this fails, then - * host is offlined by SCSI. - * - */ -int fnic_host_reset(struct scsi_cmnd *sc) +int fnic_host_reset(struct Scsi_Host *shost) { - int ret; + int ret = SUCCESS; unsigned long wait_host_tmo; - struct Scsi_Host *shost = sc->device->host; - struct fc_lport *lp = shost_priv(shost); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (!fnic->internal_reset_inprogress) { - fnic->internal_reset_inprogress = true; + if (fnic->reset_in_progress == NOT_IN_PROGRESS) { + fnic->reset_in_progress = IN_PROGRESS; } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "host reset in progress skipping another host reset\n"); - return SUCCESS; + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(10000)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->reset_in_progress == IN_PROGRESS) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, + "Firmware reset in progress. 
Skipping another host reset\n"); + return SUCCESS; + } + fnic->reset_in_progress = IN_PROGRESS; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); @@ -2600,140 +2914,34 @@ int fnic_host_reset(struct scsi_cmnd *sc) * scsi-ml tries to send a TUR to every device if host reset is * successful, so before returning to scsi, fabric should be up */ - ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED; - if (ret == SUCCESS) { + fnic_reset(shost); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); + fnic->soft_reset_count++; + + /* wait till the link is up */ + if (fnic->link_status) { wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; ret = FAILED; while (time_before(jiffies, wait_host_tmo)) { - if ((lp->state == LPORT_ST_READY) && - (lp->link_up)) { + if (iport->state != FNIC_IPORT_STATE_READY + && fnic->link_status) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + ssleep(1); + spin_lock_irqsave(&fnic->fnic_lock, flags); + } else { ret = SUCCESS; break; } - ssleep(1); } } - - spin_lock_irqsave(&fnic->fnic_lock, flags); - fnic->internal_reset_inprogress = false; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return ret; -} - -/* - * This fxn is called from libFC when host is removed - */ -void fnic_scsi_abort_io(struct fc_lport *lp) -{ - int err = 0; - unsigned long flags; - enum fnic_state old_state; - struct fnic *fnic = lport_priv(lp); - DECLARE_COMPLETION_ONSTACK(remove_wait); - - /* Issue firmware reset for fnic, wait for reset to complete */ -retry_fw_reset: - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) && - fnic->link_events) { - /* fw reset is in progress, poll for its completion */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - schedule_timeout(msecs_to_jiffies(100)); - goto retry_fw_reset; - } - - fnic->remove_wait = &remove_wait; - old_state = fnic->state; - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); spin_unlock_irqrestore(&fnic->fnic_lock, flags); - err = fnic_fw_reset_handler(fnic); - if (err) { - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) - fnic->state = old_state; - fnic->remove_wait = NULL; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - - /* Wait for firmware reset to complete */ - wait_for_completion_timeout(&remove_wait, - msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); - - spin_lock_irqsave(&fnic->fnic_lock, flags); - fnic->remove_wait = NULL; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fnic_scsi_abort_io %s\n", - (fnic->state == FNIC_IN_ETH_MODE) ? 
- "SUCCESS" : "FAILED"); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - -} - -/* - * This fxn called from libFC to clean up driver IO state on link down - */ -void fnic_scsi_cleanup(struct fc_lport *lp) -{ - unsigned long flags; - enum fnic_state old_state; - struct fnic *fnic = lport_priv(lp); - - /* issue fw reset */ -retry_fw_reset: - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { - /* fw reset is in progress, poll for its completion */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - schedule_timeout(msecs_to_jiffies(100)); - goto retry_fw_reset; - } - old_state = fnic->state; - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - if (fnic_fw_reset_handler(fnic)) { - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) - fnic->state = old_state; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - } - -} - -void fnic_empty_scsi_cleanup(struct fc_lport *lp) -{ -} - -void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) -{ - struct fnic *fnic = lport_priv(lp); - - /* Non-zero sid, nothing to do */ - if (sid) - goto call_fc_exch_mgr_reset; - - if (did) { - fnic_rport_exch_reset(fnic, did); - goto call_fc_exch_mgr_reset; - } - - /* - * sid = 0, did = 0 - * link down or device being removed - */ - if (!fnic->in_remove) - fnic_scsi_cleanup(lp); - else - fnic_scsi_abort_io(lp); - - /* call libFC exch mgr reset to reset its exchanges */ -call_fc_exch_mgr_reset: - fc_exch_mgr_reset(lp, sid, did); - + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "host reset return status: %d\n", ret); + return ret; } static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) @@ -2771,7 +2979,7 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) * Found IO that is still pending with firmware and * belongs to the LUN that we are resetting */ - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "hwq: %d tag: 0x%x Found IO in state: %s on lun\n", hwq, tag, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); @@ -2804,8 +3012,81 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) } /* walk again to check, if IOs are still pending in fw */ - scsi_host_busy_iter(fnic->lport->host, + scsi_host_busy_iter(fnic->host, fnic_abts_pending_iter, &iter_data); return iter_data.ret; } + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. If host reset completes + * successfully, and if link is up, then Fabric login begins. + * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. 
+ * + */ +int fnic_eh_host_reset_handler(struct scsi_cmnd *sc) +{ + int ret = 0; + struct Scsi_Host *shost = sc->device->host; + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "SCSI error handling: fnic host reset"); + + ret = fnic_host_reset(shost); + return ret; +} + + +void fnic_scsi_fcpio_reset(struct fnic *fnic) +{ + unsigned long flags; + enum fnic_state old_state; + struct fnic_iport_s *iport = &fnic->iport; + DECLARE_COMPLETION_ONSTACK(fw_reset_done); + int time_remain; + + /* issue fw reset */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { + /* fw reset is in progress, poll for its completion */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic is in unexpected state: %d for fw_reset\n", + fnic->state); + return; + } + + old_state = fnic->state; + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + fnic_update_mac_locked(fnic, iport->hwmac); + fnic->fw_reset_done = &fw_reset_done; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Issuing fw reset\n"); + if (fnic_fw_reset_handler(fnic)) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + fnic->state = old_state; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + } else { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Waiting for fw completion\n"); + time_remain = wait_for_completion_timeout(&fw_reset_done, + msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT)); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Woken up after fw completion timeout\n"); + if (time_remain == 0) { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FW reset completion timed out after %d ms)\n", + FNIC_FW_RESET_TIMEOUT); + } + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); + } + fnic->fw_reset_done = NULL; +} diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h index 9d7f98c452dd..8ddd20401a59 100644 --- a/drivers/scsi/fnic/fnic_stats.h +++ b/drivers/scsi/fnic/fnic_stats.h @@ -3,6 +3,7 @@ #ifndef _FNIC_STATS_H_ #define _FNIC_STATS_H_ #define FNIC_MQ_MAX_QUEUES 64 +#include <scsi/scsi_transport_fc.h> struct stats_timestamps { struct timespec64 last_reset_time; @@ -63,6 +64,7 @@ struct reset_stats { atomic64_t fw_resets; atomic64_t fw_reset_completions; atomic64_t fw_reset_failures; + atomic64_t fw_reset_timeouts; atomic64_t fnic_resets; atomic64_t fnic_reset_completions; atomic64_t fnic_reset_failures; @@ -102,10 +104,51 @@ struct misc_stats { atomic64_t no_icmnd_itmf_cmpls; atomic64_t check_condition; atomic64_t queue_fulls; - atomic64_t rport_not_ready; + atomic64_t tport_not_ready; + atomic64_t iport_not_ready; atomic64_t frame_errors; atomic64_t current_port_speed; atomic64_t intx_dummy; + atomic64_t port_speed_in_mbps; +}; + +struct fnic_iport_stats { + atomic64_t num_linkdn; + atomic64_t num_linkup; + atomic64_t link_failure_count; + atomic64_t num_rscns; + atomic64_t rscn_redisc; + atomic64_t rscn_not_redisc; + atomic64_t frame_err; + atomic64_t num_rnid; + atomic64_t fabric_flogi_sent; + atomic64_t fabric_flogi_ls_accepts; + atomic64_t fabric_flogi_ls_rejects; + atomic64_t fabric_flogi_misc_rejects; + atomic64_t fabric_plogi_sent; + atomic64_t fabric_plogi_ls_accepts; + atomic64_t fabric_plogi_ls_rejects; + atomic64_t fabric_plogi_misc_rejects; + atomic64_t fabric_scr_sent; + atomic64_t 
fabric_scr_ls_accepts; + atomic64_t fabric_scr_ls_rejects; + atomic64_t fabric_scr_misc_rejects; + atomic64_t fabric_logo_sent; + atomic64_t tport_alive; + atomic64_t tport_plogi_sent; + atomic64_t tport_plogi_ls_accepts; + atomic64_t tport_plogi_ls_rejects; + atomic64_t tport_plogi_misc_rejects; + atomic64_t tport_prli_sent; + atomic64_t tport_prli_ls_accepts; + atomic64_t tport_prli_ls_rejects; + atomic64_t tport_prli_misc_rejects; + atomic64_t tport_adisc_sent; + atomic64_t tport_adisc_ls_accepts; + atomic64_t tport_adisc_ls_rejects; + atomic64_t tport_logo_sent; + atomic64_t unsupported_frames_ls_rejects; + atomic64_t unsupported_frames_dropped; }; struct fnic_stats { @@ -116,6 +159,7 @@ struct fnic_stats { struct reset_stats reset_stats; struct fw_stats fw_stats; struct vlan_stats vlan_stats; + struct fc_host_statistics host_stats; struct misc_stats misc_stats; }; @@ -127,6 +171,5 @@ struct stats_debug_info { }; int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); -void fnic_stats_debugfs_init(struct fnic *); -void fnic_stats_debugfs_remove(struct fnic *); +const char *fnic_role_to_str(unsigned int role); #endif /* _FNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index aaa4ea02fb7c..cdc6b12b1ec2 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -8,6 +8,7 @@ #include <linux/kallsyms.h> #include <linux/time.h> #include <linux/vmalloc.h> +#include <scsi/scsi_transport_fc.h> #include "fnic_io.h" #include "fnic.h" @@ -29,6 +30,17 @@ int fnic_fc_tracing_enabled = 1; int fnic_fc_trace_cleared = 1; static DEFINE_SPINLOCK(fnic_fc_trace_lock); +static const char * const fnic_role_str[] = { + [FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator", +}; + +const char *fnic_role_to_str(unsigned int role) +{ + if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role]) + return "Unknown"; + + return fnic_role_str[role]; +} /* * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information @@ -423,7 +435,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug, "Number of Check Conditions encountered: %lld\n" "Number of QUEUE Fulls: %lld\n" "Number of rport not ready: %lld\n" - "Number of receive frame errors: %lld\n", + "Number of receive frame errors: %lld\n" + "Port speed (in Mbps): %lld\n", (u64)stats->misc_stats.last_isr_time, (s64)val1.tv_sec, val1.tv_nsec, (u64)stats->misc_stats.last_ack_time, @@ -446,18 +459,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug, (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), (u64)atomic64_read(&stats->misc_stats.check_condition), (u64)atomic64_read(&stats->misc_stats.queue_fulls), - (u64)atomic64_read(&stats->misc_stats.rport_not_ready), - (u64)atomic64_read(&stats->misc_stats.frame_errors)); - - len += scnprintf(debug->debug_buffer + len, buf_size - len, - "Firmware reported port speed: %llu\n", - (u64)atomic64_read( - &stats->misc_stats.current_port_speed)); + (u64)atomic64_read(&stats->misc_stats.tport_not_ready), + (u64)atomic64_read(&stats->misc_stats.frame_errors), + (u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps)); return len; } +int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic) +{ + struct fnic_iport_s *iport = &fnic->iport; + int buf_size = info->buf_size; + int len = info->buffer_len; + struct fnic_tport_s *tport, *next; + unsigned long flags; + + len += snprintf(info->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\t Debug Info\n" + 
"------------------------------------------\n"); + len += snprintf(info->debug_buffer + len, buf_size - len, + "fnic Name:%s number:%d Role:%s State:%s\n", + fnic->name, fnic->fnic_num, + fnic_role_to_str(fnic->role), + fnic_state_to_str(fnic->state)); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n", + iport->state, iport->flags, iport->vlan_id, iport->fcid); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "usefip:%d fip_state:%d fip_flogi_retry:%d\n", + iport->usefip, iport->fip.state, iport->fip.flogi_retry); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fpma %02x:%02x:%02x:%02x:%02x:%02x", + iport->fpma[5], iport->fpma[4], iport->fpma[3], + iport->fpma[2], iport->fpma[1], iport->fpma[0]); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n", + iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3], + iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n", + iport->fabric.state, iport->fabric.flags, + iport->fabric.retry_counter, iport->e_d_tov, + iport->r_a_tov); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + len += snprintf(info->debug_buffer + len, buf_size - len, + "tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n", + tport->fcid, tport->state, tport->flags, + atomic_read(&tport->in_flight), + tport->retry_counter); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return len; +} + /* * fnic_trace_buf_init - Initialize fnic trace buffer logging facility * @@ -485,8 +548,7 @@ int fnic_trace_buf_init(void) } fnic_trace_entries.page_offset = - vmalloc(array_size(fnic_max_trace_entries, - sizeof(unsigned long))); + vcalloc(fnic_max_trace_entries, sizeof(unsigned long)); if (!fnic_trace_entries.page_offset) { printk(KERN_ERR PFX "Failed to allocate memory for" " page_offset\n"); @@ -497,8 +559,6 @@ int fnic_trace_buf_init(void) err = -ENOMEM; goto err_fnic_trace_buf_init; } - memset((void *)fnic_trace_entries.page_offset, 0, - (fnic_max_trace_entries * sizeof(unsigned long))); fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0; fnic_buf_head = fnic_trace_buf_p; @@ -559,8 +619,7 @@ int fnic_fc_trace_init(void) fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ FC_TRC_SIZE_BYTES; fnic_fc_ctlr_trace_buf_p = - (unsigned long)vmalloc(array_size(PAGE_SIZE, - fnic_fc_trace_max_pages)); + (unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE); if (!fnic_fc_ctlr_trace_buf_p) { pr_err("fnic: Failed to allocate memory for " "FC Control Trace Buf\n"); @@ -568,13 +627,9 @@ int fnic_fc_trace_init(void) goto err_fnic_fc_ctlr_trace_buf_init; } - memset((void *)fnic_fc_ctlr_trace_buf_p, 0, - fnic_fc_trace_max_pages * PAGE_SIZE); - /* Allocate memory for page offset */ fc_trace_entries.page_offset = - vmalloc(array_size(fc_trace_max_entries, - sizeof(unsigned long))); + vcalloc(fc_trace_max_entries, sizeof(unsigned long)); if (!fc_trace_entries.page_offset) { pr_err("fnic:Failed to allocate memory for page_offset\n"); if (fnic_fc_ctlr_trace_buf_p) { @@ -585,8 +640,6 @@ int fnic_fc_trace_init(void) err = -ENOMEM; goto err_fnic_fc_ctlr_trace_buf_init; } - memset((void *)fc_trace_entries.page_offset, 0, - (fc_trace_max_entries * sizeof(unsigned long))); fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; 
fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; @@ -688,7 +741,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, */ if (frame_type == FNIC_FC_RECV) { eth_fcoe_hdr_len = sizeof(struct ethhdr) + - sizeof(struct fcoe_hdr); + sizeof(struct fcoe_hdr); memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); /* Copy the rest of data frame */ memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index f6305e3e60f4..270eae7ac427 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -110,6 +110,7 @@ module_param_array(card, int, NULL, 0); MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)"); MODULE_ALIAS("g_NCR5380_mmio"); +MODULE_DESCRIPTION("Generic NCR5380/NCR53C400 SCSI driver"); MODULE_LICENSE("GPL"); static void g_NCR5380_trigger_irq(struct Scsi_Host *instance) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 1e4550156b73..1323ed8aa717 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -9,7 +9,6 @@ #include <linux/acpi.h> #include <linux/blk-mq.h> -#include <linux/blk-mq-pci.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/dmapool.h> @@ -47,6 +46,13 @@ #define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10 #define HISI_SAS_FIFO_DATA_DW_SIZE 32 +#define HISI_SAS_REG_MEM_SIZE 4 +#define HISI_SAS_MAX_CDB_LEN 16 +#define HISI_SAS_BLK_QUEUE_DEPTH 64 + +#define BYTE_TO_DW 4 +#define BYTE_TO_DDW 8 + #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer)) #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) @@ -93,6 +99,8 @@ #define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ) #define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ) +#define HISI_SAS_DELAY_FOR_PHY_DISABLE 100 +#define NAME_BUF_SIZE 256 struct hisi_hba; @@ -168,6 +176,8 @@ struct hisi_sas_debugfs_fifo { u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE]; }; +#define FRAME_RCVD_BUF 32 +#define SAS_PHY_RESV_SIZE 2 struct hisi_sas_phy { struct work_struct works[HISI_PHYES_NUM]; struct hisi_hba *hisi_hba; @@ -179,10 +189,10 @@ struct hisi_sas_phy { spinlock_t lock; u64 port_id; /* from hw */ u64 frame_rcvd_size; - u8 frame_rcvd[32]; + u8 frame_rcvd[FRAME_RCVD_BUF]; u8 phy_attached; u8 in_reset; - u8 reserved[2]; + u8 reserved[SAS_PHY_RESV_SIZE]; u32 phy_type; u32 code_violation_err_count; enum sas_linkrate minimum_linkrate; @@ -307,6 +317,7 @@ enum { struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); + int (*fw_info_check)(struct hisi_hba *hisi_hba); int (*interrupt_preinit)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *device); @@ -348,7 +359,8 @@ struct hisi_sas_hw { const struct scsi_host_template *sht; }; -#define HISI_SAS_MAX_DEBUGFS_DUMP (50) +#define HISI_SAS_MAX_DEBUGFS_DUMP 50 +#define HISI_SAS_DEFAULT_DEBUGFS_DUMP 1 struct hisi_sas_debugfs_cq { struct hisi_sas_cq *cq; @@ -448,12 +460,12 @@ struct hisi_hba { dma_addr_t sata_breakpoint_dma; struct hisi_sas_slot *slot_info; unsigned long flags; - const struct hisi_sas_hw *hw; /* Low level hw interface */ + const struct hisi_sas_hw *hw; /* Low level hw interface */ unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; struct work_struct rst_work; u32 phy_state; - u32 intr_coal_ticks; /* Time of interrupt coalesce in us */ - u32 intr_coal_count; /* Interrupt count to coalesce */ + u32 intr_coal_ticks; /* Time of interrupt coalesce in us */ + u32 intr_coal_count; /* Interrupt count to 
coalesce */ int cq_nvecs; @@ -528,12 +540,13 @@ struct hisi_sas_cmd_hdr { __le64 dif_prd_table_addr; }; +#define ITCT_RESV_DDW 12 struct hisi_sas_itct { __le64 qw0; __le64 sas_addr; __le64 qw2; __le64 qw3; - __le64 qw4_15[12]; + __le64 qw4_15[ITCT_RESV_DDW]; }; struct hisi_sas_iost { @@ -543,22 +556,26 @@ struct hisi_sas_iost { __le64 qw3; }; +#define ERROR_RECORD_BUF_DW 4 struct hisi_sas_err_record { - u32 data[4]; + u32 data[ERROR_RECORD_BUF_DW]; }; +#define FIS_RESV_DW 3 struct hisi_sas_initial_fis { struct hisi_sas_err_record err_record; struct dev_to_host_fis fis; - u32 rsvd[3]; + u32 rsvd[FIS_RESV_DW]; }; +#define BREAKPOINT_DATA_SIZE 128 struct hisi_sas_breakpoint { - u8 data[128]; + u8 data[BREAKPOINT_DATA_SIZE]; }; +#define BREAKPOINT_TAG_NUM 32 struct hisi_sas_sata_breakpoint { - struct hisi_sas_breakpoint tag[32]; + struct hisi_sas_breakpoint tag[BREAKPOINT_TAG_NUM]; }; struct hisi_sas_sge { @@ -569,13 +586,15 @@ struct hisi_sas_sge { __le32 data_off; }; +#define SMP_CMD_TABLE_SIZE 44 struct hisi_sas_command_table_smp { - u8 bytes[44]; + u8 bytes[SMP_CMD_TABLE_SIZE]; }; +#define DUMMY_BUF_SIZE 12 struct hisi_sas_command_table_stp { struct host_to_dev_fis command_fis; - u8 dummy[12]; + u8 dummy[DUMMY_BUF_SIZE]; u8 atapi_cdb[ATAPI_CDB_LEN]; }; @@ -589,12 +608,13 @@ struct hisi_sas_sge_dif_page { struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT]; } __aligned(16); +#define PROT_BUF_SIZE 7 struct hisi_sas_command_table_ssp { struct ssp_frame_hdr hdr; union { struct { struct ssp_command_iu task; - u32 prot[7]; + u32 prot[PROT_BUF_SIZE]; }; struct ssp_tmf_iu ssp_task; struct xfer_rdy_iu xfer_rdy; @@ -608,9 +628,10 @@ union hisi_sas_command_table { struct hisi_sas_command_table_stp stp; } __aligned(16); +#define IU_BUF_SIZE 1024 struct hisi_sas_status_buffer { struct hisi_sas_err_record err; - u8 iu[1024]; + u8 iu[IU_BUF_SIZE]; } __aligned(16); struct hisi_sas_slot_buf_table { @@ -633,8 +654,7 @@ extern struct dentry *hisi_sas_debugfs_dir; extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); extern int hisi_sas_alloc(struct hisi_hba *hisi_hba); extern void hisi_sas_free(struct hisi_hba *hisi_hba); -extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, - int direction); +extern u8 hisi_sas_get_ata_protocol(struct sas_task *task); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot); @@ -643,8 +663,8 @@ extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); extern void hisi_sas_remove(struct platform_device *pdev); -extern int hisi_sas_slave_configure(struct scsi_device *sdev); -extern int hisi_sas_slave_alloc(struct scsi_device *sdev); +int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim); +extern int hisi_sas_sdev_init(struct scsi_device *sdev); extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time); extern void hisi_sas_scan_start(struct Scsi_Host *shost); extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 35f8e00850d6..4864e957be0b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -7,6 +7,16 @@ #include "hisi_sas.h" #define DRV_NAME "hisi_sas" +#define LINK_RATE_BIT_MASK 2 +#define FIS_BUF_SIZE 20 +#define WAIT_CMD_COMPLETE_DELAY 100 +#define WAIT_CMD_COMPLETE_TMROUT 5000 +#define DELAY_FOR_LINK_READY 2000 +#define 
BLK_CNT_OPTIMIZE_MARK 64 +#define HZ_TO_MHZ 1000000 +#define DELAY_FOR_SOFTRESET_MAX 1000 +#define DELAY_FOR_SOFTRESET_MIN 900 + #define DEV_IS_GONE(dev) \ ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) @@ -21,8 +31,32 @@ struct hisi_sas_internal_abort_data { bool rst_ha_timeout; /* reset the HA for timeout */ }; -u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) +static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc) +{ + if (!qc) + return HISI_SAS_SATA_PROTOCOL_PIO; + + switch (qc->tf.protocol) { + case ATA_PROT_NODATA: + return HISI_SAS_SATA_PROTOCOL_NONDATA; + case ATA_PROT_PIO: + return HISI_SAS_SATA_PROTOCOL_PIO; + case ATA_PROT_DMA: + return HISI_SAS_SATA_PROTOCOL_DMA; + case ATA_PROT_NCQ_NODATA: + case ATA_PROT_NCQ: + return HISI_SAS_SATA_PROTOCOL_FPDMA; + default: + return HISI_SAS_SATA_PROTOCOL_PIO; + } +} + +u8 hisi_sas_get_ata_protocol(struct sas_task *task) { + struct host_to_dev_fis *fis = &task->ata_task.fis; + struct ata_queued_cmd *qc = task->uldd_task; + int direction = task->data_dir; + switch (fis->command) { case ATA_CMD_FPDMA_WRITE: case ATA_CMD_FPDMA_READ: @@ -90,11 +124,9 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) } default: - { if (direction == DMA_NONE) return HISI_SAS_SATA_PROTOCOL_NONDATA; - return HISI_SAS_SATA_PROTOCOL_PIO; - } + return hisi_sas_get_ata_protocol_from_tf(qc); } } EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); @@ -107,7 +139,7 @@ void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_status_buffer *status_buf = hisi_sas_status_buf_addr_mem(slot); u8 *iu = &status_buf->iu[0]; - struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; + struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; resp->frame_len = sizeof(struct dev_to_host_fis); memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); @@ -127,7 +159,7 @@ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) max -= SAS_LINK_RATE_1_5_GBPS; for (i = 0; i <= max; i++) - rate |= 1 << (i * 2); + rate |= 1 << (i * LINK_RATE_BIT_MASK); return rate; } EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); @@ -805,13 +837,13 @@ static int hisi_sas_init_device(struct domain_device *device) return rc; } -int hisi_sas_slave_alloc(struct scsi_device *sdev) +int hisi_sas_sdev_init(struct scsi_device *sdev) { struct domain_device *ddev = sdev_to_domain_dev(sdev); struct hisi_sas_device *sas_dev = ddev->lldd_dev; int rc; - rc = sas_slave_alloc(sdev); + rc = sas_sdev_init(sdev); if (rc) return rc; @@ -821,7 +853,7 @@ int hisi_sas_slave_alloc(struct scsi_device *sdev) sas_dev->dev_status = HISI_SAS_DEV_NORMAL; return 0; } -EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); +EXPORT_SYMBOL_GPL(hisi_sas_sdev_init); static int hisi_sas_dev_found(struct domain_device *device) { @@ -868,19 +900,19 @@ err_out: return rc; } -int hisi_sas_slave_configure(struct scsi_device *sdev) +int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); + int ret = sas_sdev_configure(sdev, lim); if (ret) return ret; if (!dev_is_sata(dev)) - sas_change_queue_depth(sdev, 64); + sas_change_queue_depth(sdev, HISI_SAS_BLK_QUEUE_DEPTH); return 0; } -EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); +EXPORT_SYMBOL_GPL(hisi_sas_sdev_configure); void hisi_sas_scan_start(struct Scsi_Host *shost) { @@ -911,8 +943,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work, container_of(work, typeof(*phy), works[event]); struct 
hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct hisi_sas_port *port = phy->port; + struct device *dev = hisi_hba->dev; + struct domain_device *port_dev; int phy_no = sas_phy->id; + if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) && + sas_port && port && (port->id != phy->port_id)) { + dev_info(dev, "phy%d's hw port id changed from %d to %llu\n", + phy_no, port->id, phy->port_id); + port_dev = sas_port->port_dev; + if (port_dev && !dev_is_expander(port_dev->dev_type)) { + /* + * Set the device state to gone to block + * sending IO to the device. + */ + set_bit(SAS_DEV_GONE, &port_dev->state); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + return; + } + } + phy->wait_phyup_cnt = 0; if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); @@ -1218,7 +1270,7 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, sas_phy->phy->minimum_linkrate = min; hisi_sas_phy_enable(hisi_hba, phy_no, 0); - msleep(100); + msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE); hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); hisi_sas_phy_enable(hisi_hba, phy_no, 1); @@ -1248,7 +1300,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, case PHY_FUNC_LINK_RESET: hisi_sas_phy_enable(hisi_hba, phy_no, 0); - msleep(100); + msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE); hisi_sas_phy_enable(hisi_hba, phy_no, 1); break; @@ -1303,7 +1355,7 @@ static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, static int hisi_sas_softreset_ata_disk(struct domain_device *device) { - u8 fis[20] = {0}; + u8 fis[FIS_BUF_SIZE] = {0}; struct ata_port *ap = device->sata_dev.ap; struct ata_link *link; int rc = TMF_RESP_FUNC_FAILED; @@ -1320,6 +1372,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) } if (rc == TMF_RESP_FUNC_COMPLETE) { + usleep_range(DELAY_FOR_SOFTRESET_MIN, DELAY_FOR_SOFTRESET_MAX); ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); @@ -1383,6 +1436,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { + u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba); struct asd_sas_port *_sas_port = NULL; int phy_no; @@ -1396,7 +1450,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) continue; /* Report PHY state change to libsas */ - if (state & BIT(phy_no)) { + if (new_state & BIT(phy_no)) { if (do_port_check && sas_port && sas_port->port_dev) { struct domain_device *dev = sas_port->port_dev; @@ -1409,6 +1463,16 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) } } else { hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); + + /* + * The new_state is not ready but old_state is ready, + * the two possible causes: + * 1. The connected device is removed + * 2. 
Device exists but phyup timed out + */ + if (state & BIT(phy_no)) + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); } } } @@ -1438,7 +1502,7 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; struct ata_link *link; - u8 fis[20] = {0}; + u8 fis[FIS_BUF_SIZE] = {0}; int i; for (i = 0; i < hisi_hba->n_phy; i++) { @@ -1505,14 +1569,16 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); scsi_block_requests(shost); - hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); + hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, + WAIT_CMD_COMPLETE_DELAY, + WAIT_CMD_COMPLETE_TMROUT); /* * hisi_hba->timer is only used for v1/v2 hw, and check hw->sht * which is also only used for v1/v2 hw to skip it for v3 hw */ if (hisi_hba->hw->sht) - del_timer_sync(&hisi_hba->timer); + timer_delete_sync(&hisi_hba->timer); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); } @@ -1544,10 +1610,16 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) /* Init and wait for PHYs to come up and all libsas event finished. */ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; - if (!(hisi_hba->phy_state & BIT(phy_no))) + if (!sas_phy->phy->enabled) continue; + if (!(hisi_hba->phy_state & BIT(phy_no))) { + hisi_sas_phy_enable(hisi_hba, phy_no, 1); + continue; + } + async_schedule_domain(hisi_sas_async_init_wait_phyup, phy, &async); } @@ -1800,7 +1872,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT, smp_ata_check_ready_type); } else { - msleep(2000); + msleep(DELAY_FOR_LINK_READY); } return rc; @@ -1823,33 +1895,14 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) } hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_debug_I_T_nexus_reset(device); - if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { - struct sas_phy *local_phy; - + if (dev_is_sata(device)) { rc = hisi_sas_softreset_ata_disk(device); - switch (rc) { - case -ECOMM: - rc = -ENODEV; - break; - case TMF_RESP_FUNC_FAILED: - case -EMSGSIZE: - case -EIO: - local_phy = sas_get_local_phy(device); - rc = sas_phy_enable(local_phy, 0); - if (!rc) { - local_phy->enabled = 0; - dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", - SAS_ADDR(device->sas_addr), rc); - rc = -ENODEV; - } - sas_put_local_phy(local_phy); - break; - default: - break; - } + if (rc == TMF_RESP_FUNC_FAILED) + dev_err(dev, "ata disk %016llx reset (%d)\n", + SAS_ADDR(device->sas_addr), rc); } + rc = hisi_sas_debug_I_T_nexus_reset(device); if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) hisi_sas_release_task(hisi_hba, device); @@ -1872,12 +1925,9 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) hisi_sas_dereg_device(hisi_hba, device); if (dev_is_sata(device)) { - struct sas_phy *phy; - - phy = sas_get_local_phy(device); + struct sas_phy *phy = sas_get_local_phy(device); rc = sas_phy_reset(phy, true); - if (rc == 0) hisi_sas_release_task(hisi_hba, device); sas_put_local_phy(phy); @@ -2061,7 +2111,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); hisi_sas_port_notify_formed(sas_phy); } else { - struct hisi_sas_port *port = phy->port; + struct 
hisi_sas_port *port = phy->port; if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || phy->in_reset) { @@ -2234,12 +2284,14 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) goto err_out; /* roundup to avoid overly large block size */ - max_command_entries_ru = roundup(max_command_entries, 64); + max_command_entries_ru = roundup(max_command_entries, + BLK_CNT_OPTIMIZE_MARK); if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); else sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); - sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); + + sz_slot_buf_ru = roundup(sz_slot_buf_ru, BLK_CNT_OPTIMIZE_MARK); s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; slots_per_blk = s / sz_slot_buf_ru; @@ -2301,7 +2353,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) hisi_hba->last_slot_index = 0; - hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); + hisi_hba->wq = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev)); if (!hisi_hba->wq) { dev_err(dev, "sas_alloc: failed to create workqueue\n"); goto err_out; @@ -2320,7 +2373,7 @@ void hisi_sas_free(struct hisi_hba *hisi_hba) for (i = 0; i < hisi_hba->n_phy; i++) { struct hisi_sas_phy *phy = &hisi_hba->phy[i]; - del_timer_sync(&phy->timer); + timer_delete_sync(&phy->timer); } if (hisi_hba->wq) @@ -2403,7 +2456,8 @@ int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) if (IS_ERR(refclk)) dev_dbg(dev, "no ref clk property\n"); else - hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; + hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / + HZ_TO_MHZ; if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { dev_err(dev, "could not get property phy-count\n"); @@ -2448,6 +2502,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (hisi_hba->hw->fw_info_check) { + if (hisi_hba->hw->fw_info_check(hisi_hba)) + goto err_out; + } + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (error) { dev_err(dev, "No usable DMA addressing method\n"); @@ -2520,7 +2579,7 @@ int hisi_sas_probe(struct platform_device *pdev, shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; - shost->max_cmd_len = 16; + shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; if (hisi_hba->hw->slot_index_alloc) { shost->can_queue = HISI_SAS_MAX_COMMANDS; shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; @@ -2577,7 +2636,7 @@ void hisi_sas_remove(struct platform_device *pdev) struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->shost; - del_timer_sync(&hisi_hba->timer); + timer_delete_sync(&hisi_hba->timer); sas_unregister_ha(sha); sas_remove_host(shost); @@ -2628,10 +2687,10 @@ static __init int hisi_sas_init(void) static __exit void hisi_sas_exit(void) { - sas_release_transport(hisi_sas_stt); - if (hisi_sas_debugfs_enable) debugfs_remove(hisi_sas_debugfs_dir); + + sas_release_transport(hisi_sas_stt); } module_init(hisi_sas_init); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 3c555579f9a1..6d97339371fb 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1734,35 +1734,37 @@ static struct attribute *host_v1_hw_attrs[] = { ATTRIBUTE_GROUPS(host_v1_hw); +static int check_fw_info_v1_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || 
hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 32) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v1_hw = { - .name = DRV_NAME, - .proc_name = DRV_NAME, - .module = THIS_MODULE, - .queuecommand = sas_queuecommand, - .dma_need_drain = ata_scsi_dma_need_drain, - .target_alloc = sas_target_alloc, - .slave_configure = hisi_sas_slave_configure, + LIBSAS_SHT_BASE_NO_SLAVE_INIT + .sdev_configure = hisi_sas_sdev_configure, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, - .this_id = -1, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = hisi_sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sas_ioctl, -#endif + .sdev_init = hisi_sas_sdev_init, .shost_groups = host_v1_hw_groups, - .host_reset = hisi_sas_host_reset, + .host_reset = hisi_sas_host_reset, }; static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, + .fw_info_check = check_fw_info_v1_hw, .setup_itct = setup_itct_v1_hw, .sl_notify_ssp = sl_notify_ssp_v1_hw, .clear_itct = clear_itct_v1_hw, @@ -1800,11 +1802,11 @@ MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match); static struct platform_driver hisi_sas_v1_driver = { .probe = hisi_sas_v1_probe, - .remove_new = hisi_sas_remove, + .remove = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v1_of_match, - .acpi_match_table = ACPI_PTR(sas_v1_acpi_match), + .acpi_match_table = sas_v1_acpi_match, }, }; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 73b378837da7..2adfedb8484c 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2372,18 +2372,18 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba, case STAT_IO_COMPLETE: /* internal abort command complete */ ts->stat = TMF_RESP_FUNC_SUCC; - del_timer_sync(&slot->internal_abort_timer); + timer_delete_sync(&slot->internal_abort_timer); goto out; case STAT_IO_NO_DEVICE: ts->stat = TMF_RESP_FUNC_COMPLETE; - del_timer_sync(&slot->internal_abort_timer); + timer_delete_sync(&slot->internal_abort_timer); goto out; case STAT_IO_NOT_VALID: /* abort single io, controller don't find * the io need to abort */ ts->stat = TMF_RESP_FUNC_FAILED; - del_timer_sync(&slot->internal_abort_timer); + timer_delete_sync(&slot->internal_abort_timer); goto out; default: break; @@ -2501,6 +2501,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = to_hisi_sas_port(sas_port); struct sas_ata_task *ata_task = &task->ata_task; struct sas_tmf_task *tmf = slot->tmf; + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw0, dw1 = 0, dw2 = 0; @@ -2508,10 +2509,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, /* create header */ /* dw0 */ dw0 = port->id << CMD_HDR_PORT_OFF; - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { dw0 |= 3 << CMD_HDR_CMD_OFF; - else + } else { + phy_id = device->phy->identify.phy_identifier; + dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF; + dw0 |= CMD_HDR_FORCE_PHY_MSK; 
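/* Editor's note, not part of the patch: for a directly attached SATA disk the command is now pinned to the phy the disk sits behind. For example, a phy_identifier of 3 sets bit (CMD_HDR_PHY_ID_OFF + 3) of dw0 via (1U << 3) << CMD_HDR_PHY_ID_OFF, and CMD_HDR_FORCE_PHY_MSK makes the controller honour that selection instead of choosing a phy itself. */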
dw0 |= 4 << CMD_HDR_CMD_OFF; + } if (tmf && ata_task->force_phy) { dw0 |= CMD_HDR_FORCE_PHY_MSK; @@ -2538,9 +2543,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF; - dw1 |= (hisi_sas_get_ata_protocol( - &task->ata_task.fis, task->data_dir)) - << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); @@ -2656,7 +2659,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) if (is_sata_phy_v2_hw(hisi_hba, phy_no)) goto end; - del_timer(&phy->timer); + timer_delete(&phy->timer); if (phy_no == 8) { u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); @@ -2732,7 +2735,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_port *port = phy->port; struct device *dev = hisi_hba->dev; - del_timer(&phy->timer); + timer_delete(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -2746,7 +2749,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id)) if (!check_any_wideports_v2_hw(hisi_hba) && timer_pending(&hisi_hba->timer)) - del_timer(&hisi_hba->timer); + timer_delete(&hisi_hba->timer); txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, @@ -2768,7 +2771,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; while (irq_msk) { - if (irq_msk & 1) { + if (irq_msk & 1) { u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); @@ -3108,7 +3111,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) return IRQ_HANDLED; } -static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) +static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) { struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; @@ -3206,7 +3209,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; int phy_no, offset; - del_timer(&phy->timer); + timer_delete(&phy->timer); phy_no = sas_phy->id; initial_fis = &hisi_hba->initial_fis[phy_no]; @@ -3496,7 +3499,7 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, * numbered drive in the fourth byte. * See SFF-8485 Rev. 0.7 Table 24. 
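* Worked example (editor's illustration, not part of the patch): with * data_idx = phy_no + 3 - (phy_no % 4) * 2, phy 0 maps to byte 3, phy 1 * to byte 2, phy 4 to byte 7 and phy 5 to byte 6.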
*/ - void __iomem *reg_addr = hisi_hba->sgpio_regs + + void __iomem *reg_addr = hisi_hba->sgpio_regs + reg_index * 4 + phy_no; int data_idx = phy_no + 3 - (phy_no % 4) * 2; @@ -3544,6 +3547,11 @@ static struct attribute *host_v2_hw_attrs[] = { ATTRIBUTE_GROUPS(host_v2_hw); +static const struct attribute_group *sdev_groups_v2_hw[] = { + &sas_ata_sdev_attr_group, + NULL +}; + static void map_queues_v2_hw(struct Scsi_Host *shost) { struct hisi_hba *hisi_hba = shost_priv(shost); @@ -3561,30 +3569,32 @@ static void map_queues_v2_hw(struct Scsi_Host *shost) } } +static int check_fw_info_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v2_hw = { - .name = DRV_NAME, - .proc_name = DRV_NAME, - .module = THIS_MODULE, - .queuecommand = sas_queuecommand, - .dma_need_drain = ata_scsi_dma_need_drain, - .target_alloc = sas_target_alloc, - .slave_configure = hisi_sas_slave_configure, + LIBSAS_SHT_BASE_NO_SLAVE_INIT + .sdev_configure = hisi_sas_sdev_configure, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, - .this_id = -1, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = hisi_sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sas_ioctl, -#endif + .sdev_init = hisi_sas_sdev_init, .shost_groups = host_v2_hw_groups, + .sdev_groups = sdev_groups_v2_hw, .host_reset = hisi_sas_host_reset, .map_queues = map_queues_v2_hw, .host_tagset = 1, @@ -3592,6 +3602,7 @@ static const struct scsi_host_template sht_v2_hw = { static const struct hisi_sas_hw hisi_sas_v2_hw = { .hw_init = hisi_sas_v2_init, + .fw_info_check = check_fw_info_v2_hw, .interrupt_preinit = hisi_sas_v2_interrupt_preinit, .setup_itct = setup_itct_v2_hw, .slot_index_alloc = slot_index_alloc_quirk_v2_hw, @@ -3641,11 +3652,11 @@ MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); static struct platform_driver hisi_sas_v2_driver = { .probe = hisi_sas_v2_probe, - .remove_new = hisi_sas_remove, + .remove = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v2_of_match, - .acpi_match_table = ACPI_PTR(sas_v2_acpi_match), + .acpi_match_table = sas_v2_acpi_match, }, }; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 34f96cc35342..bc5d5356dd00 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -43,6 +43,7 @@ #define CQ_INT_CONVERGE_EN 0xb0 #define CFG_AGING_TIME 0xbc #define HGC_DFX_CFG2 0xc0 +#define CFG_ICT_TIMER_STEP_TRSH 0xc8 #define CFG_ABT_SET_QUERY_IPTT 0xd4 #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) @@ -358,6 +359,10 @@ #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PHY_ID_OFF 8 +#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) +#define CMD_HDR_FORCE_PHY_OFF 17 +#define 
CMD_HDR_FORCE_PHY_MSK (0x1U << CMD_HDR_FORCE_PHY_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 @@ -461,6 +466,12 @@ #define ITCT_HDR_RTOLT_OFF 48 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) +/*debugfs*/ +#define TWO_PARA_PER_LINE 2 +#define FOUR_PARA_PER_LINE 4 +#define DUMP_BUF_SIZE 8 +#define BIST_BUF_SIZE 16 + struct hisi_sas_protect_iu_v3_hw { u32 dw0; u32 lbrtcv; @@ -531,6 +542,43 @@ struct hisi_sas_err_record_v3 { #define BASE_VECTORS_V3_HW 16 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) +#define IRQ_PHY_UP_DOWN_INDEX 1 +#define IRQ_CHL_INDEX 2 +#define IRQ_AXI_INDEX 11 + +#define DELAY_FOR_RESET_HW 100 +#define HDR_SG_MOD 0x2 +#define LUN_SIZE 8 +#define ATTR_PRIO_REGION 9 +#define CDB_REGION 12 +#define PRIO_OFF 3 +#define TMF_REGION 10 +#define TAG_MSB 12 +#define TAG_LSB 13 +#define SMP_FRAME_TYPE 2 +#define SMP_CRC_SIZE 4 +#define HDR_TAG_OFF 3 +#define HOST_NO_OFF 6 +#define PHY_NO_OFF 7 +#define IDENTIFY_REG_READ 6 +#define LINK_RESET_TIMEOUT_OFF 4 +#define DECIMALISM_FLAG 10 +#define WAIT_RETRY 100 +#define WAIT_TMROUT 5000 + +#define ID_DWORD0_INDEX 0 +#define ID_DWORD1_INDEX 1 +#define ID_DWORD2_INDEX 2 +#define ID_DWORD3_INDEX 3 +#define ID_DWORD4_INDEX 4 +#define ID_DWORD5_INDEX 5 +#define TICKS_BIT_INDEX 24 +#define COUNT_BIT_INDEX 8 + +#define PORT_REG_LENGTH 0x100 +#define GLOBAL_REG_LENGTH 0x800 +#define AXI_REG_LENGTH 0x61 +#define RAS_REG_LENGTH 0x10 #define CHNL_INT_STS_MSK 0xeeeeeeee #define CHNL_INT_STS_PHY_MSK 0xe @@ -638,9 +686,12 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, CFG_ICT_TIMER_STEP_TRSH, 0xf4240); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, hisi_sas_intr_conv); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); @@ -682,7 +733,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); - hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); + hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7ffffff); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, 0x30f4240); @@ -803,17 +854,17 @@ static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) identify_buffer = (u32 *)(&identify_frame); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, - __swab32(identify_buffer[0])); + __swab32(identify_buffer[ID_DWORD0_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, - __swab32(identify_buffer[1])); + __swab32(identify_buffer[ID_DWORD1_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, - __swab32(identify_buffer[2])); + __swab32(identify_buffer[ID_DWORD2_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, - __swab32(identify_buffer[3])); + 
__swab32(identify_buffer[ID_DWORD3_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, - __swab32(identify_buffer[4])); + __swab32(identify_buffer[ID_DWORD4_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, - __swab32(identify_buffer[5])); + __swab32(identify_buffer[ID_DWORD5_INDEX])); } static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, @@ -933,7 +984,7 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) /* Disable all of the PHYs */ hisi_sas_stop_phys(hisi_hba); - udelay(50); + udelay(HISI_SAS_DELAY_FOR_PHY_DISABLE); /* Ensure axi bus idle */ ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val, @@ -973,7 +1024,7 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba) return rc; } - msleep(100); + msleep(DELAY_FOR_RESET_HW); init_reg_v3_hw(hisi_hba); if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) { @@ -1022,7 +1073,7 @@ static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) cfg &= ~PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); - mdelay(50); + mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE); state = hisi_sas_read32(hisi_hba, PHY_STATE); if (state & BIT(phy_no)) { @@ -1058,7 +1109,7 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | TX_HARDRST_MSK); } - msleep(100); + msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE); hisi_sas_phy_enable(hisi_hba, phy_no, 1); } @@ -1103,7 +1154,8 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) for (i = 0; i < hisi_hba->n_phy; i++) if (phy_state & BIT(i)) - if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) + if (((phy_port_num_ma >> (i * HISI_SAS_REG_MEM_SIZE)) & 0xf) == + port_id) bitmap |= BIT(i); return bitmap; @@ -1300,10 +1352,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, /* map itct entry */ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; - dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) - + 3) / 4) << CMD_HDR_CFL_OFF) | - ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | - (2 << CMD_HDR_SG_MOD_OFF); + dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + + 3) / BYTE_TO_DW) << CMD_HDR_CFL_OFF) | + ((HISI_SAS_MAX_SSP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_MRFL_OFF) | + (HDR_SG_MOD << CMD_HDR_SG_MOD_OFF); hdr->dw2 = cpu_to_le32(dw2); hdr->transfer_tags = cpu_to_le32(slot->idx); @@ -1323,18 +1375,19 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + sizeof(struct ssp_frame_hdr); - memcpy(buf_cmd, &task->ssp_task.LUN, 8); + memcpy(buf_cmd, &task->ssp_task.LUN, LUN_SIZE); if (!tmf) { - buf_cmd[9] = ssp_task->task_attr; - memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + buf_cmd[ATTR_PRIO_REGION] = ssp_task->task_attr; + memcpy(buf_cmd + CDB_REGION, scsi_cmnd->cmnd, + scsi_cmnd->cmd_len); } else { - buf_cmd[10] = tmf->tmf; + buf_cmd[TMF_REGION] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: case TMF_QUERY_TASK: - buf_cmd[12] = + buf_cmd[TAG_MSB] = (tmf->tag_of_task_to_be_managed >> 8) & 0xff; - buf_cmd[13] = + buf_cmd[TAG_LSB] = tmf->tag_of_task_to_be_managed & 0xff; break; default: @@ -1367,7 +1420,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, unsigned int interval = scsi_prot_interval(scsi_cmnd); unsigned int ilog2_interval = ilog2(interval); - len = (task->total_xfer_len >> ilog2_interval) * 8; + len = (task->total_xfer_len >> ilog2_interval) * + BYTE_TO_DDW; } } @@ -1387,6 +1441,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, 
struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; unsigned int req_len; + u32 cfl; /* req */ sg_req = &task->smp_task.smp_req; @@ -1397,7 +1452,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, /* dw0 */ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ - (2 << CMD_HDR_CMD_OFF)); /* smp */ + (SMP_FRAME_TYPE << CMD_HDR_CMD_OFF)); /* smp */ /* map itct entry */ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | @@ -1405,8 +1460,9 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, (DIR_NO_DATA << CMD_HDR_DIR_OFF)); /* dw2 */ - hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | - (HISI_SAS_MAX_SMP_RESP_SZ / 4 << + cfl = (req_len - SMP_CRC_SIZE) / BYTE_TO_DW; + hdr->dw2 = cpu_to_le32((cfl << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ / BYTE_TO_DW << CMD_HDR_MRFL_OFF)); hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); @@ -1425,15 +1481,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); - else + } else { + phy_id = device->phy->identify.phy_identifier; + hdr->dw0 |= cpu_to_le32((1U << phy_id) + << CMD_HDR_PHY_ID_OFF); + hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); + } switch (task->data_dir) { case DMA_TO_DEVICE: @@ -1452,9 +1514,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF; - dw1 |= (hisi_sas_get_ata_protocol( - &task->ata_task.fis, task->data_dir)) - << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) @@ -1467,12 +1527,13 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct ata_queued_cmd *qc = task->uldd_task; hdr_tag = qc->tag; - task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + task->ata_task.fis.sector_count |= + (u8)(hdr_tag << HDR_TAG_OFF); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } - dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | - 2 << CMD_HDR_SG_MOD_OFF; + dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_CFL_OFF | + HDR_SG_MOD << CMD_HDR_SG_MOD_OFF; hdr->dw2 = cpu_to_le32(dw2); /* dw3 */ @@ -1532,9 +1593,9 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); - port_id = (port_id >> (4 * phy_no)) & 0xf; + port_id = (port_id >> (HISI_SAS_REG_MEM_SIZE * phy_no)) & 0xf; link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); - link_rate = (link_rate >> (phy_no * 4)) & 0xf; + link_rate = (link_rate >> (phy_no * HISI_SAS_REG_MEM_SIZE)) & 0xf; if (port_id == 0xf) { dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); @@ -1567,8 +1628,8 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) sas_phy->oob_mode = SATA_OOB_MODE; attached_sas_addr[0] = 0x50; - attached_sas_addr[6] = shost->host_no; - attached_sas_addr[7] = phy_no; + 
attached_sas_addr[HOST_NO_OFF] = shost->host_no; + attached_sas_addr[PHY_NO_OFF] = phy_no; memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); @@ -1584,7 +1645,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) (struct sas_identify_frame *)frame_rcvd; dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); - for (i = 0; i < 6; i++) { + for (i = 0; i < IDENTIFY_REG_READ; i++) { u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, RX_IDAF_DWORD0 + (i * 4)); frame_rcvd[i] = __swab32(idaf); @@ -1607,7 +1668,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) phy->port_id = port_id; spin_lock(&phy->lock); /* Delete timer and set phy_attached atomically */ - del_timer(&phy->timer); + timer_delete(&phy->timer); phy->phy_attached = 1; spin_unlock(&phy->lock); @@ -1641,7 +1702,7 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) atomic_inc(&phy->down_cnt); - del_timer(&phy->timer); + timer_delete(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -1689,7 +1750,7 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & 0x11111111; while (irq_msk) { - if (irq_msk & 1) { + if (irq_msk & 1) { u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -1854,7 +1915,7 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) dev_warn(dev, "phy%d stp link timeout (0x%x)\n", phy_no, reg_value); - if (reg_value & BIT(4)) + if (reg_value & BIT(LINK_RESET_TIMEOUT_OFF)) hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } @@ -1912,8 +1973,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) u32 irq_msk; int phy_no = 0; - irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) - & CHNL_INT_STS_MSK; + irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & CHNL_INT_STS_MSK; while (irq_msk) { if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH))) @@ -2421,7 +2481,7 @@ out: spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%pK) ignored\n ", + dev_info(dev, "slot complete: task(%pK) ignored\n", task); return; } @@ -2493,6 +2553,7 @@ static int complete_v3_hw(struct hisi_sas_cq *cq) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + cond_resched(); return completed; } @@ -2557,7 +2618,6 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) if (vectors < 0) return -ENOENT; - hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt; shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt; @@ -2570,7 +2630,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) struct pci_dev *pdev = hisi_hba->pci_dev; int rc, i; - rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), + rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), int_phy_up_down_bcast_v3_hw, 0, DRV_NAME " phy", hisi_hba); if (rc) { @@ -2578,7 +2638,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) return -ENOENT; } - rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), + rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), int_chnl_int_v3_hw, 0, DRV_NAME " channel", hisi_hba); if (rc) { @@ -2586,7 +2646,7 @@ static int 
interrupt_init_v3_hw(struct hisi_hba *hisi_hba) return -ENOENT; } - rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), + rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), fatal_axi_int_v3_hw, 0, DRV_NAME " fatal", hisi_hba); if (rc) { @@ -2599,7 +2659,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - int nr = hisi_sas_intr_conv ? 16 : 16 + i; + int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW : + BASE_VECTORS_V3_HW + i; unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : IRQF_ONESHOT; @@ -2657,14 +2718,14 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) struct pci_dev *pdev = hisi_hba->pci_dev; int i; - synchronize_irq(pci_irq_vector(pdev, 1)); - synchronize_irq(pci_irq_vector(pdev, 2)); - synchronize_irq(pci_irq_vector(pdev, 11)); + synchronize_irq(pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX)); + synchronize_irq(pci_irq_vector(pdev, IRQ_CHL_INDEX)); + synchronize_irq(pci_irq_vector(pdev, IRQ_AXI_INDEX)); for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); for (i = 0; i < hisi_hba->cq_nvecs; i++) - synchronize_irq(pci_irq_vector(pdev, i + 16)); + synchronize_irq(pci_irq_vector(pdev, i + BASE_VECTORS_V3_HW)); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); @@ -2696,7 +2757,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_stop_phys(hisi_hba); - mdelay(10); + mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE); reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL); @@ -2796,14 +2857,15 @@ static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) { /* config those registers between enable and disable PHYs */ hisi_sas_stop_phys(hisi_hba); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); if (hisi_hba->intr_coal_ticks == 0 || hisi_hba->intr_coal_count == 0) { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); } else { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, hisi_hba->intr_coal_ticks); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, @@ -2832,13 +2894,13 @@ static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev, u32 intr_coal_ticks; int ret; - ret = kstrtou32(buf, 10, &intr_coal_ticks); + ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_ticks); if (ret) { dev_err(dev, "Input data of interrupt coalesce unmatch\n"); return -EINVAL; } - if (intr_coal_ticks >= BIT(24)) { + if (intr_coal_ticks >= BIT(TICKS_BIT_INDEX)) { dev_err(dev, "intr_coal_ticks must be less than 2^24!\n"); return -EINVAL; } @@ -2871,13 +2933,13 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev, u32 intr_coal_count; int ret; - ret = kstrtou32(buf, 10, &intr_coal_count); + ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_count); if (ret) { dev_err(dev, "Input data of interrupt coalesce unmatch\n"); return -EINVAL; } - if (intr_coal_count >= BIT(8)) { + if (intr_coal_count >= BIT(COUNT_BIT_INDEX)) { dev_err(dev, "intr_coal_count must be less than 2^8!\n"); return -EINVAL; } @@ -2902,11 +2964,12 @@ static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev, } static 
DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw); -static int slave_configure_v3_hw(struct scsi_device *sdev) +static int sdev_configure_v3_hw(struct scsi_device *sdev, + struct queue_limits *lim) { struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev); struct hisi_hba *hisi_hba = shost_priv(shost); - int ret = hisi_sas_slave_configure(sdev); + int ret = hisi_sas_sdev_configure(sdev, lim); struct device *dev = hisi_hba->dev; if (ret) @@ -2937,6 +3000,11 @@ static struct attribute *host_v3_hw_attrs[] = { ATTRIBUTE_GROUPS(host_v3_hw); +static const struct attribute_group *sdev_groups_v3_hw[] = { + &sas_ata_sdev_attr_group, + NULL +}; + #define HISI_SAS_DEBUGFS_REG(x) {#x, x} struct hisi_sas_debugfs_reg_lu { @@ -3003,7 +3071,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_port_reg = { .lu = debugfs_port_reg_lu, - .count = 0x100, + .count = PORT_REG_LENGTH, .base_off = PORT_BASE, }; @@ -3077,7 +3145,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_global_reg = { .lu = debugfs_global_reg_lu, - .count = 0x800, + .count = GLOBAL_REG_LENGTH, }; static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { @@ -3090,7 +3158,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_axi_reg = { .lu = debugfs_axi_reg_lu, - .count = 0x61, + .count = AXI_REG_LENGTH, .base_off = AXI_MASTER_CFG_BASE, }; @@ -3107,7 +3175,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_ras_reg = { .lu = debugfs_ras_reg_lu, - .count = 0x10, + .count = RAS_REG_LENGTH, .base_off = RAS_BASE, }; @@ -3116,7 +3184,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) struct Scsi_Host *shost = hisi_hba->shost; scsi_block_requests(shost); - wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); + wait_cmds_complete_timeout_v3_hw(hisi_hba, WAIT_RETRY, WAIT_TMROUT); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); hisi_sas_sync_cqs(hisi_hba); @@ -3157,7 +3225,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba, return; } - memset(buf, 0, cache_dw_size * 4); + memset(buf, 0, cache_dw_size * BYTE_TO_DW); buf[0] = val; for (i = 1; i < cache_dw_size; i++) @@ -3204,7 +3272,7 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); /* init OOB link rate as 1.5 Gbits */ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; - reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF); + reg_val |= (SAS_LINK_RATE_1_5_GBPS << CFG_PROG_OOB_PHY_LINK_RATE_OFF); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val); /* enable PHY */ @@ -3213,6 +3281,9 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) #define SAS_PHY_BIST_CODE_INIT 0x1 #define SAS_PHY_BIST_CODE1_INIT 0X80 +#define SAS_PHY_BIST_INIT_DELAY 100 +#define SAS_PHY_BIST_LOOP_TEST_0 1 +#define SAS_PHY_BIST_LOOP_TEST_1 2 static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) { u32 reg_val, mode_tmp; @@ -3231,12 +3302,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS], ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE], fix_code[FIXED_CODE_1]); - mode_tmp = path_mode ? 2 : 1; + mode_tmp = path_mode ? 
SAS_PHY_BIST_LOOP_TEST_1 : + SAS_PHY_BIST_LOOP_TEST_0; if (enable) { /* some preparations before bist test */ hisi_sas_bist_test_prep_v3_hw(hisi_hba); - /* set linkrate of bit test*/ + /* set linkrate of bit test */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; @@ -3274,13 +3346,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) SAS_PHY_BIST_CODE1_INIT); } - mdelay(100); + mdelay(SAS_PHY_BIST_INIT_DELAY); reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val); /* clear error bit */ - mdelay(100); + mdelay(SAS_PHY_BIST_INIT_DELAY); hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT); } else { /* disable bist test and recover it */ @@ -3316,40 +3388,25 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost) if (i == HCTX_TYPE_POLL) blk_mq_map_queues(qmap); else - blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, - BASE_VECTORS_V3_HW); + blk_mq_map_hw_queues(qmap, hisi_hba->dev, + BASE_VECTORS_V3_HW); qoff += qmap->nr_queues; } } static const struct scsi_host_template sht_v3_hw = { - .name = DRV_NAME, - .proc_name = DRV_NAME, - .module = THIS_MODULE, - .queuecommand = sas_queuecommand, - .dma_need_drain = ata_scsi_dma_need_drain, - .target_alloc = sas_target_alloc, - .slave_configure = slave_configure_v3_hw, + LIBSAS_SHT_BASE_NO_SLAVE_INIT + .sdev_configure = sdev_configure_v3_hw, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, .map_queues = hisi_sas_map_queues, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, - .this_id = -1, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = hisi_sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sas_ioctl, -#endif + .sdev_init = hisi_sas_sdev_init, .shost_groups = host_v3_hw_groups, - .tag_alloc_policy = BLK_TAG_ALLOC_RR, - .host_reset = hisi_sas_host_reset, + .sdev_groups = sdev_groups_v3_hw, + .tag_alloc_policy_rr = true, + .host_reset = hisi_sas_host_reset, .host_tagset = 1, .mq_poll = queue_complete_v3_hw, }; @@ -3380,6 +3437,23 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw, }; +static int check_fw_info_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 8) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static struct Scsi_Host * hisi_sas_shost_alloc_pci(struct pci_dev *pdev) { @@ -3410,6 +3484,9 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (check_fw_info_v3_hw(hisi_hba) < 0) + goto err_out; + if (experimental_iopoll_q_cnt < 0 || experimental_iopoll_q_cnt >= hisi_hba->queue_count) dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n", @@ -3471,7 +3548,7 @@ static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba) for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data; for (i = 0; i < 
port->count; i++, databuf++) { - offset = port->base_off + 4 * i; + offset = port->base_off + HISI_SAS_REG_MEM_SIZE * i; *databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt, offset); } @@ -3485,7 +3562,8 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba) int i; for (i = 0; i < debugfs_global_reg.count; i++, databuf++) - *databuf = hisi_sas_read32(hisi_hba, 4 * i); + *databuf = hisi_sas_read32(hisi_hba, + HISI_SAS_REG_MEM_SIZE * i); } static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba) @@ -3496,7 +3574,9 @@ static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba) int i; for (i = 0; i < axi->count; i++, databuf++) - *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off); + *databuf = hisi_sas_read32(hisi_hba, + HISI_SAS_REG_MEM_SIZE * i + + axi->base_off); } static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba) @@ -3507,7 +3587,9 @@ static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba) int i; for (i = 0; i < ras->count; i++, databuf++) - *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off); + *databuf = hisi_sas_read32(hisi_hba, + HISI_SAS_REG_MEM_SIZE * i + + ras->base_off); } static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba) @@ -3559,18 +3641,22 @@ debugfs_to_reg_name_v3_hw(int off, int base_off, return NULL; } +static bool debugfs_dump_is_generated_v3_hw(void *p) +{ + return p ? true : false; +} + static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, const struct hisi_sas_debugfs_reg *reg) { int i; for (i = 0; i < reg->count; i++) { - int off = i * 4; + int off = i * HISI_SAS_REG_MEM_SIZE; const char *name; name = debugfs_to_reg_name_v3_hw(off, reg->base_off, reg->lu); - if (name) seq_printf(s, "0x%08x 0x%08x %s\n", off, regs_val[i], name); @@ -3584,6 +3670,9 @@ static int debugfs_global_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *global = s->private; + if (!debugfs_dump_is_generated_v3_hw(global->data)) + return -EPERM; + debugfs_print_reg_v3_hw(global->data, s, &debugfs_global_reg); @@ -3595,6 +3684,9 @@ static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *axi = s->private; + if (!debugfs_dump_is_generated_v3_hw(axi->data)) + return -EPERM; + debugfs_print_reg_v3_hw(axi->data, s, &debugfs_axi_reg); @@ -3606,6 +3698,9 @@ static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *ras = s->private; + if (!debugfs_dump_is_generated_v3_hw(ras->data)) + return -EPERM; + debugfs_print_reg_v3_hw(ras->data, s, &debugfs_ras_reg); @@ -3618,6 +3713,9 @@ static int debugfs_port_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_port *port = s->private; const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg; + if (!debugfs_dump_is_generated_v3_hw(port->data)) + return -EPERM; + debugfs_print_reg_v3_hw(port->data, s, reg_port); return 0; @@ -3631,9 +3729,9 @@ static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index, /* completion header size not fixed per HW version */ seq_printf(s, "index %04d:\n\t", index); - for (i = 1; i <= sz / 8; i++, ptr++) { + for (i = 1; i <= sz / BYTE_TO_DDW; i++, ptr++) { seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); - if (!(i % 2)) + if (!(i % TWO_PARA_PER_LINE)) seq_puts(s, "\n\t"); } @@ -3647,9 +3745,9 @@ static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index, /* completion header size not fixed per HW version */ seq_printf(s, "index %04d:\n\t", index); - for (i = 1; i <= sz / 4; i++, 
ptr++) { + for (i = 1; i <= sz / BYTE_TO_DW; i++, ptr++) { seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); - if (!(i % 4)) + if (!(i % FOUR_PARA_PER_LINE)) seq_puts(s, "\n\t"); } seq_puts(s, "\n"); @@ -3673,6 +3771,9 @@ static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_cq *debugfs_cq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_cq->complete_hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq); @@ -3694,8 +3795,12 @@ static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot, static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) { + struct hisi_sas_debugfs_dq *debugfs_dq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_dq_show_slot_v3_hw(s, slot, s->private); @@ -3709,6 +3814,9 @@ static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_iost *iost = debugfs_iost->iost; int i, max_command_entries = HISI_SAS_MAX_COMMANDS; + if (!debugfs_dump_is_generated_v3_hw(iost)) + return -EPERM; + for (i = 0; i < max_command_entries; i++, iost++) { __le64 *data = &iost->qw0; @@ -3724,10 +3832,13 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private; struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache; - u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW; int i, tab_idx; __le64 *iost; + if (!debugfs_dump_is_generated_v3_hw(iost_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { /* * Data struct of IOST cache: @@ -3751,6 +3862,9 @@ static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_itct *debugfs_itct = s->private; struct hisi_sas_itct *itct = debugfs_itct->itct; + if (!debugfs_dump_is_generated_v3_hw(itct)) + return -EPERM; + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { __le64 *data = &itct->qw0; @@ -3766,10 +3880,13 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private; struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache; - u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW; int i, tab_idx; __le64 *itct; + if (!debugfs_dump_is_generated_v3_hw(itct_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { /* * Data struct of ITCT cache: @@ -3787,82 +3904,81 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) } DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw); -static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) +static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) { u64 *debugfs_timestamp; - int dump_index = hisi_hba->debugfs_dump_index; struct dentry *dump_dentry; struct dentry *dentry; - char name[256]; + char name[NAME_BUF_SIZE]; int p; int c; int d; - snprintf(name, 256, "%d", dump_index); + snprintf(name, NAME_BUF_SIZE, "%d", index); dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); - debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; + debugfs_timestamp = &hisi_hba->debugfs_timestamp[index]; debugfs_create_u64("timestamp", 0400, dump_dentry, debugfs_timestamp); 
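/* Editor's sketch of the resulting layout (assuming debugfs is mounted at /sys/kernel/debug and the top directory is named after the device): dump/<index>/ holds timestamp, global, axi, ras, iost, iost_cache, itct and itct_cache, plus per-object subdirectories port/<phy>, cq/<queue> and dq/<queue>. */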
debugfs_create_file("global", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], + &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL], &debugfs_global_v3_hw_fops); /* Create port dir and files */ dentry = debugfs_create_dir("port", dump_dentry); for (p = 0; p < hisi_hba->n_phy; p++) { - snprintf(name, 256, "%d", p); + snprintf(name, NAME_BUF_SIZE, "%d", p); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_port_reg[dump_index][p], + &hisi_hba->debugfs_port_reg[index][p], &debugfs_port_v3_hw_fops); } /* Create CQ dir and files */ dentry = debugfs_create_dir("cq", dump_dentry); for (c = 0; c < hisi_hba->queue_count; c++) { - snprintf(name, 256, "%d", c); + snprintf(name, NAME_BUF_SIZE, "%d", c); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_cq[dump_index][c], + &hisi_hba->debugfs_cq[index][c], &debugfs_cq_v3_hw_fops); } /* Create DQ dir and files */ dentry = debugfs_create_dir("dq", dump_dentry); for (d = 0; d < hisi_hba->queue_count; d++) { - snprintf(name, 256, "%d", d); + snprintf(name, NAME_BUF_SIZE, "%d", d); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_dq[dump_index][d], + &hisi_hba->debugfs_dq[index][d], &debugfs_dq_v3_hw_fops); } debugfs_create_file("iost", 0400, dump_dentry, - &hisi_hba->debugfs_iost[dump_index], + &hisi_hba->debugfs_iost[index], &debugfs_iost_v3_hw_fops); debugfs_create_file("iost_cache", 0400, dump_dentry, - &hisi_hba->debugfs_iost_cache[dump_index], + &hisi_hba->debugfs_iost_cache[index], &debugfs_iost_cache_v3_hw_fops); debugfs_create_file("itct", 0400, dump_dentry, - &hisi_hba->debugfs_itct[dump_index], + &hisi_hba->debugfs_itct[index], &debugfs_itct_v3_hw_fops); debugfs_create_file("itct_cache", 0400, dump_dentry, - &hisi_hba->debugfs_itct_cache[dump_index], + &hisi_hba->debugfs_itct_cache[index], &debugfs_itct_cache_v3_hw_fops); debugfs_create_file("axi", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI], + &hisi_hba->debugfs_regs[index][DEBUGFS_AXI], &debugfs_axi_v3_hw_fops); debugfs_create_file("ras", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS], + &hisi_hba->debugfs_regs[index][DEBUGFS_RAS], &debugfs_ras_v3_hw_fops); } @@ -3871,9 +3987,9 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file, size_t count, loff_t *ppos) { struct hisi_hba *hisi_hba = file->f_inode->i_private; - char buf[8]; + char buf[DUMP_BUF_SIZE]; - if (count > 8) + if (count > DUMP_BUF_SIZE) return -EFAULT; if (copy_from_user(buf, user_buf, count)) @@ -3937,7 +4053,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp, { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; - char kbuf[16] = {}, *pkbuf; + char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; bool found = false; int i; @@ -3954,7 +4070,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp, for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name, - pkbuf, 16)) { + pkbuf, BIST_BUF_SIZE)) { hisi_hba->debugfs_bist_linkrate = debugfs_loop_linkrate_v3_hw[i].value; found = true; @@ -4012,7 +4128,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp, { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; - char kbuf[16] = {}, *pkbuf; + char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; bool found = false; int i; @@ -4029,7 +4145,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp, for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { if 
(!strncmp(debugfs_loop_code_mode_v3_hw[i].name, - pkbuf, 16)) { + pkbuf, BIST_BUF_SIZE)) { hisi_hba->debugfs_bist_code_mode = debugfs_loop_code_mode_v3_hw[i].value; found = true; @@ -4144,7 +4260,7 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; - char kbuf[16] = {}, *pkbuf; + char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; bool found = false; int i; @@ -4160,7 +4276,8 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, pkbuf = strstrip(kbuf); for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { - if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) { + if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, + BIST_BUF_SIZE)) { hisi_hba->debugfs_bist_mode = debugfs_loop_modes_v3_hw[i].value; found = true; @@ -4439,8 +4556,9 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p) debugfs_read_fifo_data_v3_hw(phy); - debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4, - (__le32 *)phy->fifo.rd_data); + debugfs_show_row_32_v3_hw(s, 0, + HISI_SAS_FIFO_DATA_DW_SIZE * HISI_SAS_REG_MEM_SIZE, + (__le32 *)phy->fifo.rd_data); return 0; } @@ -4525,22 +4643,34 @@ static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index) int i; devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache); + hisi_hba->debugfs_iost_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); + hisi_hba->debugfs_itct_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); + hisi_hba->debugfs_iost[dump_index].iost = NULL; devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); + hisi_hba->debugfs_itct[dump_index].itct = NULL; - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); + hisi_hba->debugfs_dq[dump_index][i].hdr = NULL; + } - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_cq[dump_index][i].complete_hdr); + hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL; + } - for (i = 0; i < DEBUGFS_REGS_NUM; i++) + for (i = 0; i < DEBUGFS_REGS_NUM; i++) { devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); + hisi_hba->debugfs_regs[dump_index][i].data = NULL; + } - for (i = 0; i < hisi_hba->n_phy; i++) + for (i = 0; i < hisi_hba->n_phy; i++) { devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); + hisi_hba->debugfs_port_reg[dump_index][i].data = NULL; + } } static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = { @@ -4560,14 +4690,14 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index) struct hisi_sas_debugfs_regs *regs = &hisi_hba->debugfs_regs[dump_index][r]; - sz = debugfs_reg_array_v3_hw[r]->count * 4; + sz = debugfs_reg_array_v3_hw[r]->count * HISI_SAS_REG_MEM_SIZE; regs->data = devm_kmalloc(dev, sz, GFP_KERNEL); if (!regs->data) goto fail; regs->hisi_hba = hisi_hba; } - sz = debugfs_port_reg.count * 4; + sz = debugfs_port_reg.count * HISI_SAS_REG_MEM_SIZE; for (p = 0; p < hisi_hba->n_phy; p++) { struct hisi_sas_debugfs_port *port = &hisi_hba->debugfs_port_reg[dump_index][p]; @@ -4667,8 +4797,6 @@ static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba) debugfs_snapshot_itct_reg_v3_hw(hisi_hba); debugfs_snapshot_iost_reg_v3_hw(hisi_hba); - debugfs_create_files_v3_hw(hisi_hba); - debugfs_snapshot_restore_v3_hw(hisi_hba); 
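/* Editor's note, not part of the patch: with the dentries pre-created by debugfs_dump_init_v3_hw(), a snapshot only has to fill the dump buffers; reading a slot that has not been populated yet fails with -EPERM via the debugfs_dump_is_generated_v3_hw() checks. */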
hisi_hba->debugfs_dump_index++; @@ -4679,11 +4807,11 @@ static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba) { struct dentry *dir = debugfs_create_dir("phy_down_cnt", hisi_hba->debugfs_dir); - char name[16]; + char name[NAME_BUF_SIZE]; int phy_no; for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { - snprintf(name, 16, "%d", phy_no); + snprintf(name, NAME_BUF_SIZE, "%d", phy_no); debugfs_create_file(name, 0600, dir, &hisi_hba->phy[phy_no], &debugfs_phy_down_cnt_v3_hw_fops); @@ -4752,6 +4880,34 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; } +static int debugfs_dump_index_v3_hw_show(struct seq_file *s, void *p) +{ + int *debugfs_dump_index = s->private; + + if (*debugfs_dump_index > 0) + seq_printf(s, "%d\n", *debugfs_dump_index - 1); + else + seq_puts(s, "dump not triggered\n"); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_dump_index_v3_hw); + +static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + hisi_hba->debugfs_dump_dentry = + debugfs_create_dir("dump", hisi_hba->debugfs_dir); + + debugfs_create_file("latest_dump", 0400, hisi_hba->debugfs_dump_dentry, + &hisi_hba->debugfs_dump_index, + &debugfs_dump_index_v3_hw_fops); + + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) + debugfs_create_files_v3_hw(hisi_hba, i); +} + static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba) { debugfs_remove_recursive(hisi_hba->debugfs_dir); @@ -4764,19 +4920,17 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), hisi_sas_debugfs_dir); - debugfs_create_file("trigger_dump", 0200, - hisi_hba->debugfs_dir, - hisi_hba, - &debugfs_trigger_dump_v3_hw_fops); - /* create bist structures */ debugfs_bist_init_v3_hw(hisi_hba); - hisi_hba->debugfs_dump_dentry = - debugfs_create_dir("dump", hisi_hba->debugfs_dir); + debugfs_dump_init_v3_hw(hisi_hba); debugfs_phy_down_cnt_init_v3_hw(hisi_hba); debugfs_fifo_init_v3_hw(hisi_hba); + debugfs_create_file("trigger_dump", 0200, + hisi_hba->debugfs_dir, + hisi_hba, + &debugfs_trigger_dump_v3_hw_fops); } static int @@ -4842,7 +4996,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; - shost->max_cmd_len = 16; + shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; shost->can_queue = HISI_SAS_UNRESERVED_IPTT; shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; if (hisi_hba->iopoll_q_cnt) @@ -4869,16 +5023,13 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) SHOST_DIX_GUARD_CRC); } - if (hisi_sas_debugfs_enable) - debugfs_init_v3_hw(hisi_hba); - rc = interrupt_preinit_v3_hw(hisi_hba); if (rc) - goto err_out_undo_debugfs; + goto err_out_free_host; rc = scsi_add_host(shost, dev); if (rc) - goto err_out_undo_debugfs; + goto err_out_free_host; rc = sas_register_ha(sha); if (rc) @@ -4889,6 +5040,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_out_unregister_ha; scsi_scan_host(shost); + if (hisi_sas_debugfs_enable) + debugfs_init_v3_hw(hisi_hba); pm_runtime_set_autosuspend_delay(dev, 5000); pm_runtime_use_autosuspend(dev); @@ -4909,9 +5062,6 @@ err_out_unregister_ha: sas_unregister_ha(sha); err_out_remove_host: scsi_remove_host(shost); -err_out_undo_debugfs: - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); err_out_free_host: hisi_sas_free(hisi_hba); scsi_host_put(shost); @@ -4924,12 +5074,13 @@ hisi_sas_v3_destroy_irqs(struct 
pci_dev *pdev, struct hisi_hba *hisi_hba) { int i; - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba); - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba); - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), hisi_hba); for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - int nr = hisi_sas_intr_conv ? 16 : 16 + i; + int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW : + BASE_VECTORS_V3_HW + i; devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); } @@ -4943,6 +5094,8 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct Scsi_Host *shost = sha->shost; pm_runtime_get_noresume(dev); + if (hisi_sas_debugfs_enable) + debugfs_exit_v3_hw(hisi_hba); sas_unregister_ha(sha); flush_workqueue(hisi_hba->wq); @@ -4950,9 +5103,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) hisi_sas_v3_destroy_irqs(pdev, hisi_hba); hisi_sas_free(hisi_hba); - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); - scsi_host_put(shost); } @@ -4960,9 +5110,11 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = hisi_hba->shost; struct device *dev = hisi_hba->dev; int rc; + wait_event(shost->host_wait, !scsi_host_in_recovery(shost)); dev_info(dev, "FLR prepare\n"); down(&hisi_hba->sem); set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); @@ -5043,7 +5195,8 @@ static int _suspend_v3_hw(struct device *device) interrupt_disable_v3_hw(hisi_hba); #ifdef CONFIG_PM - if (atomic_read(&device->power.usage_count)) { + if ((device->power.runtime_status == RPM_SUSPENDING) && + atomic_read(&device->power.usage_count)) { dev_err(dev, "PM suspend: host status cannot be suspended\n"); rc = -EBUSY; goto err_out; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 2d92549e5243..e021f1106bea 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -292,11 +292,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, } if (shost->transportt->create_work_queue) { - snprintf(shost->work_q_name, sizeof(shost->work_q_name), - "scsi_wq_%d", shost->host_no); - shost->work_q = alloc_workqueue("%s", - WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 1, shost->work_q_name); + shost->work_q = alloc_workqueue( + "scsi_wq_%d", + WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1, + shost->host_no); if (!shost->work_q) { error = -EINVAL; @@ -479,6 +478,12 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv else shost->max_segment_size = BLK_MAX_SEGMENT_SIZE; + /* 32-bit (dword) is a common minimum for HBAs. 
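+ * A dma_alignment of 3 is an address mask: the two low bits of the buffer + * address must be clear, giving 4-byte (32-bit dword) alignment (editor's + * gloss, not part of the patch).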
*/ + if (sht->dma_alignment) + shost->dma_alignment = sht->dma_alignment; + else + shost->dma_alignment = 3; + /* * assume a 4GB boundary, if not set */ diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index af18d20f3079..c73a71ac3c29 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -51,7 +51,7 @@ #include <linux/jiffies.h> #include <linux/percpu-defs.h> #include <linux/percpu.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/div64.h> #include "hpsa_cmd.h" #include "hpsa.h" @@ -283,9 +283,10 @@ static int hpsa_scan_finished(struct Scsi_Host *sh, static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); -static int hpsa_slave_alloc(struct scsi_device *sdev); -static int hpsa_slave_configure(struct scsi_device *sdev); -static void hpsa_slave_destroy(struct scsi_device *sdev); +static int hpsa_sdev_init(struct scsi_device *sdev); +static int hpsa_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim); +static void hpsa_sdev_destroy(struct scsi_device *sdev); static void hpsa_update_scsi_devices(struct ctlr_info *h); static int check_for_unit_attention(struct ctlr_info *h, @@ -452,17 +453,13 @@ static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int status, len; + int status; struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); - char tmpbuf[10]; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; - len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; - strncpy(tmpbuf, buf, len); - tmpbuf[len] = '\0'; - if (sscanf(tmpbuf, "%d", &status) != 1) + if (kstrtoint(buf, 10, &status)) return -EINVAL; h = shost_to_hba(shost); h->acciopath_status = !!status; @@ -476,17 +473,13 @@ static ssize_t host_store_raid_offload_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int debug_level, len; + int debug_level; struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); - char tmpbuf[10]; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; - len = count > sizeof(tmpbuf) - 1 ? 
sizeof(tmpbuf) - 1 : count; - strncpy(tmpbuf, buf, len); - tmpbuf[len] = '\0'; - if (sscanf(tmpbuf, "%d", &debug_level) != 1) + if (kstrtoint(buf, 10, &debug_level)) return -EINVAL; if (debug_level < 0) debug_level = 0; @@ -978,9 +971,9 @@ static const struct scsi_host_template hpsa_driver_template = { .this_id = -1, .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, - .slave_alloc = hpsa_slave_alloc, - .slave_configure = hpsa_slave_configure, - .slave_destroy = hpsa_slave_destroy, + .sdev_init = hpsa_sdev_init, + .sdev_configure = hpsa_sdev_configure, + .sdev_destroy = hpsa_sdev_destroy, #ifdef CONFIG_COMPAT .compat_ioctl = hpsa_compat_ioctl, #endif @@ -2107,7 +2100,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, return NULL; } -static int hpsa_slave_alloc(struct scsi_device *sdev) +static int hpsa_sdev_init(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *sd = NULL; unsigned long flags; @@ -2142,7 +2135,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) /* configure scsi device based on internal per-device structure */ #define CTLR_TIMEOUT (120 * HZ) -static int hpsa_slave_configure(struct scsi_device *sdev) +static int hpsa_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct hpsa_scsi_dev_t *sd; int queue_depth; @@ -2173,7 +2167,7 @@ static int hpsa_slave_configure(struct scsi_device *sdev) return 0; } -static void hpsa_slave_destroy(struct scsi_device *sdev) +static void hpsa_sdev_destroy(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *hdev = NULL; @@ -5850,7 +5844,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; - sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *)); if (sh == NULL) { dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; @@ -7236,8 +7230,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, static void init_driver_version(char *driver_version, int len) { - memset(driver_version, 0, len); - strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); + strscpy_pad(driver_version, HPSA " " HPSA_DRIVER_VERSION, len); } static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) @@ -7509,7 +7502,7 @@ fallback: */ static int hpsa_interrupt_mode(struct ctlr_info *h) { - unsigned int flags = PCI_IRQ_LEGACY; + unsigned int flags = PCI_IRQ_INTX; int ret; /* Some boards advertise MSI but don't really support it */ diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index f5334ccbf2ca..21f1d9871a33 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1151,11 +1151,11 @@ static struct attribute *hptiop_host_attrs[] = { ATTRIBUTE_GROUPS(hptiop_host); -static int hptiop_slave_config(struct scsi_device *sdev) +static int hptiop_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { if (sdev->type == TYPE_TAPE) - blk_queue_max_hw_sectors(sdev->request_queue, 8192); - + lim->max_hw_sectors = 8192; return 0; } @@ -1168,7 +1168,7 @@ static const struct scsi_host_template driver_template = { .emulated = 0, .proc_name = driver_name, .shost_groups = hptiop_host_groups, - .slave_configure = hptiop_slave_config, + .sdev_configure = hptiop_sdev_configure, .this_id = -1, .change_queue_depth = hptiop_adjust_disk_queue_depth, .cmd_size = sizeof(struct hpt_cmd_priv), @@ -1634,7 +1634,7 @@ static struct hptiop_adapter_ops hptiop_mvfrey_ops = { .host_phy_flag = cpu_to_le64(1), }; -static struct 
pci_device_id hptiop_id_table[] = { +static const struct pci_device_id hptiop_id_table[] = { { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops }, diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 05b126bfd18b..4c493b06062a 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -37,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; static u64 max_lun = IBMVFC_MAX_LUN; static unsigned int max_targets = IBMVFC_MAX_TARGETS; static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; +static u16 max_sectors = IBMVFC_MAX_SECTORS; static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH; static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; static unsigned int ibmvfc_debug = IBMVFC_DEBUG; @@ -83,6 +84,9 @@ MODULE_PARM_DESC(default_timeout, module_param_named(max_requests, max_requests, uint, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); +module_param_named(max_sectors, max_sectors, ushort, S_IRUGO); +MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. " + "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]"); module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO); MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. " "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]"); @@ -1106,7 +1110,7 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) } else evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED); - del_timer(&evt->timer); + timer_delete(&evt->timer); } /** @@ -1494,7 +1498,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) memset(login_info, 0, sizeof(*login_info)); login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX); - login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9); + login_info->max_dma_len = cpu_to_be64(max_sectors << 9); login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu)); login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp)); login_info->partition_num = cpu_to_be32(vhost->partition_number); @@ -1750,7 +1754,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt, atomic_set(&evt->active, 0); list_del(&evt->queue_list); spin_unlock_irqrestore(&evt->queue->l_lock, flags); - del_timer(&evt->timer); + timer_delete(&evt->timer); /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. 
* Firmware will send a CRQ with a transport event (0xFF) to @@ -3389,7 +3393,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) } /** - * ibmvfc_slave_alloc - Setup the device's task set value + * ibmvfc_sdev_init - Setup the device's task set value * @sdev: struct scsi_device device to configure * * Set the device's task set value so that error handling works as @@ -3398,7 +3402,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) * Returns: * 0 on success / -ENXIO if device does not exist **/ -static int ibmvfc_slave_alloc(struct scsi_device *sdev) +static int ibmvfc_sdev_init(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); @@ -3437,8 +3441,9 @@ static int ibmvfc_target_alloc(struct scsi_target *starget) } /** - * ibmvfc_slave_configure - Configure the device + * ibmvfc_sdev_configure - Configure the device * @sdev: struct scsi_device device to configure + * @lim: Request queue limits * * Enable allow_restart for a device if it is a disk. Adjust the * queue_depth here also. @@ -3446,7 +3451,8 @@ static int ibmvfc_target_alloc(struct scsi_target *starget) * Returns: * 0 **/ -static int ibmvfc_slave_configure(struct scsi_device *sdev) +static int ibmvfc_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct Scsi_Host *shost = sdev->host; unsigned long flags = 0; @@ -3635,7 +3641,7 @@ static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR, * number of bytes printed to buffer **/ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -3658,13 +3664,13 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute ibmvfc_trace_attr = { +static const struct bin_attribute ibmvfc_trace_attr = { .attr = { .name = "trace", .mode = S_IRUGO, }, .size = 0, - .read = ibmvfc_read_trace, + .read_new = ibmvfc_read_trace, }; #endif @@ -3692,8 +3698,8 @@ static const struct scsi_host_template driver_template = { .eh_device_reset_handler = ibmvfc_eh_device_reset_handler, .eh_target_reset_handler = ibmvfc_eh_target_reset_handler, .eh_host_reset_handler = ibmvfc_eh_host_reset_handler, - .slave_alloc = ibmvfc_slave_alloc, - .slave_configure = ibmvfc_slave_configure, + .sdev_init = ibmvfc_sdev_init, + .sdev_configure = ibmvfc_sdev_configure, .target_alloc = ibmvfc_target_alloc, .scan_finished = ibmvfc_scan_finished, .change_queue_depth = ibmvfc_change_queue_depth, @@ -3826,7 +3832,7 @@ static void ibmvfc_tasklet(void *data) spin_unlock_irqrestore(vhost->host->host_lock, flags); list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { - del_timer(&evt->timer); + timer_delete(&evt->timer); list_del(&evt->queue_list); ibmvfc_trc_end(evt); evt->done(evt); @@ -3932,7 +3938,7 @@ static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq) spin_unlock_irqrestore(scrq->q_lock, flags); list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { - del_timer(&evt->timer); + timer_delete(&evt->timer); list_del(&evt->queue_list); ibmvfc_trc_end(evt); evt->done(evt); @@ -4536,7 +4542,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); - del_timer(&tgt->timer); + timer_delete(&tgt->timer); switch (status) { case IBMVFC_MAD_SUCCESS: @@ 
-4735,7 +4741,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) { vhost->discovery_threads--; - del_timer(&tgt->timer); + timer_delete(&tgt->timer); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); kref_put(&tgt->kref, ibmvfc_release_tgt); } else @@ -5230,7 +5236,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) } vhost->logged_in = 1; - npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS); + npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, rsp->drc_name, npiv_max_sectors); @@ -5513,7 +5519,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); spin_unlock_irqrestore(vhost->host->host_lock, flags); fc_remote_port_delete(rport); - del_timer_sync(&tgt->timer); + timer_delete_sync(&tgt->timer); kref_put(&tgt->kref, ibmvfc_release_tgt); return; } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { @@ -5541,8 +5547,6 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) rport->supported_classes |= FC_COS_CLASS2; if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000) rport->supported_classes |= FC_COS_CLASS3; - if (rport->rqst_q) - blk_queue_max_segments(rport->rqst_q, 1); } else tgt_dbg(tgt, "rport add failed\n"); spin_unlock_irqrestore(vhost->host->host_lock, flags); @@ -5668,7 +5672,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rport) fc_remote_port_delete(rport); - del_timer_sync(&tgt->timer); + timer_delete_sync(&tgt->timer); kref_put(&tgt->kref, ibmvfc_release_tgt); return; } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { @@ -6331,7 +6335,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) shost->can_queue = scsi_qdepth; shost->max_lun = max_lun; shost->max_id = max_targets; - shost->max_sectors = IBMVFC_MAX_SECTORS; + shost->max_sectors = max_sectors; shost->max_cmd_len = IBMVFC_MAX_CDB_LEN; shost->unique_id = shost->host_no; shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1; @@ -6391,8 +6395,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) ibmvfc_init_sub_crqs(vhost); - if (shost_to_fc_host(shost)->rqst_q) - blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); dev_set_drvdata(dev, vhost); spin_lock(&ibmvfc_driver_lock); list_add_tail(&vhost->queue, &ibmvfc_head); @@ -6547,6 +6549,7 @@ static struct fc_function_template ibmvfc_transport_functions = { .get_starget_port_id = ibmvfc_get_starget_port_id, .show_starget_port_id = 1, + .max_bsg_segments = 1, .bsg_request = ibmvfc_bsg_request, .bsg_timeout = ibmvfc_bsg_timeout, }; @@ -6559,6 +6562,7 @@ static struct fc_function_template ibmvfc_transport_functions = { **/ static int __init ibmvfc_module_init(void) { + int min_max_sectors = PAGE_SIZE >> 9; int rc; if (!firmware_has_feature(FW_FEATURE_VIO)) @@ -6567,6 +6571,16 @@ static int __init ibmvfc_module_init(void) printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n", IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE); + /* + * Range check the max_sectors module parameter. 
The upper bound is + * implicitly checked since the parameter is a ushort. + */ + if (max_sectors < min_max_sectors) { + printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n", + min_max_sectors); + max_sectors = min_max_sectors; + } + ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions); if (!ibmvfc_transport_template) return -ENOMEM; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 745ad5ac7251..c73ed2314ad0 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -32,7 +32,7 @@ #define IBMVFC_DEBUG 0 #define IBMVFC_MAX_TARGETS 1024 #define IBMVFC_MAX_LUN 0xffffffff -#define IBMVFC_MAX_SECTORS 0xffffu +#define IBMVFC_MAX_SECTORS 2048 #define IBMVFC_MAX_DISC_THREADS 4 #define IBMVFC_TGT_MEMPOOL_SZ 64 #define IBMVFC_MAX_CMDS_PER_LUN 64 diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 71f3e9563520..d65a45860b33 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -789,7 +789,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) while (!list_empty(&hostdata->sent)) { evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list); list_del(&evt->list); - del_timer(&evt->timer); + timer_delete(&evt->timer); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (evt->cmnd) { @@ -944,7 +944,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, be64_to_cpu(crq_as_u64[1])); if (rc != 0) { list_del(&evt_struct->list); - del_timer(&evt_struct->timer); + timer_delete(&evt_struct->timer); /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. * Firmware will send a CRQ with a transport event (0xFF) to @@ -1840,7 +1840,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), &hostdata->request_limit); - del_timer(&evt_struct->timer); + timer_delete(&evt_struct->timer); if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd) evt_struct->cmnd->result = DID_ERROR << 16; @@ -1860,14 +1860,16 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, } /** - * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk. + * ibmvscsi_sdev_configure: Set the "allow_restart" flag for each disk. * @sdev: struct scsi_device device to configure + * @lim: Request queue limits * * Enable allow_restart for a device if it is a disk. Adjust the * queue_depth here also as is required by the documentation for * struct scsi_host_template.
*/ -static int ibmvscsi_slave_configure(struct scsi_device *sdev) +static int ibmvscsi_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct Scsi_Host *shost = sdev->host; unsigned long lock_flags = 0; @@ -2091,7 +2093,7 @@ static struct scsi_host_template driver_template = { .eh_abort_handler = ibmvscsi_eh_abort_handler, .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler, - .slave_configure = ibmvscsi_slave_configure, + .sdev_configure = ibmvscsi_sdev_configure, .change_queue_depth = ibmvscsi_change_queue_depth, .host_reset = ibmvscsi_host_reset, .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT, diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 68b99924ee4f..9e42230e42b8 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -2922,9 +2922,7 @@ static long ibmvscsis_alloctimer(struct scsi_info *vscsi) struct timer_cb *p_timer; p_timer = &vscsi->rsp_q_timer; - hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - - p_timer->timer.function = ibmvscsis_service_wait_q; + hrtimer_setup(&p_timer->timer, ibmvscsis_service_wait_q, CLOCK_MONOTONIC, HRTIMER_MODE_REL); p_timer->started = false; p_timer->timer_pops = 0; @@ -3425,7 +3423,6 @@ static int ibmvscsis_probe(struct vio_dev *vdev, struct scsi_info *vscsi; int rc = 0; long hrc = 0; - char wq_name[24]; vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL); if (!vscsi) { @@ -3536,8 +3533,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev, init_completion(&vscsi->wait_idle); init_completion(&vscsi->unconfig); - snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); - vscsi->work_q = create_workqueue(wq_name); + vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1, + dev_name(&vdev->dev)); if (!vscsi->work_q) { rc = -ENOMEM; dev_err(&vscsi->dev, "create_workqueue failed\n"); @@ -3613,12 +3610,6 @@ static void ibmvscsis_remove(struct vio_dev *vdev) kfree(vscsi); } -static ssize_t system_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "%s\n", system_id); -} - static ssize_t partition_number_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -3982,8 +3973,7 @@ static const struct target_core_fabric_ops ibmvscsis_ops = { static void ibmvscsis_dev_release(struct device *dev) {}; -static struct device_attribute dev_attr_system_id = - __ATTR(system_id, S_IRUGO, system_id_show, NULL); +static DEVICE_STRING_ATTR_RO(system_id, S_IRUGO, system_id); static struct device_attribute dev_attr_partition_number = __ATTR(partition_number, S_IRUGO, partition_number_show, NULL); @@ -3992,7 +3982,7 @@ static struct device_attribute dev_attr_unit_address = __ATTR(unit_address, S_IRUGO, unit_address_show, NULL); static struct attribute *ibmvscsis_dev_attrs[] = { - &dev_attr_system_id.attr, + &dev_attr_system_id.attr.attr, &dev_attr_partition_number.attr, &dev_attr_unit_address.attr, }; diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 180a5ddedb2c..0821cf994b98 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -1100,16 +1100,6 @@ static int device_check(imm_struct *dev, bool autodetect) return -ENODEV; } -/* - * imm cannot deal with highmem, so this causes all IO pages for this host - * to reside in low memory (hence mapped) - */ -static int imm_adjust_queue(struct scsi_device *device) -{ - blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); - return 0; -} - 
static const struct scsi_host_template imm_template = { .module = THIS_MODULE, .proc_name = "imm", @@ -1123,7 +1113,6 @@ static const struct scsi_host_template imm_template = { .this_id = 7, .sg_tablesize = SG_ALL, .can_queue = 1, - .slave_alloc = imm_adjust_queue, .cmd_size = sizeof(struct scsi_pointer), }; @@ -1285,8 +1274,8 @@ static struct parport_driver imm_driver = { .name = "imm", .match_port = imm_attach, .detach = imm_detach, - .devmodel = true, }; module_parport_driver(imm_driver); +MODULE_DESCRIPTION("IOMEGA MatchMaker parallel port SCSI host adapter driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 625fd547ee60..8648bd965287 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -2941,7 +2941,7 @@ static void initio_remove_one(struct pci_dev *pdev) MODULE_LICENSE("GPL"); -static struct pci_device_id initio_pci_tbl[] = { +static const struct pci_device_id initio_pci_tbl[] = { {PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 3819f7c42788..d89135fb8faa 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -873,7 +873,7 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET); - del_timer(&ipr_cmd->timer); + timer_delete(&ipr_cmd->timer); ipr_cmd->done(ipr_cmd); } spin_unlock(&hrrq->_lock); @@ -3366,7 +3366,7 @@ static void ipr_worker_thread(struct work_struct *work) * number of bytes printed to buffer **/ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -3383,13 +3383,13 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, return ret; } -static struct bin_attribute ipr_trace_attr = { +static const struct bin_attribute ipr_trace_attr = { .attr = { .name = "trace", .mode = S_IRUGO, }, .size = 0, - .read = ipr_read_trace, + .read_new = ipr_read_trace, }; #endif @@ -4087,7 +4087,7 @@ static struct device_attribute ipr_ioa_fw_type_attr = { }; static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); @@ -4111,7 +4111,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj, } static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); @@ -4134,14 +4134,14 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj, return count; } -static struct bin_attribute ipr_ioa_async_err_log = { +static const struct bin_attribute ipr_ioa_async_err_log = { .attr = { .name = "async_err_log", .mode = S_IRUGO | S_IWUSR, }, .size = 0, - .read = ipr_read_async_err_log, - .write = ipr_next_async_err_log + .read_new = ipr_read_async_err_log, + .write_new = ipr_next_async_err_log }; static struct attribute *ipr_ioa_attrs[] = { @@ -4172,7 +4172,7 @@ ATTRIBUTE_GROUPS(ipr_ioa); * number of bytes printed to buffer **/ static ssize_t ipr_read_dump(struct file *filp, struct 
kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); @@ -4361,7 +4361,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) * number of bytes printed to buffer **/ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); @@ -4385,14 +4385,14 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute ipr_dump_attr = { +static const struct bin_attribute ipr_dump_attr = { .attr = { .name = "dump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, - .read = ipr_read_dump, - .write = ipr_write_dump + .read_new = ipr_read_dump, + .write_new = ipr_write_dump }; #else static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; @@ -4745,13 +4745,13 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) } /** - * ipr_slave_destroy - Unconfigure a SCSI device + * ipr_sdev_destroy - Unconfigure a SCSI device * @sdev: scsi device struct * * Return value: * nothing **/ -static void ipr_slave_destroy(struct scsi_device *sdev) +static void ipr_sdev_destroy(struct scsi_device *sdev) { struct ipr_resource_entry *res; struct ipr_ioa_cfg *ioa_cfg; @@ -4769,15 +4769,17 @@ static void ipr_slave_destroy(struct scsi_device *sdev) } /** - * ipr_slave_configure - Configure a SCSI device + * ipr_sdev_configure - Configure a SCSI device * @sdev: scsi device struct + * @lim: queue limits * * This function configures the specified scsi device. * * Return value: * 0 on success **/ -static int ipr_slave_configure(struct scsi_device *sdev) +static int ipr_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; struct ipr_resource_entry *res; @@ -4798,7 +4800,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) sdev->no_report_opcodes = 1; blk_queue_rq_timeout(sdev->request_queue, IPR_VSET_RW_TIMEOUT); - blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); + lim->max_hw_sectors = IPR_VSET_MAX_SECTORS; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -4813,7 +4815,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) } /** - * ipr_slave_alloc - Prepare for commands to a device. + * ipr_sdev_init - Prepare for commands to a device. 
* @sdev: scsi device struct * * This function saves a pointer to the resource entry @@ -4824,7 +4826,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) * Return value: * 0 on success / -ENXIO if device does not exist **/ -static int ipr_slave_alloc(struct scsi_device *sdev) +static int ipr_sdev_init(struct scsi_device *sdev) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; struct ipr_resource_entry *res; @@ -5345,7 +5347,7 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; list_del(&ioa_cfg->reset_cmd->queue); - del_timer(&ioa_cfg->reset_cmd->timer); + timer_delete(&ioa_cfg->reset_cmd->timer); ipr_reset_ioa_job(ioa_cfg->reset_cmd); return IRQ_HANDLED; } @@ -5360,7 +5362,7 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); list_del(&ioa_cfg->reset_cmd->queue); - del_timer(&ioa_cfg->reset_cmd->timer); + timer_delete(&ioa_cfg->reset_cmd->timer); ipr_reset_ioa_job(ioa_cfg->reset_cmd); } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { if (ioa_cfg->clear_isr) { @@ -5479,7 +5481,7 @@ static int ipr_iopoll(struct irq_poll *iop, int budget) list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { list_del(&ipr_cmd->queue); - del_timer(&ipr_cmd->timer); + timer_delete(&ipr_cmd->timer); ipr_cmd->fast_done(ipr_cmd); } @@ -5548,7 +5550,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) spin_unlock_irqrestore(hrrq->lock, hrrq_flags); list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { list_del(&ipr_cmd->queue); - del_timer(&ipr_cmd->timer); + timer_delete(&ipr_cmd->timer); ipr_cmd->fast_done(ipr_cmd); } return rc; @@ -5598,7 +5600,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { list_del(&ipr_cmd->queue); - del_timer(&ipr_cmd->timer); + timer_delete(&ipr_cmd->timer); ipr_cmd->fast_done(ipr_cmd); } return rc; @@ -6396,9 +6398,9 @@ static const struct scsi_host_template driver_template = { .eh_abort_handler = ipr_eh_abort, .eh_device_reset_handler = ipr_eh_dev_reset, .eh_host_reset_handler = ipr_eh_host_reset, - .slave_alloc = ipr_slave_alloc, - .slave_configure = ipr_slave_configure, - .slave_destroy = ipr_slave_destroy, + .sdev_init = ipr_sdev_init, + .sdev_configure = ipr_sdev_configure, + .sdev_destroy = ipr_sdev_destroy, .scan_finished = ipr_scan_finished, .target_destroy = ipr_target_destroy, .change_queue_depth = ipr_change_queue_depth, @@ -9463,7 +9465,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; } - irq_flag = PCI_IRQ_LEGACY; + irq_flag = PCI_IRQ_INTX; if (ioa_cfg->ipr_chip->has_msi) irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX; rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag); @@ -9842,7 +9844,7 @@ static void ipr_shutdown(struct pci_dev *pdev) } } -static struct pci_device_id ipr_pci_table[] = { +static const struct pci_device_id ipr_pci_table[] = { { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 }, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index c77d6ca1a210..fde7145835de 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -13,7 +13,7 @@ #ifndef _IPR_H #define _IPR_H -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/types.h> #include 
<linux/completion.h> #include <linux/list.h> @@ -1030,7 +1030,7 @@ struct ipr_hostrcb_fabric_desc { #define IPR_PATH_FAILED 0x03 __be16 num_entries; - struct ipr_hostrcb_config_element elem[1]; + struct ipr_hostrcb_config_element elem[]; }__attribute__((packed, aligned (4))); struct ipr_hostrcb64_fabric_desc { @@ -1044,7 +1044,7 @@ struct ipr_hostrcb64_fabric_desc { u8 res_path[8]; u8 reserved3[6]; __be16 num_entries; - struct ipr_hostrcb64_config_element elem[1]; + struct ipr_hostrcb64_config_element elem[]; }__attribute__((packed, aligned (8))); #define for_each_hrrq(hrrq, ioa_cfg) \ diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 10cf5775a939..94adb6ac02a4 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -364,7 +364,7 @@ static struct scsi_host_template ips_driver_template = { .proc_name = "ips", .show_info = ips_show_info, .write_info = ips_write_info, - .slave_configure = ips_slave_configure, + .sdev_configure = ips_sdev_configure, .bios_param = ips_biosparam, .this_id = -1, .sg_tablesize = IPS_MAX_SG, @@ -1166,7 +1166,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, /****************************************************************************/ /* */ -/* Routine Name: ips_slave_configure */ +/* Routine Name: ips_sdev_configure */ /* */ /* Routine Description: */ /* */ @@ -1174,7 +1174,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, /* */ /****************************************************************************/ static int -ips_slave_configure(struct scsi_device * SDptr) +ips_sdev_configure(struct scsi_device *SDptr, struct queue_limits *lim) { ips_ha_t *ha; int min; @@ -3631,8 +3631,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) break; - case RESERVE: - case RELEASE: + case RESERVE_6: + case RELEASE_6: scb->scsi_cmd->result = DID_OK << 16; break; @@ -3899,8 +3899,8 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus) case WRITE_6: case READ_10: case WRITE_10: - case RESERVE: - case RELEASE: + case RESERVE_6: + case RELEASE_6: break; case MODE_SENSE: diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h index 65edf000e447..8ac932ec4444 100644 --- a/drivers/scsi/ips.h +++ b/drivers/scsi/ips.h @@ -400,7 +400,8 @@ */ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]); - static int ips_slave_configure(struct scsi_device *SDptr); + static int ips_sdev_configure(struct scsi_device *SDptr, + struct queue_limits *lim); /* * Raid Command Formats diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 35589b6af90d..c108b5b940c3 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -1271,22 +1271,22 @@ void isci_host_deinit(struct isci_host *ihost) /* Cancel any/all outstanding port timers */ for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; - del_timer_sync(&iport->timer.timer); + timer_delete_sync(&iport->timer.timer); } /* Cancel any/all outstanding phy timers */ for (i = 0; i < SCI_MAX_PHYS; i++) { struct isci_phy *iphy = &ihost->phys[i]; - del_timer_sync(&iphy->sata_timer.timer); + timer_delete_sync(&iphy->sata_timer.timer); } - del_timer_sync(&ihost->port_agent.timer.timer); + timer_delete_sync(&ihost->port_agent.timer.timer); - del_timer_sync(&ihost->power_control.timer.timer); + timer_delete_sync(&ihost->power_control.timer.timer); - del_timer_sync(&ihost->timer.timer); + timer_delete_sync(&ihost->timer.timer); - del_timer_sync(&ihost->phy_timer.timer); + 
timer_delete_sync(&ihost->phy_timer.timer); } static void __iomem *scu_base(struct isci_host *isci_host) diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index c582a3932cea..acf0c2038d20 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -65,11 +65,7 @@ #include "task.h" #include "probe_roms.h" -#define MAJ 1 -#define MIN 2 -#define BUILD 0 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ - __stringify(BUILD) +#define DRV_VERSION "1.2.0" MODULE_VERSION(DRV_VERSION); @@ -95,31 +91,31 @@ MODULE_DEVICE_TABLE(pci, isci_id_table); /* linux isci specific settings */ -unsigned char no_outbound_task_to = 2; +static unsigned char no_outbound_task_to = 2; module_param(no_outbound_task_to, byte, 0); MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); -u16 ssp_max_occ_to = 20; +static u16 ssp_max_occ_to = 20; module_param(ssp_max_occ_to, ushort, 0); MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)"); -u16 stp_max_occ_to = 5; +static u16 stp_max_occ_to = 5; module_param(stp_max_occ_to, ushort, 0); MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)"); -u16 ssp_inactive_to = 5; +static u16 ssp_inactive_to = 5; module_param(ssp_inactive_to, ushort, 0); MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)"); -u16 stp_inactive_to = 5; +static u16 stp_inactive_to = 5; module_param(stp_inactive_to, ushort, 0); MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); -unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED; +static unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED; module_param(phy_gen, byte, 0); MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); -unsigned char max_concurr_spinup; +static unsigned char max_concurr_spinup; module_param(max_concurr_spinup, byte, 0); MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); @@ -149,33 +145,20 @@ static struct attribute *isci_host_attrs[] = { ATTRIBUTE_GROUPS(isci_host); -static const struct scsi_host_template isci_sht = { +static const struct attribute_group *isci_sdev_groups[] = { + &sas_ata_sdev_attr_group, + NULL +}; - .module = THIS_MODULE, - .name = DRV_NAME, - .proc_name = DRV_NAME, - .queuecommand = sas_queuecommand, - .dma_need_drain = ata_scsi_dma_need_drain, - .target_alloc = sas_target_alloc, - .slave_configure = sas_slave_configure, +static const struct scsi_host_template isci_sht = { + LIBSAS_SHT_BASE .scan_finished = isci_host_scan_finished, .scan_start = isci_host_start, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, .can_queue = ISCI_CAN_QUEUE_VAL, - .this_id = -1, .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .eh_abort_handler = sas_eh_abort_handler, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sas_ioctl, -#endif + .eh_abort_handler = sas_eh_abort_handler, .shost_groups = isci_host_groups, + .sdev_groups = isci_sdev_groups, .track_queue_depth = 1, }; @@ -771,6 +754,7 @@ static __exit void isci_exit(void) sas_release_transport(isci_transport_template); } +MODULE_DESCRIPTION("Intel(R) C600 Series Chipset SAS Controller driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_FIRMWARE(ISCI_FW_NAME); module_init(isci_init); diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 
4e6b1decbca7..d827e49c1d55 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -473,13 +473,6 @@ static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt) dest[word_cnt] = swab32(src[word_cnt]); } -extern unsigned char no_outbound_task_to; -extern u16 ssp_max_occ_to; -extern u16 stp_max_occ_to; -extern u16 ssp_inactive_to; -extern u16 stp_inactive_to; -extern unsigned char phy_gen; -extern unsigned char max_concurr_spinup; extern uint cable_selection_override; irqreturn_t isci_msix_isr(int vec, void *data); @@ -488,9 +481,9 @@ irqreturn_t isci_error_isr(int vec, void *data); /* * Each timer is associated with a cancellation flag that is set when - * del_timer() is called and checked in the timer callback function. This - * is needed since del_timer_sync() cannot be called with sci_lock held. - * For deinit however, del_timer_sync() is used without holding the lock. + * timer_delete() is called and checked in the timer callback function. This + * is needed since timer_delete_sync() cannot be called with sci_lock held. + * For deinit however, timer_delete_sync() is used without holding the lock. */ struct sci_timer { struct timer_list timer; @@ -513,7 +506,7 @@ static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec) static inline void sci_del_timer(struct sci_timer *tmr) { tmr->cancel = true; - del_timer(&tmr->timer); + timer_delete(&tmr->timer); } struct sci_base_state_machine { diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 866950a02965..82deb6a83a8c 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -392,51 +392,6 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, } } -enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) -{ - struct sci_base_state_machine *sm = &idev->sm; - enum sci_remote_device_states state = sm->current_state_id; - - switch (state) { - case SCI_DEV_INITIAL: - case SCI_DEV_STOPPED: - case SCI_DEV_STARTING: - case SCI_SMP_DEV_IDLE: - case SCI_SMP_DEV_CMD: - case SCI_DEV_STOPPING: - case SCI_DEV_FAILED: - case SCI_DEV_RESETTING: - case SCI_DEV_FINAL: - default: - dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", - __func__, dev_state_name(state)); - return SCI_FAILURE_INVALID_STATE; - case SCI_DEV_READY: - case SCI_STP_DEV_IDLE: - case SCI_STP_DEV_CMD: - case SCI_STP_DEV_NCQ: - case SCI_STP_DEV_NCQ_ERROR: - case SCI_STP_DEV_AWAIT_RESET: - sci_change_state(sm, SCI_DEV_RESETTING); - return SCI_SUCCESS; - } -} - -enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) -{ - struct sci_base_state_machine *sm = &idev->sm; - enum sci_remote_device_states state = sm->current_state_id; - - if (state != SCI_DEV_RESETTING) { - dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", - __func__, dev_state_name(state)); - return SCI_FAILURE_INVALID_STATE; - } - - sci_change_state(sm, SCI_DEV_READY); - return SCI_SUCCESS; -} - enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, u32 frame_index) { @@ -1694,20 +1649,6 @@ enum sci_status sci_remote_device_abort_requests_pending_abort( return sci_remote_device_terminate_reqs_checkabort(idev, 1); } -enum sci_status isci_remote_device_reset_complete( - struct isci_host *ihost, - struct isci_remote_device *idev) -{ - unsigned long flags; - enum sci_status status; - - spin_lock_irqsave(&ihost->scic_lock, flags); - status = sci_remote_device_reset_complete(idev); - 
spin_unlock_irqrestore(&ihost->scic_lock, flags); - - return status; -} - void isci_dev_set_hang_detection_timeout( struct isci_remote_device *idev, u32 timeout) diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 3ad681c4c20a..c1fdf45751cd 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -160,34 +160,6 @@ enum sci_status sci_remote_device_stop( u32 timeout); /** - * sci_remote_device_reset() - This method will reset the device making it - * ready for operation. This method must be called anytime the device is - * reset either through a SMP phy control or a port hard reset request. - * @remote_device: This parameter specifies the device to be reset. - * - * This method does not actually cause the device hardware to be reset. This - * method resets the software object so that it will be operational after a - * device hardware reset completes. An indication of whether the device reset - * was accepted. SCI_SUCCESS This value is returned if the device reset is - * started. - */ -enum sci_status sci_remote_device_reset( - struct isci_remote_device *idev); - -/** - * sci_remote_device_reset_complete() - This method informs the device object - * that the reset operation is complete and the device can resume operation - * again. - * @remote_device: This parameter specifies the device which is to be informed - * of the reset complete operation. - * - * An indication that the device is resuming operation. SCI_SUCCESS the device - * is resuming operation. - */ -enum sci_status sci_remote_device_reset_complete( - struct isci_remote_device *idev); - -/** * enum sci_remote_device_states - This enumeration depicts all the states * for the common remote device state machine. * @SCI_DEV_INITIAL: Simply the initial state for the base remote device @@ -211,7 +183,7 @@ enum sci_status sci_remote_device_reset_complete( * device. When there are no active IO for the device it is in this * state. * - * @SCI_STP_DEV_CMD: This is the command state for for the STP remote + * @SCI_STP_DEV_CMD: This is the command state for the STP remote * device. This state is entered when the device is processing a * non-NCQ command. The device object will fail any new start IO * requests until this command is complete. @@ -364,10 +336,6 @@ enum sci_status isci_remote_device_reset( struct isci_host *ihost, struct isci_remote_device *idev); -enum sci_status isci_remote_device_reset_complete( - struct isci_host *ihost, - struct isci_remote_device *idev); - enum sci_status isci_remote_device_suspend_terminate( struct isci_host *ihost, struct isci_remote_device *idev, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 8e14cea15f98..7b4fe0e6afb2 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -17,7 +17,6 @@ * Zhenyu Wang */ -#include <crypto/hash.h> #include <linux/types.h> #include <linux/inet.h> #include <linux/slab.h> @@ -468,8 +467,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, * sufficient room.
*/ if (conn->hdrdgst_en) { - iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen, - hdr + hdrlen); + iscsi_tcp_dgst_header(hdr, hdrlen, hdr + hdrlen); hdrlen += ISCSI_DIGEST_SIZE; } @@ -494,7 +492,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; - struct ahash_request *tx_hash = NULL; + u32 *tx_crcp = NULL; unsigned int hdr_spec_len; ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len, @@ -507,11 +505,10 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); if (conn->datadgst_en) - tx_hash = tcp_sw_conn->tx_hash; + tx_crcp = &tcp_sw_conn->tx_crc; return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment, - sg, count, offset, len, - NULL, tx_hash); + sg, count, offset, len, NULL, tx_crcp); } static void @@ -520,7 +517,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data, { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; - struct ahash_request *tx_hash = NULL; + u32 *tx_crcp = NULL; unsigned int hdr_spec_len; ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ? @@ -532,10 +529,10 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data, WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); if (conn->datadgst_en) - tx_hash = tcp_sw_conn->tx_hash; + tx_crcp = &tcp_sw_conn->tx_crc; iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment, - data, len, NULL, tx_hash); + data, len, NULL, tx_crcp); } static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task, @@ -583,7 +580,6 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; - struct crypto_ahash *tfm; cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn), conn_idx); @@ -596,37 +592,9 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q; mutex_init(&tcp_sw_conn->sock_lock); - - tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - goto free_conn; - - tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!tcp_sw_conn->tx_hash) - goto free_tfm; - ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL); - - tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!tcp_sw_conn->rx_hash) - goto free_tx_hash; - ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL); - - tcp_conn->rx_hash = tcp_sw_conn->rx_hash; + tcp_conn->rx_crcp = &tcp_sw_conn->rx_crc; return cls_conn; - -free_tx_hash: - ahash_request_free(tcp_sw_conn->tx_hash); -free_tfm: - crypto_free_ahash(tfm); -free_conn: - iscsi_conn_printk(KERN_ERR, conn, - "Could not create connection due to crc32c " - "loading error. 
Make sure the crc32c " - "module is built as a module or into the " - "kernel\n"); - iscsi_tcp_conn_teardown(cls_conn); - return NULL; } static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) @@ -664,20 +632,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; iscsi_sw_tcp_release_conn(conn); - - ahash_request_free(tcp_sw_conn->rx_hash); - if (tcp_sw_conn->tx_hash) { - struct crypto_ahash *tfm; - - tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash); - ahash_request_free(tcp_sw_conn->tx_hash); - crypto_free_ahash(tfm); - } - iscsi_tcp_conn_teardown(cls_conn); } @@ -943,6 +899,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, shost->max_id = 0; shost->max_channel = 0; shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; + shost->dma_alignment = 0; rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max); if (rc < 0) @@ -1056,16 +1013,15 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) return 0; } -static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) +static int iscsi_sw_tcp_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); struct iscsi_session *session = tcp_sw_host->session; struct iscsi_conn *conn = session->leadconn; if (conn->datadgst_en) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, - sdev->request_queue); - blk_queue_dma_alignment(sdev->request_queue, 0); + lim->features |= BLK_FEAT_STABLE_WRITES; return 0; } @@ -1083,7 +1039,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = { .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .dma_boundary = PAGE_SIZE - 1, - .slave_configure = iscsi_sw_tcp_slave_configure, + .sdev_configure = iscsi_sw_tcp_sdev_configure, .proc_name = "iscsi_tcp", .this_id = -1, .track_queue_depth = 1, diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 89a6fc552f0b..c3e5d9fa6add 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -41,8 +41,8 @@ struct iscsi_sw_tcp_conn { void (*old_write_space)(struct sock *); /* data and header digests */ - struct ahash_request *tx_hash; /* CRC32C (Tx) */ - struct ahash_request *rx_hash; /* CRC32C (Rx) */ + u32 tx_crc; /* CRC32C (Tx) */ + u32 rx_crc; /* CRC32C (Rx) */ /* MIB custom statistics */ uint32_t sendpage_failures_cnt; diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index fb04b0b515ab..35137f5cfb3a 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c @@ -196,7 +196,7 @@ MODULE_ALIAS("platform:jazz_esp"); static struct platform_driver esp_jazz_driver = { .probe = esp_jazz_probe, - .remove_new = esp_jazz_remove, + .remove = esp_jazz_remove, .driver = { .name = "jazz_esp", }, diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 384f48ff64d7..60d621ad0024 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -26,7 +26,7 @@ #include <linux/export.h> #include <linux/list.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/fc/fc_gs.h> diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 8d3006edbe12..4fa18a317f77 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -10,7 +10,7 @@ */ 
#include <linux/export.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/fc/fc_gs.h> #include <scsi/fc/fc_ns.h> #include <scsi/fc/fc_els.h> diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h index 6b7e4ca6b7b5..02e31db31d68 100644 --- a/drivers/scsi/libfc/fc_encode.h +++ b/drivers/scsi/libfc/fc_encode.h @@ -7,7 +7,7 @@ #ifndef _FC_ENCODE_H_ #define _FC_ENCODE_H_ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/utsname.h> #include <scsi/fc/fc_ms.h> diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 1d91c457527f..f84a7e6ae379 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2693,7 +2693,8 @@ int fc_setup_exch_mgr(void) fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); fc_cpu_mask = (1 << fc_cpu_order) - 1; - fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); + fc_exch_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + "fc_exch_workqueue"); if (!fc_exch_workqueue) goto err; return 0; diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 80be3a936d92..e705c30b4e1b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1329,7 +1329,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, fsp->state |= FC_SRB_COMPL; spin_unlock_bh(&fsp->scsi_pkt_lock); - del_timer_sync(&fsp->timer); + timer_delete_sync(&fsp->timer); spin_lock_bh(&fsp->scsi_pkt_lock); if (fsp->seq_ptr) { @@ -1961,7 +1961,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) fsp->state |= FC_SRB_COMPL; if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { spin_unlock_bh(&fsp->scsi_pkt_lock); - del_timer_sync(&fsp->timer); + timer_delete_sync(&fsp->timer); spin_lock_bh(&fsp->scsi_pkt_lock); } @@ -2222,13 +2222,13 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) EXPORT_SYMBOL(fc_eh_host_reset); /** - * fc_slave_alloc() - Configure the queue depth of a Scsi_Host + * fc_sdev_init() - Configure the queue depth of a Scsi_Host * @sdev: The SCSI device that identifies the SCSI host * * Configures queue depth based on host's cmd_per_lun. If not set * then we use the libfc default.
*/ -int fc_slave_alloc(struct scsi_device *sdev) +int fc_sdev_init(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); @@ -2238,7 +2238,7 @@ int fc_slave_alloc(struct scsi_device *sdev) scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH); return 0; } -EXPORT_SYMBOL(fc_slave_alloc); +EXPORT_SYMBOL(fc_sdev_init); /** * fc_fcp_destroy() - Tear down the FCP layer for a given local port diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index ab06e9aeb613..310fa5add5f0 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -79,7 +79,7 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/fc/fc_gs.h> diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 33da3c1085f0..c25979d96808 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -55,7 +55,7 @@ #include <linux/export.h> #include <linux/rculist.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/libfc.h> @@ -2263,7 +2263,8 @@ struct fc4_prov fc_rport_t0_prov = { */ int fc_setup_rport(void) { - rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); + rport_event_queue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fc_rport_eq"); if (!rport_event_queue) return -ENOMEM; return 0; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 0fda8905eabd..1ddaf7228340 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -15,7 +15,7 @@ #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/module.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -1945,7 +1945,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, session->tmf_state != TMF_QUEUED); if (signal_pending(current)) flush_signals(current); - del_timer_sync(&session->tmf_timer); + timer_delete_sync(&session->tmf_timer); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); @@ -3247,7 +3247,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) iscsi_remove_conn(cls_conn); - del_timer_sync(&conn->transport_timer); + timer_delete_sync(&conn->transport_timer); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); @@ -3411,7 +3411,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) conn->stop_stage = flag; spin_unlock_bh(&session->frwd_lock); - del_timer_sync(&conn->transport_timer); + timer_delete_sync(&conn->transport_timer); iscsi_suspend_tx(conn); spin_lock_bh(&session->frwd_lock); diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index c182aa83f2c9..e90805ba868f 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -15,7 +15,7 @@ * Zhenyu Wang */ -#include <crypto/hash.h> +#include <linux/crc32c.h> #include <linux/types.h> #include <linux/list.h> #include <linux/inet.h> @@ -168,7 +168,7 @@ iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest) segment->size = ISCSI_DIGEST_SIZE; segment->copied = 0; segment->sg = NULL; - segment->hash = NULL; + segment->crcp = NULL; } /** @@ -191,29 +191,27 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, int recv, unsigned copied) { - struct scatterlist sg; unsigned int pad; ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n", segment->copied, copied, 
segment->size, recv ? "recv" : "xmit"); - if (segment->hash && copied) { - /* - * If a segment is kmapd we must unmap it before sending - * to the crypto layer since that will try to kmap it again. - */ - iscsi_tcp_segment_unmap(segment); - - if (!segment->data) { - sg_init_table(&sg, 1); - sg_set_page(&sg, sg_page(segment->sg), copied, - segment->copied + segment->sg_offset + - segment->sg->offset); - } else - sg_init_one(&sg, segment->data + segment->copied, - copied); - ahash_request_set_crypt(segment->hash, &sg, NULL, copied); - crypto_ahash_update(segment->hash); + if (segment->crcp && copied) { + if (segment->data) { + *segment->crcp = crc32c(*segment->crcp, + segment->data + segment->copied, + copied); + } else { + const void *data; + + data = kmap_local_page(sg_page(segment->sg)); + *segment->crcp = crc32c(*segment->crcp, + data + segment->copied + + segment->sg_offset + + segment->sg->offset, + copied); + kunmap_local(data); + } } segment->copied += copied; @@ -258,10 +256,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, * Set us up for transferring the data digest. hdr digest * is completely handled in hdr done function. */ - if (segment->hash) { - ahash_request_set_crypt(segment->hash, NULL, - segment->digest, 0); - crypto_ahash_final(segment->hash); + if (segment->crcp) { + put_unaligned_le32(~*segment->crcp, segment->digest); iscsi_tcp_segment_splice_digest(segment, recv ? segment->recv_digest : segment->digest); return 0; @@ -282,8 +278,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done); * given buffer, and returns the number of bytes * consumed, which can actually be less than @len. * - * If hash digest is enabled, the function will update the - * hash while copying. + * If CRC is enabled, the function will update the CRC while copying. * Combining these two operations doesn't buy us a lot (yet), * but in the future we could implement combined copy+crc, * just the way we do for network layer checksums. 
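The conversion above swaps the crypto_ahash digest path for the library crc32c() helper. The lifecycle is easy to lose in the diff noise: the running CRC is seeded with ~0 when a segment is (re)initialized, folded forward over each contiguous chunk as the data is copied, and finally complemented and stored little-endian, which is the on-the-wire format of the iSCSI CRC32C digest. A minimal sketch of that lifecycle, using hypothetical iscsi_crc_* helper names that do not appear in the patch:

#include <linux/crc32c.h>
#include <linux/unaligned.h>

/* Seed the running CRC, mirroring what __iscsi_segment_init() now does. */
static inline void iscsi_crc_init(u32 *crcp)
{
	*crcp = ~0;
}

/* Fold one contiguous chunk into the running CRC as it is copied. */
static inline void iscsi_crc_update(u32 *crcp, const void *data, size_t len)
{
	*crcp = crc32c(*crcp, data, len);
}

/* Finalize: iSCSI carries the complemented CRC in little-endian form. */
static inline void iscsi_crc_final(const u32 *crcp, u8 digest[4])
{
	put_unaligned_le32(~*crcp, digest);
}

Because crc32c() is plain synchronous code, the scatterlist setup and kmap restrictions of the ahash API reduce to a kmap_local_page()/kunmap_local() pair around the update, exactly as the new iscsi_tcp_segment_done() body shows.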
@@ -311,14 +306,10 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, } inline void -iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr, - size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE]) +iscsi_tcp_dgst_header(const void *hdr, size_t hdrlen, + unsigned char digest[ISCSI_DIGEST_SIZE]) { - struct scatterlist sg; - - sg_init_one(&sg, hdr, hdrlen); - ahash_request_set_crypt(hash, &sg, digest, hdrlen); - crypto_ahash_digest(hash); + put_unaligned_le32(~crc32c(~0, hdr, hdrlen), digest); } EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header); @@ -343,24 +334,23 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, */ static inline void __iscsi_segment_init(struct iscsi_segment *segment, size_t size, - iscsi_segment_done_fn_t *done, struct ahash_request *hash) + iscsi_segment_done_fn_t *done, u32 *crcp) { memset(segment, 0, sizeof(*segment)); segment->total_size = size; segment->done = done; - if (hash) { - segment->hash = hash; - crypto_ahash_init(hash); + if (crcp) { + segment->crcp = crcp; + *crcp = ~0; } } inline void iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, - size_t size, iscsi_segment_done_fn_t *done, - struct ahash_request *hash) + size_t size, iscsi_segment_done_fn_t *done, u32 *crcp) { - __iscsi_segment_init(segment, size, done, hash); + __iscsi_segment_init(segment, size, done, crcp); segment->data = data; segment->size = size; } @@ -370,13 +360,12 @@ inline int iscsi_segment_seek_sg(struct iscsi_segment *segment, struct scatterlist *sg_list, unsigned int sg_count, unsigned int offset, size_t size, - iscsi_segment_done_fn_t *done, - struct ahash_request *hash) + iscsi_segment_done_fn_t *done, u32 *crcp) { struct scatterlist *sg; unsigned int i; - __iscsi_segment_init(segment, size, done, hash); + __iscsi_segment_init(segment, size, done, crcp); for_each_sg(sg_list, sg, sg_count, i) { if (offset < sg->length) { iscsi_tcp_segment_init_sg(segment, sg, offset); @@ -393,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg); * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception * @tcp_conn: iscsi connection to prep for * - * This function always passes NULL for the hash argument, because when this + * This function always passes NULL for the crcp argument, because when this * function is called we do not yet know the final size of the header and want * to delay the digest processing until we know that. 
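 *
 * Once the whole header has been received, iscsi_tcp_hdr_recv_done()
 * computes the digest in one shot via iscsi_tcp_dgst_header(), which the
 * conversion above reduces to
 * put_unaligned_le32(~crc32c(~0, hdr, hdrlen), digest).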
*/ @@ -434,15 +423,15 @@ static void iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) { struct iscsi_conn *conn = tcp_conn->iscsi_conn; - struct ahash_request *rx_hash = NULL; + u32 *rx_crcp = NULL; if (conn->datadgst_en && !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) - rx_hash = tcp_conn->rx_hash; + rx_crcp = tcp_conn->rx_crcp; iscsi_segment_init_linear(&tcp_conn->in.segment, conn->data, tcp_conn->in.datalen, - iscsi_tcp_data_recv_done, rx_hash); + iscsi_tcp_data_recv_done, rx_crcp); } /** @@ -730,7 +719,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) if (tcp_conn->in.datalen) { struct iscsi_tcp_task *tcp_task = task->dd_data; - struct ahash_request *rx_hash = NULL; + u32 *rx_crcp = NULL; struct scsi_data_buffer *sdb = &task->sc->sdb; /* @@ -743,7 +732,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) */ if (conn->datadgst_en && !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) - rx_hash = tcp_conn->rx_hash; + rx_crcp = tcp_conn->rx_crcp; ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( " "offset=%d, datalen=%d)\n", @@ -756,7 +745,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) tcp_task->data_offset, tcp_conn->in.datalen, iscsi_tcp_process_data_in, - rx_hash); + rx_crcp); spin_unlock(&conn->session->back_lock); return rc; } @@ -878,7 +867,7 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, return 0; } - iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr, + iscsi_tcp_dgst_header(hdr, segment->total_copied - ISCSI_DIGEST_SIZE, segment->digest); diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 12e2653846e3..7b4e7a61965a 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -564,7 +564,6 @@ static struct ata_port_operations sas_sata_ops = { .error_handler = ata_std_error_handler, .post_internal_cmd = sas_ata_post_internal, .qc_defer = ata_std_qc_defer, - .qc_prep = ata_noop_qc_prep, .qc_issue = sas_ata_qc_issue, .qc_fill_rtf = sas_ata_qc_fill_rtf, .set_dmamode = sas_ata_set_dmamode, @@ -572,15 +571,6 @@ static struct ata_port_operations sas_sata_ops = { .end_eh = sas_ata_end_eh, }; -static struct ata_port_info sata_port_info = { - .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | - ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX, - .pio_mask = ATA_PIO4, - .mwdma_mask = ATA_MWDMA2, - .udma_mask = ATA_UDMA6, - .port_ops = &sas_sata_ops -}; - int sas_ata_init(struct domain_device *found_dev) { struct sas_ha_struct *ha = found_dev->port->ha; @@ -597,28 +587,35 @@ int sas_ata_init(struct domain_device *found_dev) ata_host_init(ata_host, ha->dev, &sas_sata_ops); - ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost); + ap = ata_port_alloc(ata_host); if (!ap) { - pr_err("ata_sas_port_alloc failed.\n"); + pr_err("ata_port_alloc failed.\n"); rc = -ENODEV; goto free_host; } + ap->port_no = 0; + ap->pio_mask = ATA_PIO4; + ap->mwdma_mask = ATA_MWDMA2; + ap->udma_mask = ATA_UDMA6; + ap->flags |= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | + ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX; + ap->ops = &sas_sata_ops; ap->private_data = found_dev; ap->cbl = ATA_CBL_SATA; ap->scsi_host = shost; - rc = ata_sas_tport_add(ata_host->dev, ap); + rc = ata_tport_add(ata_host->dev, ap); if (rc) - goto destroy_port; + goto free_port; found_dev->sata_dev.ata_host = ata_host; found_dev->sata_dev.ap = ap; return 0; -destroy_port: - kfree(ap); +free_port: + ata_port_free(ap); free_host: ata_host_put(ata_host); return rc; @@ -964,3 +961,87 @@ int 
sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id) force_phy_id, &tmf_task); } EXPORT_SYMBOL_GPL(sas_execute_ata_cmd); + +static ssize_t sas_ncq_prio_supported_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct domain_device *ddev = sdev_to_domain_dev(sdev); + bool supported; + int rc; + + rc = ata_ncq_prio_supported(ddev->sata_dev.ap, sdev, &supported); + if (rc) + return rc; + + return sysfs_emit(buf, "%d\n", supported); +} + +static struct device_attribute dev_attr_sas_ncq_prio_supported = + __ATTR(ncq_prio_supported, S_IRUGO, sas_ncq_prio_supported_show, NULL); + +static ssize_t sas_ncq_prio_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct domain_device *ddev = sdev_to_domain_dev(sdev); + bool enabled; + int rc; + + rc = ata_ncq_prio_enabled(ddev->sata_dev.ap, sdev, &enabled); + if (rc) + return rc; + + return sysfs_emit(buf, "%d\n", enabled); +} + +static ssize_t sas_ncq_prio_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct domain_device *ddev = sdev_to_domain_dev(sdev); + bool enable; + int rc; + + rc = kstrtobool(buf, &enable); + if (rc) + return rc; + + rc = ata_ncq_prio_enable(ddev->sata_dev.ap, sdev, enable); + if (rc) + return rc; + + return len; +} + +static struct device_attribute dev_attr_sas_ncq_prio_enable = + __ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, + sas_ncq_prio_enable_show, sas_ncq_prio_enable_store); + +static struct attribute *sas_ata_sdev_attrs[] = { + &dev_attr_sas_ncq_prio_supported.attr, + &dev_attr_sas_ncq_prio_enable.attr, + NULL +}; + +static umode_t sas_ata_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct scsi_device *sdev = to_scsi_device(dev); + struct domain_device *ddev = sdev_to_domain_dev(sdev); + + if (!dev_is_sata(ddev)) + return 0; + + return attr->mode; +} + +const struct attribute_group sas_ata_sdev_attr_group = { + .attrs = sas_ata_sdev_attrs, + .is_visible = sas_ata_attr_is_visible, +}; +EXPORT_SYMBOL_GPL(sas_ata_sdev_attr_group); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 8fb7c41c0962..951bdc554a10 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -300,8 +300,8 @@ void sas_free_device(struct kref *kref) kfree(dev->ex_dev.ex_phy); if (dev_is_sata(dev) && dev->sata_dev.ap) { - ata_sas_tport_delete(dev->sata_dev.ap); - kfree(dev->sata_dev.ap); + ata_tport_delete(dev->sata_dev.ap); + ata_port_free(dev->sata_dev.ap); ata_host_put(dev->sata_dev.ata_host); dev->sata_dev.ata_host = NULL; dev->sata_dev.ap = NULL; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f6e6db8b8aba..869b5d4db44c 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -11,7 +11,7 @@ #include <linux/scatterlist.h> #include <linux/blkdev.h> #include <linux/slab.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "sas_internal.h" @@ -26,6 +26,28 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include); static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); +static void sas_port_add_ex_phy(struct sas_port *port, struct ex_phy *ex_phy) +{ + 
sas_port_add_phy(port, ex_phy->phy); + ex_phy->port = port; + ex_phy->phy_state = PHY_DEVICE_DISCOVERED; +} + +static void sas_ex_add_parent_port(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; + + if (!ex->parent_port) { + ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id); + /* FIXME: error handling */ + BUG_ON(!ex->parent_port); + BUG_ON(sas_port_add(ex->parent_port)); + sas_port_mark_backlink(ex->parent_port); + } + sas_port_add_ex_phy(ex->parent_port, ex_phy); +} + /* ---------- SMP task management ---------- */ /* Give it some long enough timeout. In seconds. */ @@ -67,7 +89,7 @@ static int smp_execute_task_sg(struct domain_device *dev, res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { - del_timer_sync(&task->slow_task->timer); + timer_delete_sync(&task->slow_task->timer); pr_notice("executing SMP task failed:%d\n", res); break; } @@ -239,8 +261,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, /* help some expanders that fail to zero sas_address in the 'no * device' case */ - if (phy->attached_dev_type == SAS_PHY_UNUSED || - phy->linkrate < SAS_LINK_RATE_1_5_GBPS) + if (phy->attached_dev_type == SAS_PHY_UNUSED) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); @@ -857,9 +878,7 @@ static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { - sas_port_add_phy(ephy->port, phy->phy); - phy->port = ephy->port; - phy->phy_state = PHY_DEVICE_DISCOVERED; + sas_port_add_ex_phy(ephy->port, phy); return true; } } @@ -963,11 +982,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) /* Parent and domain coherency */ if (!dev->parent && sas_phy_match_port_addr(dev->port, ex_phy)) { - sas_add_parent_port(dev, phy_id); + sas_ex_add_parent_port(dev, phy_id); return 0; } if (dev->parent && sas_phy_match_dev_addr(dev->parent, ex_phy)) { - sas_add_parent_port(dev, phy_id); + sas_ex_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); return 0; @@ -1849,9 +1868,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (phy->port) { sas_port_delete_phy(phy->port, phy->phy); sas_device_set_phy(found, phy->port); - if (phy->port->num_phys == 0) + if (phy->port->num_phys == 0) { list_add_tail(&phy->port->del_list, &parent->port->sas_port_del_list); + if (ex_dev->parent_port == phy->port) + ex_dev->parent_port = NULL; + } phy->port = NULL; } } diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 9c8cc723170d..8566bb1208a0 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -122,12 +122,12 @@ int sas_register_ha(struct sas_ha_struct *sas_ha) error = -ENOMEM; snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev)); - sas_ha->event_q = create_singlethread_workqueue(name); + sas_ha->event_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name); if (!sas_ha->event_q) goto Undo_ports; snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev)); - sas_ha->disco_q = create_singlethread_workqueue(name); + sas_ha->disco_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name); if (!sas_ha->disco_q) goto Undo_event_q; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 
3804aef165ad..03d6ec1eb970 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -145,6 +145,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i func, dev->parent ? "exp-attached" : "direct-attached", SAS_ADDR(dev->sas_addr), err); + + /* + * If the device probe failed, the expander phy attached address + * needs to be reset so that the phy will not be treated as flutter + * in the next revalidation + */ + if (dev->parent && !dev_is_expander(dev->dev_type)) { + struct sas_phy *phy = dev->phy; + struct domain_device *parent = dev->parent; + struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number]; + + memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + } + sas_unregister_dev(dev->port, dev); } @@ -189,21 +203,6 @@ static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_devic } } -static inline void sas_add_parent_port(struct domain_device *dev, int phy_id) -{ - struct expander_device *ex = &dev->ex_dev; - struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; - - if (!ex->parent_port) { - ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id); - /* FIXME: error handling */ - BUG_ON(!ex->parent_port); - BUG_ON(sas_port_add(ex->parent_port)); - sas_port_mark_backlink(ex->parent_port); - } - sas_port_add_phy(ex->parent_port, ex_phy->phy); -} - static inline struct domain_device *sas_alloc_device(void) { struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 9047cfcd1072..feb2461b90e8 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -804,14 +804,14 @@ EXPORT_SYMBOL_GPL(sas_target_alloc); #define SAS_DEF_QD 256 -int sas_slave_configure(struct scsi_device *scsi_dev) +int sas_sdev_configure(struct scsi_device *scsi_dev, struct queue_limits *lim) { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); if (dev_is_sata(dev)) { - ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap); + ata_sas_sdev_configure(scsi_dev, lim, dev->sata_dev.ap); return 0; } @@ -829,7 +829,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev) return 0; } -EXPORT_SYMBOL_GPL(sas_slave_configure); +EXPORT_SYMBOL_GPL(sas_sdev_configure); int sas_change_queue_depth(struct scsi_device *sdev, int depth) { @@ -859,7 +859,7 @@ EXPORT_SYMBOL_GPL(sas_bios_param); void sas_task_internal_done(struct sas_task *task) { - del_timer(&task->slow_task->timer); + timer_delete(&task->slow_task->timer); complete(&task->slow_task->completion); } @@ -911,7 +911,7 @@ static int sas_execute_internal_abort(struct domain_device *device, res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { - del_timer_sync(&task->slow_task->timer); + timer_delete_sync(&task->slow_task->timer); pr_err("Executing internal abort failed %016llx (%d)\n", SAS_ADDR(device->sas_addr), res); break; @@ -1010,7 +1010,7 @@ int sas_execute_tmf(struct domain_device *device, void *parameter, res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { - del_timer_sync(&task->slow_task->timer); + timer_delete_sync(&task->slow_task->timer); pr_err("executing TMF task failed %016llx (%d)\n", SAS_ADDR(device->sas_addr), res); break; @@ -1180,7 +1180,7 @@ void sas_task_abort(struct sas_task *task) if (!slow) return; - if (!del_timer(&slow->timer)) + if (!timer_delete(&slow->timer)) return; slow->timer.function(&slow->timer); return; @@ -1193,14 +1193,14 @@ void 
sas_task_abort(struct sas_task *task) } EXPORT_SYMBOL_GPL(sas_task_abort); -int sas_slave_alloc(struct scsi_device *sdev) +int sas_sdev_init(struct scsi_device *sdev) { if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun) return -ENXIO; return 0; } -EXPORT_SYMBOL_GPL(sas_slave_alloc); +EXPORT_SYMBOL_GPL(sas_sdev_init); void sas_target_destroy(struct scsi_target *starget) { diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 98ca7df003ef..fe4fb67eb50c 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -74,8 +74,7 @@ struct lpfc_sli2_slim; * queue depths when there are driver resource error or Firmware * resource error. */ -/* 1 Second */ -#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) +#define QUEUE_RAMP_DOWN_INTERVAL (secs_to_jiffies(1)) /* Number of exchanges reserved for discovery to complete */ #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -306,6 +305,14 @@ struct lpfc_stats { struct lpfc_hba; +/* Data structure to keep withheld FLOGI_ACC information */ +struct lpfc_defer_flogi_acc { + bool flag; + u16 rx_id; + u16 ox_id; + struct lpfc_nodelist *ndlp; + +}; #define LPFC_VMID_TIMER 300 /* timer interval in seconds */ @@ -393,6 +400,37 @@ enum hba_state { LPFC_HBA_ERROR = -1 }; +enum lpfc_hba_flag { /* hba generic flags */ + HBA_ERATT_HANDLED = 0, /* This flag is set when eratt handled */ + DEFER_ERATT = 1, /* Deferred error attn in progress */ + HBA_FCOE_MODE = 2, /* HBA function in FCoE Mode */ + HBA_SP_QUEUE_EVT = 3, /* Slow-path qevt posted to worker thread*/ + HBA_POST_RECEIVE_BUFFER = 4, /* Rcv buffers need to be posted */ + HBA_PERSISTENT_TOPO = 5, /* Persistent topology support in hba */ + ELS_XRI_ABORT_EVENT = 6, /* ELS_XRI abort event was queued */ + ASYNC_EVENT = 7, + LINK_DISABLED = 8, /* Link disabled by user */ + FCF_TS_INPROG = 9, /* FCF table scan in progress */ + FCF_RR_INPROG = 10, /* FCF roundrobin flogi in progress */ + HBA_FIP_SUPPORT = 11, /* FIP support in HBA */ + HBA_DEVLOSS_TMO = 13, /* HBA in devloss timeout */ + HBA_RRQ_ACTIVE = 14, /* process the rrq active list */ + HBA_IOQ_FLUSH = 15, /* I/O queues being flushed */ + HBA_RECOVERABLE_UE = 17, /* FW supports recoverable UE */ + HBA_FORCED_LINK_SPEED = 18, /* + * Firmware supports Forced Link + * Speed capability + */ + HBA_FLOGI_ISSUED = 20, /* FLOGI was issued */ + HBA_DEFER_FLOGI = 23, /* Defer FLOGI till read_sparm cmpl */ + HBA_SETUP = 24, /* HBA setup completed */ + HBA_NEEDS_CFG_PORT = 25, /* SLI3: CONFIG_PORT mbox needed */ + HBA_HBEAT_INP = 26, /* mbox HBEAT is in progress */ + HBA_HBEAT_TMO = 27, /* HBEAT initiated after timeout */ + HBA_FLOGI_OUTSTANDING = 28, /* FLOGI is outstanding */ + HBA_RHBA_CMPL = 29, /* RHBA FDMI cmd is successful */ +}; + struct lpfc_trunk_link_state { enum hba_state state; uint8_t fault; @@ -1007,35 +1045,7 @@ struct lpfc_hba { #define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */ #define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */ - uint32_t hba_flag; /* hba generic flags */ -#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ -#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ -#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ -#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ -#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ -#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ -#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */ -#define ASYNC_EVENT 0x80 -#define 
LINK_DISABLED 0x100 /* Link disabled by user */ -#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ -#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ -#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ -#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ -#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ -#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ -#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ -#define HBA_FORCED_LINK_SPEED 0x40000 /* - * Firmware supports Forced Link Speed - * capability - */ -#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ -#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ -#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ -#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */ -#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */ -#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */ -#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */ -#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */ + unsigned long hba_flag; /* hba generic flags */ struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ @@ -1284,6 +1294,7 @@ struct lpfc_hba { uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; + spinlock_t rrq_list_lock; /* lock for active_rrq_list */ struct list_head active_rrq_list; spinlock_t hbalock; struct work_struct unblock_request_work; /* SCSI layer unblock IOs */ @@ -1426,9 +1437,7 @@ struct lpfc_hba { uint16_t vlan_id; struct list_head fcf_conn_rec_list; - bool defer_flogi_acc_flag; - uint16_t defer_flogi_acc_rx_id; - uint16_t defer_flogi_acc_ox_id; + struct lpfc_defer_flogi_acc defer_flogi_acc; spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ struct list_head ct_ev_waiters; @@ -1705,18 +1714,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba) * Note: If no valid cpu found, then nr_cpu_ids is returned. * **/ -static inline unsigned int +static __always_inline unsigned int lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start) { - unsigned int cpu_it; - - for_each_cpu_wrap(cpu_it, mask, start) { - if (cpu_online(cpu_it)) - break; - } - - return cpu_it; + return cpumask_next_and_wrap(start, mask, cpu_online_mask); } + /** * lpfc_next_present_cpu - Finds next present CPU after n * @n: the cpu prior to search @@ -1724,16 +1727,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start) * Note: If no next present cpu, then fallback to first present cpu. * **/ -static inline unsigned int lpfc_next_present_cpu(int n) +static __always_inline unsigned int lpfc_next_present_cpu(int n) { - unsigned int cpu; - - cpu = cpumask_next(n, cpu_present_mask); - - if (cpu >= nr_cpu_ids) - cpu = cpumask_first(cpu_present_mask); - - return cpu; + return cpumask_next_wrap(n, cpu_present_mask); } /** diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 3c534b3cfe91..54ee8ecec3b3 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. 
The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -291,6 +291,138 @@ buffer_done: return len; } +static ssize_t +lpfc_vmid_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vmid *vmp; + int len = 0, i, j, k, cpu; + char hxstr[LPFC_MAX_VMID_SIZE * 3] = {0}; + struct timespec64 curr_tm; + struct lpfc_vmid_priority_range *vr; + u64 *lta, rct_acc = 0, max_lta = 0; + struct tm tm_val; + + ktime_get_ts64(&curr_tm); + + len += scnprintf(buf + len, PAGE_SIZE - len, "Key 'vmid':\n"); + + /* if enabled continue, else return */ + if (lpfc_is_vmid_enabled(phba)) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "lpfc VMID Page: ON\n\n"); + } else { + len += scnprintf(buf + len, PAGE_SIZE - len, + "lpfc VMID Page: OFF\n\n"); + return len; + } + + /* if using priority tagging */ + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "VMID priority ranges:\n"); + vr = vport->vmid_priority.vmid_range; + for (i = 0; i < vport->vmid_priority.num_descriptors; ++i) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "\t[x%x - x%x], qos: x%x\n", + vr->low, vr->high, vr->qos); + vr++; + } + } + + for (i = 0; i < phba->cfg_max_vmid; i++) { + vmp = &vport->vmid[i]; + max_lta = 0; + + /* only if the slot is used */ + if (!(vmp->flag & LPFC_VMID_SLOT_USED) || + !(vmp->flag & LPFC_VMID_REGISTERED)) + continue; + + /* if using priority tagging */ + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "VEM ID: %02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x:%02x:%02x\n", + vport->lpfc_vmid_host_uuid[0], + vport->lpfc_vmid_host_uuid[1], + vport->lpfc_vmid_host_uuid[2], + vport->lpfc_vmid_host_uuid[3], + vport->lpfc_vmid_host_uuid[4], + vport->lpfc_vmid_host_uuid[5], + vport->lpfc_vmid_host_uuid[6], + vport->lpfc_vmid_host_uuid[7], + vport->lpfc_vmid_host_uuid[8], + vport->lpfc_vmid_host_uuid[9], + vport->lpfc_vmid_host_uuid[10], + vport->lpfc_vmid_host_uuid[11], + vport->lpfc_vmid_host_uuid[12], + vport->lpfc_vmid_host_uuid[13], + vport->lpfc_vmid_host_uuid[14], + vport->lpfc_vmid_host_uuid[15]); + } + + /* IO stats */ + len += scnprintf(buf + len, PAGE_SIZE - len, + "ID00 READs:%llx WRITEs:%llx\n", + vmp->io_rd_cnt, + vmp->io_wr_cnt); + for (j = 0, k = 0; j < strlen(vmp->host_vmid); j++, k += 3) + sprintf((char *)(hxstr + k), "%2x ", vmp->host_vmid[j]); + /* UUIDs */ + len += scnprintf(buf + len, PAGE_SIZE - len, "UUID:\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", hxstr); + + len += scnprintf(buf + len, PAGE_SIZE - len, "String (%s)\n", + vmp->host_vmid); + + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) + len += scnprintf(buf + len, PAGE_SIZE - len, + "CS_CTL VMID: 0x%x\n", + vmp->un.cs_ctl_vmid); + else + len += scnprintf(buf + len, PAGE_SIZE - len, + "Application id: 0x%x\n", + vmp->un.app_id); + + /* calculate the last access time */ + for_each_possible_cpu(cpu) { + lta = per_cpu_ptr(vmp->last_io_time, cpu); + if (!lta) + continue; + + /* if last access time is less than timeout */ + if (time_after((unsigned long)*lta, jiffies)) + continue; + + if (*lta > max_lta) + max_lta = *lta; + } + + rct_acc = jiffies_to_msecs(jiffies - 
max_lta) / 1000; + /* current time */ + time64_to_tm(ktime_get_real_seconds(), + -(sys_tz.tz_minuteswest * 60) - rct_acc, &tm_val); + + len += scnprintf(buf + len, PAGE_SIZE - len, + "Last Access Time :" + "%ld-%d-%dT%02d:%02d:%02d\n\n", + 1900 + tm_val.tm_year, tm_val.tm_mon + 1, + tm_val.tm_mday, tm_val.tm_hour, + tm_val.tm_min, tm_val.tm_sec); + + if (len >= PAGE_SIZE) + return len; + + memset(hxstr, 0, LPFC_MAX_VMID_SIZE * 3); + } + return len; +} + /** * lpfc_drvr_version_show - Return the Emulex driver string with version number * @dev: class unused variable. @@ -322,7 +454,7 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - if (phba->hba_flag & HBA_FIP_SUPPORT) + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) return scnprintf(buf, PAGE_SIZE, "1\n"); else return scnprintf(buf, PAGE_SIZE, "0\n"); @@ -1049,7 +1181,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: - if (phba->hba_flag & LINK_DISABLED) + if (test_bit(LINK_DISABLED, &phba->hba_flag)) len += scnprintf(buf + len, PAGE_SIZE-len, "Link Down - User disabled\n"); else @@ -1292,7 +1424,7 @@ lpfc_issue_lip(struct Scsi_Host *shost) * it doesn't make any sense to allow issue_lip */ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) || - (phba->hba_flag & LINK_DISABLED) || + test_bit(LINK_DISABLED, &phba->hba_flag) || (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) return -EPERM; @@ -1831,6 +1963,7 @@ static int lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) { LPFC_MBOXQ_t *mbox = NULL; + u32 payload_len; unsigned long val = 0; char *pval = NULL; int rc = 0; @@ -1869,9 +2002,11 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) if (!mbox) return -ENOMEM; + payload_len = sizeof(struct lpfc_mbx_set_trunk_mode) - + sizeof(struct lpfc_sli4_cfg_mhdr); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE, - 12, LPFC_SLI4_MBX_EMBED); + payload_len, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_set_trunk_mode, &mbox->u.mqe.un.set_trunk_mode, @@ -1907,6 +2042,11 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, /* Get transceiver information */ rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL); + if (!rdp_context) { + len = scnprintf(buf, PAGE_SIZE - len, + "SPF info NA: alloc failure\n"); + return len; + } rc = lpfc_get_sfp_info_wait(phba, rdp_context); if (rc) { @@ -2570,7 +2710,7 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, (old_val & DISABLE_FCP_RING_INT)) { spin_unlock_irq(&phba->hbalock); - del_timer(&phba->fcp_poll_timer); + timer_delete(&phba->fcp_poll_timer); spin_lock_irq(&phba->hbalock); if (lpfc_readl(phba->HCregaddr, &creg_val)) { spin_unlock_irq(&phba->hbalock); @@ -3003,6 +3143,7 @@ static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, NULL); static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL); +static DEVICE_ATTR_RO(lpfc_vmid_info); #define WWN_SZ 8 /** @@ -3635,7 +3776,8 @@ lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", - (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); + test_bit(HBA_PERSISTENT_TOPO, + &phba->hba_flag) ? 
1 : 0); } static DEVICE_ATTR(pt, 0444, lpfc_pt_show, NULL); @@ -4205,8 +4347,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, &phba->sli4_hba.sli_intf); if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); - if ((phba->hba_flag & HBA_PERSISTENT_TOPO || - (!phba->sli4_hba.pc_sli4_params.pls && + if ((test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag) || + (!phba->sli4_hba.pc_sli4_params.pls && (sli_family == LPFC_SLI_INTF_FAMILY_G6 || if_type == LPFC_SLI_INTF_IF_TYPE_6))) && val == 4) { @@ -4309,7 +4451,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && - phba->hba_flag & HBA_FORCED_LINK_SPEED) + test_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag)) return -EPERM; if (!strncmp(buf, "nolip ", strlen("nolip "))) { @@ -6108,6 +6250,7 @@ static struct attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_vmid_inactivity_timeout.attr, &dev_attr_lpfc_vmid_app_header.attr, &dev_attr_lpfc_vmid_priority_tagging.attr, + &dev_attr_lpfc_vmid_info.attr, NULL, }; @@ -6176,7 +6319,7 @@ const struct attribute_group *lpfc_vport_groups[] = { **/ static ssize_t sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; @@ -6235,7 +6378,7 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, **/ static ssize_t sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; @@ -6271,14 +6414,14 @@ sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_ctlreg_attr = { +static const struct bin_attribute sysfs_ctlreg_attr = { .attr = { .name = "ctlreg", .mode = S_IRUSR | S_IWUSR, }, .size = 256, - .read = sysfs_ctlreg_read, - .write = sysfs_ctlreg_write, + .read_new = sysfs_ctlreg_read, + .write_new = sysfs_ctlreg_write, }; /** @@ -6299,7 +6442,7 @@ static struct bin_attribute sysfs_ctlreg_attr = { **/ static ssize_t sysfs_mbox_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return -EPERM; @@ -6323,20 +6466,20 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj, **/ static ssize_t sysfs_mbox_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return -EPERM; } -static struct bin_attribute sysfs_mbox_attr = { +static const struct bin_attribute sysfs_mbox_attr = { .attr = { .name = "mbox", .mode = S_IRUSR | S_IWUSR, }, .size = MAILBOX_SYSFS_MAX, - .read = sysfs_mbox_read, - .write = sysfs_mbox_write, + .read_new = sysfs_mbox_read, + .write_new = sysfs_mbox_write, }; /** @@ -6497,7 +6640,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost) struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { + if ((lpfc_is_link_up(phba)) && + !test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch(phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; @@ -6533,7 +6677,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } - } else if 
(lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { + } else if (lpfc_is_link_up(phba) && + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch (phba->fc_linkspeed) { case LPFC_ASYNC_LINK_SPEED_1GBPS: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; @@ -6718,7 +6863,7 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count -= lso->invalid_crc_count; hs->error_frames -= lso->error_frames; - if (phba->hba_flag & HBA_FCOE_MODE) { + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { hs->lip_count = -1; hs->nos_count = (phba->link_events >> 1); hs->nos_count -= lso->link_events; @@ -6816,7 +6961,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; lso->error_frames = pmb->un.varRdLnk.crcCnt; - if (phba->hba_flag & HBA_FCOE_MODE) + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) lso->link_events = (phba->link_events >> 1); else lso->link_events = (phba->fc_eventTag >> 1); @@ -7161,11 +7306,11 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba) case PCI_DEVICE_ID_ZEPHYR_DCSP: case PCI_DEVICE_ID_TIGERSHARK: case PCI_DEVICE_ID_TOMCAT: - phba->hba_flag |= HBA_FCOE_MODE; + set_bit(HBA_FCOE_MODE, &phba->hba_flag); break; default: /* for others, clear the flag */ - phba->hba_flag &= ~HBA_FCOE_MODE; + clear_bit(HBA_FCOE_MODE, &phba->hba_flag); } } @@ -7236,7 +7381,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_get_hba_function_mode(phba); /* BlockGuard allowed for FC only. */ - if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { + if (phba->cfg_enable_bg && test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0581 BlockGuard feature not supported\n"); /* If set, clear the BlockGuard support param */ diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 529df1768fa8..d61d979f9b77 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -120,6 +120,16 @@ enum ELX_LOOPBACK_CMD { #define ELX_LOOPBACK_HEADER_SZ \ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) +/* For non-embedded read object command */ +#define READ_OBJ_EMB0_SCHEME_0 {1, 10, 256, 128} +#define READ_OBJ_EMB0_SCHEME_1 {11, LPFC_EMB0_MAX_RD_OBJ_HBD_CNT, 512, 192} +static const struct lpfc_read_object_cmd_scheme { + u32 min_hbd_cnt; + u32 max_hbd_cnt; + u32 cmd_size; + u32 payload_word_offset; +} rd_obj_scheme[2] = {READ_OBJ_EMB0_SCHEME_0, READ_OBJ_EMB0_SCHEME_1}; + struct lpfc_dmabufext { struct lpfc_dmabuf dma; uint32_t size; @@ -398,7 +408,11 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; - if (ndlp->nlp_flag & NLP_ELS_SND_MASK) + if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) || + test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) || + test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_RNID_SND, &ndlp->nlp_flag)) return -ENODEV; /* allocate our bsg tracking structure */ @@ -2673,8 +2687,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - msecs_to_jiffies(1000 * - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT)); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? 
-EINTR : -ETIMEDOUT; else { @@ -3208,6 +3221,9 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) cmdiocbq->num_bdes = num_bde; cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; + if (phba->cfg_vmid_app_header) + cmdiocbq->cmd_flag |= LPFC_IO_VMID; + cmdiocbq->vport = phba->pport; cmdiocbq->cmd_cmpl = NULL; cmdiocbq->bpl_dmabuf = txbmp; @@ -3241,8 +3257,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - msecs_to_jiffies(1000 * - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT)); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? -EINTR : -ETIMEDOUT; @@ -3532,6 +3547,103 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) } /** + * lpfc_rd_obj_emb0_handle_job - Handles completion for non-embedded + * READ_OBJECT_V0 mailbox commands + * @phba: pointer to lpfc_hba data struct + * @pmb_buf: pointer to mailbox buffer + * @sli_cfg_mbx: pointer to SLI_CONFIG mailbox memory region + * @job: pointer to bsg_job struct + * @bsg_reply: pointer to bsg_reply struct + * + * Given a non-embedded READ_OBJECT_V0's HBD_CNT, this routine copies + * a READ_OBJECT_V0 mailbox command's read data payload into a bsg_job + * structure for passing back to the application layer. + * + * Return codes + * 0 - successful + * -EINVAL - invalid HBD_CNT + * -ENODEV - pointer to bsg_job struct is NULL + **/ +static int +lpfc_rd_obj_emb0_handle_job(struct lpfc_hba *phba, u8 *pmb_buf, + struct lpfc_sli_config_mbox *sli_cfg_mbx, + struct bsg_job *job, + struct fc_bsg_reply *bsg_reply) +{ + struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; + struct lpfc_sli_config_emb0_subsys *emb0_subsys; + u32 hbd_cnt; + u32 dma_buf_len; + u8 i = 0; + size_t extra_bytes; + off_t skip = 0; + + if (!job) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2496 NULL job\n"); + return -ENODEV; + } + + if (!bsg_reply) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2498 NULL bsg_reply\n"); + return -ENODEV; + } + + emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys; + + hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt, + emb0_subsys); + + /* Calculate where the read object's read data payload is located based + * on HBD count scheme. 
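+ * For example, with the rd_obj_scheme table above: hbd_cnt = 5 selects
+ * scheme 0, giving skip = 128 words * 4 = 512 bytes, while hbd_cnt = 20
+ * selects scheme 1, giving skip = 192 words * 4 = 768 bytes.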
+ */ + if (hbd_cnt >= rd_obj_scheme[0].min_hbd_cnt && + hbd_cnt <= rd_obj_scheme[0].max_hbd_cnt) { + skip = rd_obj_scheme[0].payload_word_offset * 4; + } else if (hbd_cnt >= rd_obj_scheme[1].min_hbd_cnt && + hbd_cnt <= rd_obj_scheme[1].max_hbd_cnt) { + skip = rd_obj_scheme[1].payload_word_offset * 4; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2497 bad hbd_count 0x%08x\n", + hbd_cnt); + return -EINVAL; + } + + /* Copy SLI_CONFIG command and READ_OBJECT response first */ + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + pmb_buf, skip); + + /* Copy data from hbds */ + list_for_each_entry_safe(curr_dmabuf, next_dmabuf, + &phba->mbox_ext_buf_ctx.ext_dmabuf_list, + list) { + dma_buf_len = emb0_subsys->hbd[i].buf_len; + + /* Use sg_copy_buffer to specify a skip offset */ + extra_bytes = sg_copy_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + curr_dmabuf->virt, + dma_buf_len, skip, false); + + bsg_reply->reply_payload_rcv_len += extra_bytes; + + skip += extra_bytes; + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2499 copied hbd[%d] " + "0x%zx bytes\n", + i, extra_bytes); + i++; + } + + return 0; +} + +/** * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl * @phba: Pointer to HBA context object. * @pmboxq: Pointer to mailbox command. @@ -3544,10 +3656,10 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; struct bsg_job *job; - struct fc_bsg_reply *bsg_reply; + struct fc_bsg_reply *bsg_reply = NULL; uint8_t *pmb, *pmb_buf; unsigned long flags; - uint32_t size; + u32 size, opcode; int rc = 0; struct lpfc_dmabuf *dmabuf; struct lpfc_sli_config_mbox *sli_cfg_mbx; @@ -3584,6 +3696,24 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], &pmbx[sizeof(MAILBOX_t)], sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); + + /* Special handling for non-embedded READ_OBJECT */ + opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, + &sli_cfg_mbx->un.sli_config_emb0_subsys); + switch (opcode) { + case COMN_OPCODE_READ_OBJECT: + if (job) { + rc = lpfc_rd_obj_emb0_handle_job(phba, pmb_buf, + sli_cfg_mbx, + job, + bsg_reply); + bsg_reply->result = rc; + goto done; + } + break; + default: + break; + } } /* Complete the job if the job is still active */ @@ -3597,12 +3727,14 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /* result for successful */ bsg_reply->result = 0; +done: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2937 SLI_CONFIG ext-buffer mailbox command " "(x%x/x%x) complete bsg job done, bsize:%d\n", phba->mbox_ext_buf_ctx.nembType, - phba->mbox_ext_buf_ctx.mboxType, size); + phba->mbox_ext_buf_ctx.mboxType, + job->reply_payload.payload_len); lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, @@ -3812,14 +3944,16 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, { struct fc_bsg_request *bsg_request = job->request; struct lpfc_sli_config_mbox *sli_cfg_mbx; + struct lpfc_sli_config_emb0_subsys *emb0_subsys; + struct list_head *ext_dmabuf_list; struct dfc_mbox_req *mbox_req; struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; - uint32_t ext_buf_cnt, ext_buf_index; + u32 ext_buf_cnt, ext_buf_index, hbd_cnt; struct lpfc_dmabuf *ext_dmabuf = NULL; struct bsg_job_data *dd_data = NULL; LPFC_MBOXQ_t *pmboxq = NULL; MAILBOX_t *pmb; - 
uint8_t *pmbx; + u8 *pmbx, opcode; int rc, i; mbox_req = @@ -3829,8 +3963,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; if (nemb_tp == nemb_mse) { + emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys; ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, - &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); + &emb0_subsys->sli_config_hdr); if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2945 Handled SLI_CONFIG(mse) rd, " @@ -3840,6 +3975,57 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, rc = -ERANGE; goto job_error; } + + /* Special handling for non-embedded READ_OBJECT */ + opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, emb0_subsys); + switch (opcode) { + case COMN_OPCODE_READ_OBJECT: + hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt, + emb0_subsys); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2449 SLI_CONFIG(mse) rd non-embedded " + "hbd count = %d\n", + hbd_cnt); + + ext_dmabuf_list = + &phba->mbox_ext_buf_ctx.ext_dmabuf_list; + + /* Allocate hbds */ + for (i = 0; i < hbd_cnt; i++) { + ext_dmabuf = lpfc_bsg_dma_page_alloc(phba); + if (!ext_dmabuf) { + rc = -ENOMEM; + goto job_error; + } + list_add_tail(&ext_dmabuf->list, + ext_dmabuf_list); + } + + /* Fill out the physical memory addresses for the + * hbds + */ + i = 0; + list_for_each_entry_safe(curr_dmabuf, next_dmabuf, + ext_dmabuf_list, list) { + emb0_subsys->hbd[i].pa_hi = + putPaddrHigh(curr_dmabuf->phys); + emb0_subsys->hbd[i].pa_lo = + putPaddrLow(curr_dmabuf->phys); + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2495 SLI_CONFIG(hbd)[%d], " + "bufLen:%d, addrHi:x%x, " + "addrLo:x%x\n", i, + emb0_subsys->hbd[i].buf_len, + emb0_subsys->hbd[i].pa_hi, + emb0_subsys->hbd[i].pa_lo); + i++; + } + break; + default: + break; + } + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2941 Handled SLI_CONFIG(mse) rd, " "ext_buf_cnt:%d\n", ext_buf_cnt); @@ -4216,6 +4402,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, case COMN_OPCODE_GET_CNTL_ATTRIBUTES: case COMN_OPCODE_GET_PROFILE_CONFIG: case COMN_OPCODE_SET_FEATURES: + case COMN_OPCODE_READ_OBJECT: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3106 Handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", @@ -4658,8 +4845,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, bsg_reply->reply_payload_rcv_len = 0; /* sanity check to protect driver */ - if (job->reply_payload.payload_len > BSG_MBOX_SIZE || - job->request_payload.payload_len > BSG_MBOX_SIZE) { + if (job->request_payload.payload_len > BSG_MBOX_SIZE) { rc = -ERANGE; goto job_done; } @@ -4730,6 +4916,19 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, pmb->mbxOwner = OWN_HOST; pmboxq->vport = vport; + /* non-embedded SLI_CONFIG requests already parsed, check others */ + if (unlikely(job->reply_payload.payload_len > BSG_MBOX_SIZE)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2729 Cmd x%x (x%x/x%x) request has " + "out-of-range reply payload length x%x\n", + pmb->mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, pmboxq), + lpfc_sli_config_mbox_opcode_get(phba, pmboxq), + job->reply_payload.payload_len); + rc = -ERANGE; + goto job_done; + } + /* If HBA encountered an error attention, allow only DUMP * or RESTART mailbox commands until the HBA is restarted. 
*/ @@ -5002,7 +5201,8 @@ lpfc_forced_link_speed(struct bsg_job *job) goto job_error; } - forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) + forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED, + &phba->hba_flag) ? LPFC_FORCED_LINK_SPEED_SUPPORTED : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; job_error: @@ -5409,7 +5609,7 @@ lpfc_get_cgnbuf_info(struct bsg_job *job) struct get_cgnbuf_info_req *cgnbuf_req; struct lpfc_cgn_info *cp; uint8_t *cgn_buff; - int size, cinfosz; + size_t size, cinfosz; int rc = 0; if (job->request_len < sizeof(struct fc_bsg_request) + diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index 3c04ca2d7455..27e7a033b53d 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2010-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -239,12 +239,27 @@ struct lpfc_sli_config_emb0_subsys { uint32_t timeout; /* comn_set_feature timeout */ uint32_t request_length; /* comn_set_feature request len */ uint32_t version; /* comn_set_feature version */ - uint32_t csf_feature; /* comn_set_feature feature */ + uint32_t word68; /* comn_set_feature feature */ +#define lpfc_emb0_subcmnd_csf_feat_SHIFT 0 +#define lpfc_emb0_subcmnd_csf_feat_MASK 0xffffffff +#define lpfc_emb0_subcmnd_csf_feat_WORD word68 +#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_SHIFT 0 +#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_MASK 0x00ffffff +#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_WORD word68 uint32_t word69; /* comn_set_feature parameter len */ uint32_t word70; /* comn_set_feature parameter val0 */ #define lpfc_emb0_subcmnd_csf_p0_SHIFT 0 #define lpfc_emb0_subcmnd_csf_p0_MASK 0x3 #define lpfc_emb0_subcmnd_csf_p0_WORD word70 + uint32_t reserved71[25]; + uint32_t word96; /* rd_obj hbd_count */ +#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_SHIFT 0 +#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_MASK 0xffffffff +#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_WORD word96 +#define LPFC_EMB0_MAX_RD_OBJ_HBD_CNT 31 + struct lpfc_sli_config_hbd hbd[LPFC_EMB0_MAX_RD_OBJ_HBD_CNT]; + uint32_t word190; + uint32_t word191; }; struct lpfc_sli_config_emb1_subsys { diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d4e46a08f94d..efeb61b15a5b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -571,7 +571,7 @@ int lpfc_issue_reg_vfi(struct lpfc_vport *); int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *); -void lpfc_sli4_node_prep(struct lpfc_hba *); +void lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba); int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); @@ -660,6 +660,7 @@ void lpfc_wqe_cmd_template(void); void lpfc_nvmet_cmd_template(void); void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, uint32_t stat, uint32_t param); +void lpfc_nvmels_flush_cmd(struct lpfc_hba *phba); extern int lpfc_enable_nvmet_cnt; extern unsigned long long lpfc_enable_nvmet[]; extern int lpfc_no_hba_reset_cnt; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 8cc08e58dc05..12c67cdd7c19 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -291,9 +291,9 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp); if (ulp_status) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "6438 Unsol CT: status:x%x/x%x did : x%x\n", - ulp_status, ulp_word4, did); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "6438 Unsol CT: status:x%x/x%x did : x%x\n", + ulp_status, ulp_word4, did); return; } @@ -303,17 +303,17 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "6439 Unsol CT: NDLP Not Found for DID : x%x", - did); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "6439 Unsol CT: NDLP Not Found for DID : x%x", + did); return; } ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "6442 : MI Cmd : x%x Not Supported\n", mi_cmd); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "6442 MI Cmd : x%x Not Supported\n", mi_cmd); lpfc_ct_reject_event(ndlp, ct_req, bf_get(wqe_ctxt_tag, &ctiocbq->wqe.xmit_els_rsp.wqe_com), @@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " - "Data: x%x x%x x%x x%lx x%x\n", Did, + "Data: x%lx x%x x%x x%lx x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); @@ -744,7 +744,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) * state of ndlp hit devloss, change state to * allow rediscovery. 
*/ - if (ndlp->nlp_flag & NLP_NPR_2B_DISC && + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -832,12 +832,10 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) if (ndlp->nlp_type != NLP_NVME_INITIATOR || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) continue; - spin_lock_irq(&ndlp->lock); if (ndlp->nlp_DID == Did) - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); else - ndlp->nlp_flag |= NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } } @@ -894,13 +892,11 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, */ if (vport->phba->nvmet_support) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) + if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag)) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } @@ -1440,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0242 Process x%x GFF " - "NameServer Rsp Data: x%x x%lx x%x\n", + "NameServer Rsp Data: x%lx x%lx x%x\n", did, ndlp->nlp_flag, vport->fc_flag, vport->fc_rscn_id_cnt); } else { @@ -1553,22 +1549,14 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE && ndlp->nlp_fc4_type) { ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - /* This is a fabric topology so if discovery - * started with an unsolicited PLOGI, don't - * send a PRLI. Targets don't issue PLOGI or - * PRLI when acting as a target. Likely this is - * an initiator function. - */ - if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { - lpfc_nlp_set_state(vport, ndlp, - NLP_STE_PRLI_ISSUE); - lpfc_issue_els_prli(vport, ndlp, 0); - } + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, 0); } else if (!ndlp->nlp_fc4_type) { /* If fc4 type is still unknown, then LOGO */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, - "6443 Sending LOGO ndlp x%px," + "6443 Sending LOGO ndlp x%px, " "DID x%06x with fc4_type: " "x%08x, state: %d\n", ndlp, did, ndlp->nlp_fc4_type, @@ -1580,8 +1568,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } } else - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "3065 GFT_ID failed x%08x\n", ulp_status); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY, + "3065 GFT_ID status x%08x\n", ulp_status); out: lpfc_ct_free_iocb(phba, cmdiocb); @@ -1655,6 +1643,16 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } out: + /* If the caller wanted a synchronous DA_ID completion, signal the + * wait obj and clear flag to reset the vport. 
+ */ + if (test_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags)) { + if (ndlp->da_id_waitq) + wake_up(ndlp->da_id_waitq); + } + + clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags); + lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; @@ -2173,7 +2171,7 @@ lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask) struct lpfc_nodelist *ndlp; int i; - phba->hba_flag |= HBA_RHBA_CMPL; + set_bit(HBA_RHBA_CMPL, &phba->hba_flag); vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { @@ -2222,6 +2220,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status, ulp_word4, latt); if (latt || ulp_status) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, + "0229 FDMI cmd %04x failed, latt = %d " + "ulp_status: (x%x/x%x), sli_flag x%x\n", + be16_to_cpu(fdmi_cmd), latt, ulp_status, + ulp_word4, phba->sli.sli_flag); /* Look for a retryable error */ if (ulp_status == IOSTAT_LOCAL_REJECT) { @@ -2230,8 +2233,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case IOERR_SLI_DOWN: /* Driver aborted this IO. No retry as error * is likely Offline->Online or some adapter - * error. Recovery will try again. + * error. Recovery will try again, but if port + * is not active there's no point to continue + * issuing follow up FDMI commands. */ + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { + free_ndlp = cmdiocb->ndlp; + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); + return; + } break; case IOERR_ABORT_IN_PROGRESS: case IOERR_SEQUENCE_TIMEOUT: @@ -2252,12 +2263,6 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; } } - - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0229 FDMI cmd %04x failed, latt = %d " - "ulp_status: x%x, rid x%x\n", - be16_to_cpu(fdmi_cmd), latt, ulp_status, - ulp_word4); } free_ndlp = cmdiocb->ndlp; @@ -2271,9 +2276,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for a CT LS_RJT response */ cmd = be16_to_cpu(fdmi_cmd); if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { - /* FDMI rsp failed */ + /* Log FDMI reject */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, - "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); + "0220 FDMI cmd FS_RJT Data: x%x", cmd); /* Should we fallback to FDMI-2 / FDMI-1 ? */ switch (cmd) { @@ -2368,7 +2373,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * for the physical port completes successfully. * We may have to defer the RPRT accordingly. 
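The NLP_WAIT_FOR_DA_ID block just added to lpfc_cmpl_ct() is the signalling half of a synchronous DA_ID: a caller parks a wait queue pointer in the node, sets the bit, and sleeps until this completion wakes it. The waiter half is not in this hunk; the sketch below shows how such a rendezvous is typically written, using a stand-in node structure and a hypothetical helper name rather than the actual lpfc caller:

#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

enum { WAIT_FOR_DA_ID = 2 };		/* mirrors NLP_WAIT_FOR_DA_ID */

struct node {
	unsigned long save_flags;
	wait_queue_head_t *da_id_waitq;
};

/* Hypothetical waiter, e.g. a vport-delete path that must not proceed
 * until the DA_ID CT request has completed.
 */
static void issue_da_id_and_wait(struct node *ndlp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	ndlp->da_id_waitq = &waitq;
	set_bit(WAIT_FOR_DA_ID, &ndlp->save_flags);

	/* ... issue the DA_ID CT request here ... */

	/* The completion wakes the queue and clears the bit; bound the
	 * sleep in case the request never completes.
	 */
	if (!wait_event_timeout(waitq,
				!test_bit(WAIT_FOR_DA_ID, &ndlp->save_flags),
				msecs_to_jiffies(30000)))
		clear_bit(WAIT_FOR_DA_ID, &ndlp->save_flags);

	ndlp->da_id_waitq = NULL;
}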
*/ - if (phba->hba_flag & HBA_RHBA_CMPL) { + if (test_bit(HBA_RHBA_CMPL, &phba->hba_flag)) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); } else { lpfc_printf_vlog(vport, KERN_INFO, @@ -2785,7 +2790,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr) u32 tcfg; u8 i, cnt; - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { cnt = 0; if (phba->sli_rev == LPFC_SLI_REV4) { tcfg = phba->sli4_hba.conf_trunk; @@ -2859,7 +2864,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr) struct lpfc_hba *phba = vport->phba; u32 speeds = 0; - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch (phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: speeds = HBA_PORTSPEED_1GFC; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a2d2b02b3418..3fd1aa5cc78c 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -870,8 +870,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) wwn_to_u64(ndlp->nlp_nodename.u.wwn)); len += scnprintf(buf+len, size-len, "RPI:x%04x ", ndlp->nlp_rpi); - len += scnprintf(buf+len, size-len, "flag:x%08x ", - ndlp->nlp_flag); + len += scnprintf(buf+len, size-len, "flag:x%08lx ", + ndlp->nlp_flag); if (!ndlp->nlp_type) len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f82615d87c4b..3d47dc7458d1 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -85,11 +85,13 @@ enum lpfc_fc4_xpt_flags { NLP_XPT_HAS_HH = 0x10 }; -enum lpfc_nlp_save_flags { +enum lpfc_nlp_save_flags { /* mask bits */ /* devloss occurred during recovery */ - NLP_IN_RECOV_POST_DEV_LOSS = 0x1, + NLP_IN_RECOV_POST_DEV_LOSS, /* wait for outstanding LOGO to cmpl */ - NLP_WAIT_FOR_LOGO = 0x2, + NLP_WAIT_FOR_LOGO, + /* wait for outstanding DA_ID to finish */ + NLP_WAIT_FOR_DA_ID }; struct lpfc_nodelist { @@ -100,7 +102,7 @@ struct lpfc_nodelist { spinlock_t lock; /* Node management lock */ - uint32_t nlp_flag; /* entry flags */ + unsigned long nlp_flag; /* entry flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ uint16_t nlp_type; @@ -152,14 +154,19 @@ struct lpfc_nodelist { uint32_t fc4_prli_sent; /* flags to keep ndlp alive until special conditions are met */ - enum lpfc_nlp_save_flags save_flags; + unsigned long save_flags; enum lpfc_fc4_xpt_flags fc4_xpt_flags; uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ #define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ uint32_t nlp_defer_did; + + /* These wait objects are NPIV specific. These IOs must complete + * synchronously. 
+ */ wait_queue_head_t *logo_waitq; + wait_queue_head_t *da_id_waitq; }; struct lpfc_node_rrq { @@ -175,37 +182,36 @@ struct lpfc_node_rrq { #define lpfc_ndlp_check_qdepth(phba, ndlp) \ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) -/* Defines for nlp_flag (uint32) */ -#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ -#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ -#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ -#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ -#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ -#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ -#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ -#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ -#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ -#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ -#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ -#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ -#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */ -#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ -#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ -#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ -#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ -#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ -#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ -#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */ -#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful +/* nlp_flag mask bits */ +enum lpfc_nlp_flag { + NLP_IGNR_REG_CMPL = 0, /* Rcvd rscn before we cmpl reg login */ + NLP_REG_LOGIN_SEND = 1, /* sent reglogin to adapter */ + NLP_SUPPRESS_RSP = 4, /* Remote NPort supports suppress rsp */ + NLP_PLOGI_SND = 5, /* sent PLOGI request for this entry */ + NLP_PRLI_SND = 6, /* sent PRLI request for this entry */ + NLP_ADISC_SND = 7, /* sent ADISC request for this entry */ + NLP_LOGO_SND = 8, /* sent LOGO request for this entry */ + NLP_RNID_SND = 10, /* sent RNID request for this entry */ + NLP_NVMET_RECOV = 12, /* NVMET auditing node for recovery. 
*/ + NLP_UNREG_INP = 15, /* UNREG_RPI cmd is in progress */ + NLP_DROPPED = 16, /* Init ref count has been dropped */ + NLP_DELAY_TMO = 17, /* delay timeout is running for node */ + NLP_NPR_2B_DISC = 18, /* node is included in num_disc_nodes */ + NLP_RCV_PLOGI = 19, /* Rcv'ed PLOGI from remote system */ + NLP_LOGO_ACC = 20, /* Process LOGO after ACC completes */ + NLP_TGT_NO_SCSIID = 21, /* good PRLI but no binding for scsid */ + NLP_ISSUE_LOGO = 22, /* waiting to issue a LOGO */ + NLP_IN_DEV_LOSS = 23, /* devloss in progress */ + NLP_ACC_REGLOGIN = 24, /* Issue Reg Login after successful ACC */ -#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from + NLP_NPR_ADISC = 25, /* Issue ADISC when dq'ed from NPR list */ -#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ -#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ -#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ -#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ -#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ -#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ + NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */ + NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */ + NLP_SC_REQ = 29, /* Target requires authentication */ + NLP_FIRSTBURST = 30, /* Target supports FirstBurst */ + NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */ +}; /* There are 4 different double linked lists nodelist entries can reside on. * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index f7c28dc73bf6..375a879c31f1 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -189,11 +189,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, * If this command is for fabric controller and HBA running * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 
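The rewritten lpfc_disc.h block above trades the hex masks for an enum of bit numbers, keeping each value at the log2 of its old mask (NLP_DROPPED 0x00010000 becomes bit 16, NLP_RPI_REGISTERED 0x80000000 becomes bit 31) so logged flag words keep their historical meaning; retired flags such as NLP_RELEASE_RPI and NLP_TARGET_REMOVE simply leave holes at bits 2 and 28. A conversion like this can be verified at compile time; the snippet below is an illustrative add-on, not part of the patch:

#include <linux/build_bug.h>
#include <linux/bits.h>

/* Legacy masks retained only to verify the new bit numbers. */
#define OLD_NLP_DROPPED		0x00010000
#define OLD_NLP_RPI_REGISTERED	0x80000000

enum nlp_flag_bits {
	NLP_DROPPED_BIT		= 16,
	NLP_RPI_REGISTERED_BIT	= 31,
};

static inline void nlp_flag_layout_check(void)
{
	/* Build fails if a bit number ever drifts from its old mask. */
	BUILD_BUG_ON(BIT(NLP_DROPPED_BIT) != OLD_NLP_DROPPED);
	BUILD_BUG_ON(BIT(NLP_RPI_REGISTERED_BIT) != OLD_NLP_RPI_REGISTERED);
}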
*/ - if ((did == Fabric_DID) && - (phba->hba_flag & HBA_FIP_SUPPORT) && - ((elscmd == ELS_CMD_FLOGI) || - (elscmd == ELS_CMD_FDISC) || - (elscmd == ELS_CMD_LOGO))) + if (did == Fabric_DID && + test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && + (elscmd == ELS_CMD_FLOGI || + elscmd == ELS_CMD_FDISC || + elscmd == ELS_CMD_LOGO)) switch (elscmd) { case ELS_CMD_FLOGI: elsiocb->cmd_flag |= @@ -725,11 +725,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&np->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&np->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -864,9 +862,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sizeof(struct lpfc_name)); /* Set state will put ndlp onto node list if not already done */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -965,7 +961,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * In case of FIP mode, perform roundrobin FCF failover * due to new FCF discovery */ - if ((phba->hba_flag & HBA_FIP_SUPPORT) && + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && (phba->fcf.fcf_flag & FCF_DISCOVERY)) { if (phba->link_state < LPFC_LINK_UP) goto stop_rr_fcf_flogi; @@ -979,7 +975,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, - "2611 FLOGI failed on FCF (x%x), " + "2611 FLOGI FCF (x%x), " "status:x%x/x%x, tmo:x%x, perform " "roundrobin FCF failover\n", phba->fcf.current_rec.fcf_indx, @@ -997,11 +993,11 @@ stop_rr_fcf_flogi: if (!(ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2858 FLOGI failure Status:x%x/x%x TMO" - ":x%x Data x%x x%x\n", - ulp_status, ulp_word4, tmo, - phba->hba_flag, phba->fcf.fcf_flag); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2858 FLOGI Status:x%x/x%x TMO" + ":x%x Data x%lx x%x\n", + ulp_status, ulp_word4, tmo, + phba->hba_flag, phba->fcf.fcf_flag); /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { @@ -1018,12 +1014,12 @@ stop_rr_fcf_flogi: * registered with the SCSI transport, remove the initial * reference to trigger node release. 
*/ - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) lpfc_nlp_put(ndlp); lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, - "0150 FLOGI failure Status:x%x/x%x " + "0150 FLOGI Status:x%x/x%x " "xri x%x TMO:x%x refcnt %d\n", ulp_status, ulp_word4, cmdiocb->sli4_xritag, tmo, kref_read(&ndlp->kref)); @@ -1032,11 +1028,11 @@ stop_rr_fcf_flogi: if (!(ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) { - /* FLOGI failure */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "0100 FLOGI failure Status:x%x/x%x " - "TMO:x%x\n", - ulp_status, ulp_word4, tmo); + /* Warn FLOGI status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0100 FLOGI Status:x%x/x%x " + "TMO:x%x\n", + ulp_status, ulp_word4, tmo); goto flogifail; } @@ -1099,8 +1095,10 @@ stop_rr_fcf_flogi: sp->cmn.priority_tagging, kref_read(&ndlp->kref)); /* reinitialize the VMID datastructure before returning */ - if (lpfc_is_vmid_enabled(phba)) + if (lpfc_is_vmid_enabled(phba)) { lpfc_reinit_vmid(vport); + vport->vmid_flag = 0; + } if (sp->cmn.priority_tagging) vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | LPFC_VMID_TYPE_PRIO); @@ -1119,7 +1117,7 @@ stop_rr_fcf_flogi: if (sp->cmn.fPort) rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, ulp_word4); - else if (!(phba->hba_flag & HBA_FCOE_MODE)) + else if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -1149,14 +1147,15 @@ stop_rr_fcf_flogi: lpfc_nlp_put(ndlp); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; - phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); + clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } if (!rc) { /* Mark the FCF discovery process done */ - if (phba->hba_flag & HBA_FIP_SUPPORT) + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, "2769 FLOGI to FCF (x%x) " @@ -1164,8 +1163,9 @@ stop_rr_fcf_flogi: phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; - phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); + clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } @@ -1202,7 +1202,7 @@ flogifail: } out: if (!flogi_in_retry) - phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; + clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); @@ -1232,9 +1232,9 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "6445 ELS completes after LINK_DOWN: " - " Status %x/%x cmd x%x flg x%x\n", + " Status %x/%x cmd x%x flg x%x iotag x%x\n", ulp_status, ulp_word4, cmd, - cmdiocb->cmd_flag); + cmdiocb->cmd_flag, cmdiocb->iotag); if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; @@ -1372,11 +1372,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Avoid race with FLOGI completion and hba_flags. 
*/ - phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); + set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); + set_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); rc = lpfc_issue_fabric_iocb(phba, elsiocb); if (rc == IOCB_ERROR) { - phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); + clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); + clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; @@ -1386,7 +1388,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; /* Check for a deferred FLOGI ACC condition */ - if (phba->defer_flogi_acc_flag) { + if (phba->defer_flogi_acc.flag) { /* lookup ndlp for received FLOGI */ ndlp = lpfc_findnode_did(vport, 0); if (!ndlp) @@ -1400,34 +1402,38 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (phba->sli_rev == LPFC_SLI_REV4) { bf_set(wqe_ctxt_tag, &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, - phba->defer_flogi_acc_rx_id); + phba->defer_flogi_acc.rx_id); bf_set(wqe_rcvoxid, &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, - phba->defer_flogi_acc_ox_id); + phba->defer_flogi_acc.ox_id); } else { icmd = &defer_flogi_acc.iocb; - icmd->ulpContext = phba->defer_flogi_acc_rx_id; + icmd->ulpContext = phba->defer_flogi_acc.rx_id; icmd->unsli3.rcvsli3.ox_id = - phba->defer_flogi_acc_ox_id; + phba->defer_flogi_acc.ox_id; } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3354 Xmit deferred FLOGI ACC: rx_id: x%x," - " ox_id: x%x, hba_flag x%x\n", - phba->defer_flogi_acc_rx_id, - phba->defer_flogi_acc_ox_id, phba->hba_flag); + " ox_id: x%x, hba_flag x%lx\n", + phba->defer_flogi_acc.rx_id, + phba->defer_flogi_acc.ox_id, phba->hba_flag); /* Send deferred FLOGI ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, ndlp, NULL); - phba->defer_flogi_acc_flag = false; - vport->fc_myDID = did; + phba->defer_flogi_acc.flag = false; - /* Decrement ndlp reference count to indicate the node can be - * released when other references are removed. + /* Decrement the held ndlp that was incremented when the + * deferred flogi acc flag was set. */ - lpfc_nlp_put(ndlp); + if (phba->defer_flogi_acc.ndlp) { + lpfc_nlp_put(phba->defer_flogi_acc.ndlp); + phba->defer_flogi_acc.ndlp = NULL; + } + + vport->fc_myDID = did; } return 0; @@ -1538,7 +1544,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1587,7 +1593,7 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. 
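In the deferred FLOGI ACC rework above, the loose defer_flogi_acc_flag/_rx_id/_ox_id fields become one phba->defer_flogi_acc struct that also carries the ndlp whose reference was taken when the deferral was recorded, and that reference is only dropped once the ACC goes out. A compact sketch of the pattern of pinning a node for the lifetime of pending state, with hypothetical names and a bare kref standing in for the lpfc node refcount:

#include <linux/kref.h>
#include <linux/types.h>

struct node {
	struct kref kref;
};

static void node_release(struct kref *kref)
{
	/* last reference gone: free the node here */
}

/* All state for one deferred response travels as a unit. */
struct deferred_acc {
	bool flag;			/* a response is pending */
	u16 rx_id;
	u16 ox_id;
	struct node *ndlp;		/* pinned while the ACC is pending */
};

static void defer_record(struct deferred_acc *d, struct node *n,
			 u16 rx_id, u16 ox_id)
{
	kref_get(&n->kref);		/* node must outlive the deferral */
	d->ndlp = n;
	d->rx_id = rx_id;
	d->ox_id = ox_id;
	d->flag = true;
}

static void defer_complete(struct deferred_acc *d)
{
	/* ... transmit the ACC using d->rx_id / d->ox_id ... */
	d->flag = false;
	if (d->ndlp) {
		kref_put(&d->ndlp->kref, node_release);
		d->ndlp = NULL;
	}
}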
*/ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1665,9 +1671,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *new_ndlp; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; - uint32_t keepDID = 0, keep_nlp_flag = 0; + uint32_t keepDID = 0; int rc; - uint32_t keep_new_nlp_flag = 0; + unsigned long keep_nlp_flag = 0, keep_new_nlp_flag = 0; uint16_t keep_nlp_state; u32 keep_nlp_fc4_type = 0; struct lpfc_nvme_rport *keep_nrport = NULL; @@ -1694,8 +1700,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3178 PLOGI confirm: ndlp x%x x%x x%x: " - "new_ndlp x%x x%x x%x\n", + "3178 PLOGI confirm: ndlp x%x x%lx x%x: " + "new_ndlp x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, (new_ndlp ? new_ndlp->nlp_DID : 0), (new_ndlp ? new_ndlp->nlp_flag : 0), @@ -1759,48 +1765,48 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, new_ndlp->nlp_flag = ndlp->nlp_flag; /* if new_ndlp had NLP_UNREG_INP set, keep it */ - if (keep_new_nlp_flag & NLP_UNREG_INP) - new_ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_new_nlp_flag)) + set_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_new_nlp_flag & NLP_RPI_REGISTERED) - new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_new_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_new_nlp_flag & NLP_DROPPED) - new_ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_new_nlp_flag)) + set_bit(NLP_DROPPED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &new_ndlp->nlp_flag); ndlp->nlp_flag = keep_new_nlp_flag; /* if ndlp had NLP_UNREG_INP set, keep it */ - if (keep_nlp_flag & NLP_UNREG_INP) - ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_nlp_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); /* if ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_nlp_flag & NLP_RPI_REGISTERED) - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* * Retain the DROPPED flag. 
This will take care of the init * refcount when affecting the state change */ - if (keep_nlp_flag & NLP_DROPPED) - ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_nlp_flag)) + set_bit(NLP_DROPPED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); spin_unlock_irq(&new_ndlp->lock); spin_unlock_irq(&ndlp->lock); @@ -1878,7 +1884,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, phba->active_rrq_pool); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", + "3173 PLOGI confirm exit: new_ndlp x%x x%lx x%x\n", new_ndlp->nlp_DID, new_ndlp->nlp_flag, new_ndlp->nlp_fc4_type); @@ -1954,16 +1960,16 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ulp_status) { /* Check for retry */ - /* RRQ failed Don't print the vport to vport rjts */ + /* Warn RRQ status Don't print the vport to vport rjts */ if (ulp_status != IOSTAT_LS_RJT || (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2881 RRQ failure DID:%06X Status:" - "x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2881 RRQ DID:%06X Status:" + "x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); } lpfc_clr_rrq_active(phba, rrq->xritag, rrq); @@ -1999,7 +2005,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_nodelist *ndlp, *free_ndlp; struct lpfc_dmabuf *prsp; - int disc; + bool disc; struct serv_parm *sp = NULL; u32 ulp_status, ulp_word4, did, iotag; bool release_node = false; @@ -2034,10 +2040,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
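lpfc_plogi_confirm_nport() above now snapshots both nodes' nlp_flag words into unsigned long locals, swaps the words wholesale, and then pins NLP_UNREG_INP, NLP_RPI_REGISTERED and NLP_DROPPED back to their original owners with test_bit() on the snapshot. The helper below condenses that keep-this-bit idiom; the names are illustrative:

#include <linux/bitops.h>

/* Force one bit of a live flag word back to its saved value. */
static void restore_bit(int nr, unsigned long saved, unsigned long *live)
{
	if (test_bit(nr, &saved))
		set_bit(nr, live);
	else
		clear_bit(nr, live);
}

/* Swap two flag words but keep bit @nr with its original owner, as the
 * PLOGI-confirm path does for the node-local bits.
 */
static void swap_flags_keeping(unsigned long *a, unsigned long *b, int nr)
{
	unsigned long saved_a = *a, saved_b = *b;

	*a = saved_b;
	*b = saved_a;
	restore_bit(nr, saved_a, a);
	restore_bit(nr, saved_b, b);
}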
*/ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* PLOGI completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -2050,9 +2053,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2060,23 +2061,20 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ - if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); - } + if (disc) + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } - /* PLOGI failed Don't print the vport to vport rjts */ + /* Warn PLOGI status Don't print the vport to vport rjts */ if (ulp_status != IOSTAT_LS_RJT || (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2753 PLOGI failure DID:%06X " - "Status:x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2753 PLOGI DID:%06X " + "Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) @@ -2087,7 +2085,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * with the reglogin process. */ spin_lock_irq(&ndlp->lock); - if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && + if ((test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) || + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) && ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { spin_unlock_irq(&ndlp->lock); goto out; @@ -2098,8 +2097,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * start the device remove process. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2202,12 +2201,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) * outstanding UNREG_RPI mbox command completes, unless we * are going offline. 
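Call sites like lpfc_cmpl_els_plogi() above used to take ndlp->lock so the read of NLP_NPR_2B_DISC and its clear happened in one critical section; test_and_clear_bit() performs the same read-and-clear atomically and also returns the prior value, which is why `disc` can become a plain bool. A short illustration with generic names:

#include <linux/bitops.h>
#include <linux/types.h>

#define DISC_PENDING	18	/* NLP_NPR_2B_DISC maps to bit 18 here */

/* Returns true at most once per set_bit(): the test and the clear are
 * a single atomic operation, so two racing completions can never both
 * observe the bit as set, unlike a separate test_bit()+clear_bit().
 */
static bool claim_discovery(unsigned long *flags)
{
	return test_and_clear_bit(DISC_PENDING, flags);
}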
This logic does not apply for Fabric DIDs */ - if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && + if ((test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || + test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) && ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4110 Issue PLOGI x%x deferred " - "on NPort x%x rpi x%x flg x%x Data:" + "on NPort x%x rpi x%x flg x%lx Data:" " x%px\n", ndlp->nlp_defer_did, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); @@ -2313,7 +2313,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_nodelist *ndlp; char *mode; - u32 loglevel; u32 ulp_status; u32 ulp_word4; bool release_node = false; @@ -2326,10 +2325,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_PRLI_SND; + clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag); /* Driver supports multiple FC4 types. Counters matter. */ + spin_lock_irq(&ndlp->lock); vport->fc_prli_sent--; ndlp->fc4_prli_sent--; spin_unlock_irq(&ndlp->lock); @@ -2362,18 +2361,15 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * could be expected. */ if (test_bit(FC_FABRIC, &vport->fc_flag) || - vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) { - mode = KERN_ERR; - loglevel = LOG_TRACE_EVENT; - } else { + vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) + mode = KERN_WARNING; + else mode = KERN_INFO; - loglevel = LOG_ELS; - } - /* PRLI failed */ - lpfc_printf_vlog(vport, mode, loglevel, - "2754 PRLI failure DID:%06X Status:x%x/x%x, " - "data: x%x x%x x%x\n", + /* Warn PRLI status */ + lpfc_printf_vlog(vport, mode, LOG_ELS, + "2754 PRLI DID:%06X Status:x%x/x%x, " + "data: x%x x%x x%lx\n", ndlp->nlp_DID, ulp_status, ulp_word4, ndlp->nlp_state, ndlp->fc4_prli_sent, ndlp->nlp_flag); @@ -2390,10 +2386,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || (ndlp->nlp_state == NLP_STE_NPR_NODE && - ndlp->nlp_flag & NLP_DELAY_TMO)) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "2784 PRLI cmpl: Allow Node recovery " - "DID x%06x nstate x%x nflag x%x\n", + "DID x%06x nstate x%x nflag x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag); goto out; @@ -2414,8 +2410,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !ndlp->fc4_prli_sent) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2490,7 +2486,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nvme_fb_size = 0; send_next_prli: @@ -2621,8 +2618,8 @@ lpfc_issue_els_prli(struct lpfc_vport 
*vport, struct lpfc_nodelist *ndlp, * the ndlp is used to track outstanding PRLIs for different * FC4 types. */ + set_bit(NLP_PRLI_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_PRLI_SND; vport->fc_prli_sent++; ndlp->fc4_prli_sent++; spin_unlock_irq(&ndlp->lock); @@ -2783,7 +2780,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; - int disc; + bool disc; u32 ulp_status, ulp_word4, tmo, iotag; bool release_node = false; @@ -2812,10 +2809,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. */ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); /* ADISC completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0104 ADISC completes to NPort x%x " @@ -2826,9 +2821,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2837,18 +2830,16 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_set_disctmo(vport); } goto out; } - /* ADISC failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2755 ADISC failure DID:%06X Status:x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + /* Warn ADISC status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2755 ADISC DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); @@ -2858,8 +2849,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2932,9 +2923,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitADISC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ADISC_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -2955,9 +2944,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); return 1; } @@ -2979,7 +2966,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = ndlp->vport; IOCB_t *irsp; - unsigned long flags; 
uint32_t skip_recovery = 0; int wake_up_waiter = 0; u32 ulp_status; @@ -3001,13 +2987,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, iotag = irsp->ulpIoTag; } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); + if (test_and_clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) wake_up_waiter = 1; - ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; - } - spin_unlock_irq(&ndlp->lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "LOGO cmpl: status:x%x/x%x did:x%x", @@ -3017,7 +2999,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0105 LOGO completes to NPort x%x " - "IoTag x%x refcnt %d nflags x%x xflags x%x " + "IoTag x%x refcnt %d nflags x%lx xflags x%x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, iotag, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -3035,12 +3017,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * discovery. The PLOGI will retry. */ if (ulp_status) { - /* LOGO failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2756 LOGO failure, No Retry DID:%06X " - "Status:x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + /* Warn LOGO status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2756 LOGO, No Retry DID:%06X " + "Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) skip_recovery = 1; @@ -3049,23 +3031,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Call state machine. This will unregister the rpi if needed. */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); - if (skip_recovery) - goto out; - - /* The driver sets this flag for an NPIV instance that doesn't want to - * log into the remote port. - */ - if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { - spin_lock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); - lpfc_disc_state_machine(vport, ndlp, cmdiocb, - NLP_EVT_DEVICE_RM); - goto out_rsrc_free; - } - out: /* At this point, the LOGO processing is complete. NOTE: For a * pt2pt topology, we are assuming the NPortID will only change @@ -3083,9 +3048,7 @@ out: if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && skip_recovery == 0) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irqsave(&ndlp->lock, flags); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3187 LOGO completes to NPort x%x: Start " @@ -3107,13 +3070,11 @@ out: * register with the transport. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } -out_rsrc_free: + /* Driver is done with the I/O. 
*/ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); @@ -3150,12 +3111,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t cmdsize; int rc; - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_LOGO_SND) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag)) return 0; - } - spin_unlock_irq(&ndlp->lock); cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, @@ -3174,10 +3131,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitLOGO++; elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -3202,9 +3157,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -3280,13 +3233,13 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, static int lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) { - int rc = 0; + int rc; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ns_ndlp; LPFC_MBOXQ_t *mbox; - if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) - return rc; + if (test_bit(NLP_RPI_REGISTERED, &fc_ndlp->nlp_flag)) + return 0; ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ns_ndlp) @@ -3303,7 +3256,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0936 %s: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return -ENOMEM; @@ -3315,7 +3268,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) goto out; } - fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &fc_ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); if (!mbox->ctx_ndlp) { @@ -3339,7 +3292,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0938 %s: failed to format reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return rc; @@ -4378,12 +4331,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { struct lpfc_work_evt *evtp; - if (!(nlp->nlp_flag & NLP_DELAY_TMO)) + if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag)) return; - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&nlp->lock); - del_timer_sync(&nlp->nlp_delayfunc); + timer_delete_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; if (!list_empty(&nlp->els_retry_evt.evt_listp)) { list_del_init(&nlp->els_retry_evt.evt_listp); @@ -4391,10 +4341,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) evtp = &nlp->els_retry_evt; lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); } - if 
(nlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&nlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &nlp->nlp_flag)) { if (vport->num_disc_nodes) { if (vport->port_state < LPFC_VPORT_READY) { /* Check if there are more ADISCs to be sent */ @@ -4474,20 +4421,17 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) spin_lock_irq(&ndlp->lock); cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; + spin_unlock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - spin_unlock_irq(&ndlp->lock); + if (!test_and_clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return; - } - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); /* * If a discovery event readded nlp_delayfunc after timer * firing and before processing the timer, cancel the * nlp_delayfunc. */ - del_timer_sync(&ndlp->nlp_delayfunc); + timer_delete_sync(&ndlp->nlp_delayfunc); retry = ndlp->nlp_retry; ndlp->nlp_retry = 0; @@ -4622,6 +4566,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, int link_reset = 0, rc; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); + u8 rsn_code_exp = 0; /* Note: cmd_dmabuf may be 0 for internal driver abort @@ -4827,22 +4772,32 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (cmd == ELS_CMD_FDISC) && (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ - lpfc_printf_vlog(vport, KERN_ERR, - LOG_TRACE_EVENT, - "0125 FDISC Failed (x%x). " - "Fabric out of resources\n", - stat.un.lsRjtError); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0125 FDISC (x%x). " + "Fabric out of resources\n", + stat.un.lsRjtError); lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_RSCS); } break; case LSRJT_LOGICAL_BSY: + rsn_code_exp = stat.un.b.lsRjtRsnCodeExp; if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_PRLI) || (cmd == ELS_CMD_NVMEPRLI)) { delay = 1000; maxretry = 48; + + /* An authentication LS_RJT reason code + * explanation means some error in the + * security settings end-to-end. Reduce + * the retry count to allow lpfc to clear + * RSCN mode and not race with dev_loss. + */ + if (cmd == ELS_CMD_PLOGI && + rsn_code_exp == LSEXP_AUTH_REQ) + maxretry = 8; } else if (cmd == ELS_CMD_FDISC) { /* FDISC retry policy */ maxretry = 48; @@ -4867,11 +4822,24 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, LSEXP_NOTHING_MORE) { vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; retry = 1; - lpfc_printf_vlog(vport, KERN_ERR, - LOG_TRACE_EVENT, - "0820 FLOGI Failed (x%x). " - "BBCredit Not Supported\n", - stat.un.lsRjtError); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0820 FLOGI (x%x). " + "BBCredit Not Supported\n", + stat.un.lsRjtError); + } else if (cmd == ELS_CMD_PLOGI) { + rsn_code_exp = stat.un.b.lsRjtRsnCodeExp; + + /* An authentication LS_RJT reason code + * explanation means some error in the + * security settings end-to-end. Reduce + * the retry count to allow lpfc to clear + * RSCN mode and not race with dev_loss. + */ + if (rsn_code_exp == LSEXP_AUTH_REQ) { + delay = 1000; + retry = 1; + maxretry = 8; + } } break; @@ -4881,11 +4849,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) ) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_TRACE_EVENT, - "0122 FDISC Failed (x%x). 
" - "Fabric Detected Bad WWN\n", - stat.un.lsRjtError); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0122 FDISC (x%x). " + "Fabric Detected Bad WWN\n", + stat.un.lsRjtError); lpfc_vport_set_state(vport, FC_VPORT_FABRIC_REJ_WWN); } @@ -5007,9 +4974,7 @@ out_retry: /* delay is specified in milliseconds */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; if ((cmd == ELS_CMD_PRLI) || @@ -5069,7 +5034,7 @@ out_retry: lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0108 No retry ELS command x%x to remote " "NPORT x%x Retried:%d Error:x%x/%x " - "IoTag x%x nflags x%x\n", + "IoTag x%x nflags x%lx\n", cmd, did, cmdiocb->retry, ulp_status, ulp_word4, cmdiocb->iotag, (ndlp ? ndlp->nlp_flag : 0)); @@ -5236,9 +5201,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ACC to LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0109 ACC to LOGO completes to NPort x%x refcnt %d " - "Data: x%x x%x x%x\n", - ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, - ndlp->nlp_state, ndlp->nlp_rpi); + "last els x%x Data: x%lx x%x x%x\n", + ndlp->nlp_DID, kref_read(&ndlp->kref), + ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); /* This clause allows the LOGO ACC to complete and free resources * for the Fabric Domain Controller. It does deliberately skip @@ -5250,18 +5216,20 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { - /* If PLOGI is being retried, PLOGI completion will cleanup the - * node. The NLP_NPR_2B_DISC flag needs to be retained to make - * progress on nodes discovered from last RSCN. - */ - if ((ndlp->nlp_flag & NLP_DELAY_TMO) && - (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) - goto out; - - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); + /* If came from PRLO, then PRLO_ACC is done. + * Start rediscovery now. + */ + if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } } + out: /* * The driver received a LOGO from the rport and has ACK'd it. @@ -5292,7 +5260,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0006 rpi x%x DID:%x flg:%x %d x%px " + "0006 rpi x%x DID:%x flg:%lx %d x%px " "mbx_cmd x%x mbx_flag x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp, mbx_cmd, @@ -5303,11 +5271,9 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * first on an UNREG_LOGIN and then release the final * references. 
*/ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (mbx_cmd == MBX_UNREG_LOGIN) - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); lpfc_drop_node(ndlp->vport, ndlp); } @@ -5340,8 +5306,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, u32 ulp_status, ulp_word4, tmo, did, iotag; if (!vport) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3177 ELS response failed\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "3177 null vport in ELS rsp\n"); goto out; } if (cmdiocb->context_un.mbox) @@ -5373,23 +5339,23 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS response tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0110 ELS response tag x%x completes " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", + "Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n", iotag, ulp_status, ulp_word4, tmo, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); if (mbox) { - if (ulp_status == 0 - && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { + if (ulp_status == 0 && + test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { if (!lpfc_unreg_rpi(vport, ndlp) && !test_bit(FC_PT2PT, &vport->fc_flag)) { - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0314 PLOGI recov " "DID x%x " - "Data: x%x x%x x%x\n", + "Data: x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_rpi, @@ -5406,18 +5372,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out_free_mbox; mbox->vport = vport; - if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { + if (test_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag)) { mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; - } - else { + } else { mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) goto out; @@ -5426,12 +5391,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * set for this failed mailbox command. */ lpfc_nlp_put(ndlp); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* ELS rsp: Cannot issue reg_login for <NPortid> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0138 ELS rsp: Cannot issue reg_login for x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } @@ -5440,32 +5405,20 @@ out_free_mbox: } out: if (ndlp && shost) { - spin_lock_irq(&ndlp->lock); if (mbox) - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; - ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + clear_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); } /* An SLI4 NPIV instance wants to drop the node at this point under - * these conditions and release the RPI. + * these conditions because it doesn't need the login. 
*/ if (phba->sli_rev == LPFC_SLI_REV4 && vport && vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - lpfc_drop_node(vport, ndlp); - } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && - ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { /* Drop ndlp if there is no planned or outstanding * issued PRLI. * @@ -5532,9 +5485,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } @@ -5562,7 +5513,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, pcmd += sizeof(uint32_t); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC: did:x%x flg:x%x", + "Issue ACC: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_FLOGI: @@ -5641,7 +5592,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: @@ -5679,7 +5630,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLO: did:x%x flg:x%x", + "Issue ACC PRLO: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_RDF: @@ -5724,12 +5675,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, default: return 1; } - if (ndlp->nlp_flag & NLP_LOGO_ACC) { - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) { + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) && + !test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; } else { elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5752,7 +5701,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx refcnt %d\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -5827,13 +5776,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, /* Xmit ELS RJT <err> response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0129 Xmit ELS RJT x%x response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", rejectError, elsiocb->iotag, get_job_ulpcontext(phba, 
elsiocb), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue LS_RJT: did:x%x flg:x%x err:x%x", + "Issue LS_RJT: did:x%x flg:x%lx err:x%x", ndlp->nlp_DID, ndlp->nlp_flag, rejectError); phba->fc_stat.elsXmitLSRJT++; @@ -5844,18 +5793,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 1; } - /* The NPIV instance is rejecting this unsolicited ELS. Make sure the - * node's assigned RPI gets released provided this node is not already - * registered with the transport. - */ - if (phba->sli_rev == LPFC_SLI_REV4 && - vport->port_type == LPFC_NPIV_PORT && - !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -5936,7 +5873,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_format_edc_lft_desc(phba, tlv); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue EDC ACC: did:x%x flg:x%x refcnt %d", + "Issue EDC ACC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5958,7 +5895,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6027,7 +5964,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit ADISC ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0130 Xmit ADISC ACC response iotag x%x xri: " - "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", + "x%x, did x%x, nlp_flag x%lx, nlp_state x%x rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6043,7 +5980,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ap->DID = be32_to_cpu(vport->fc_myDID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", + "Issue ACC ADISC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6149,7 +6086,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit PRLI ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0131 Xmit PRLI ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6220,7 +6157,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6015 NVME issue PRLI ACC word1 x%08x " - "word4 x%08x word5 x%08x flag x%x, " + "word4 x%08x word5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", npr_nvme->word1, npr_nvme->word4, npr_nvme->word5, ndlp->nlp_flag, @@ -6235,7 +6172,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue 
ACC PRLI: did:x%x flg:x%x", + "Issue ACC PRLI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6349,7 +6286,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC RNID: did:x%x flg:x%x refcnt %d", + "Issue ACC RNID: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6406,7 +6343,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, get_job_ulpcontext(phba, iocb)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", + "Clear RRQ: did:x%x flg:x%lx exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) xri = bf_get(rrq_oxid, rrq); @@ -6483,7 +6420,7 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", + "Issue ACC ECHO: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6533,14 +6470,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE || - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); - if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* This node was marked for ADISC but was not picked * for discovery. This is possible if the node was * missing in gidft response. 
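
[Editor's note] The dominant churn in these lpfc hunks is ndlp->nlp_flag and phba->hba_flag becoming unsigned long bit arrays manipulated with set_bit()/clear_bit()/test_bit() instead of mask read-modify-writes under a spinlock, with the matching x%x -> x%lx format-string updates. A minimal, self-contained sketch of that conversion pattern follows, under the assumption of a hypothetical demo_node; the names below (demo_node, DEMO_NPR_ADISC, demo_may_drop_ref, and friends) are illustrative stand-ins, not real lpfc symbols.

/*
 * Sketch of the flag-locking conversion shown in the hunks above,
 * using a hypothetical demo_node in place of lpfc's lpfc_nodelist.
 */
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

/* Flags become bit *numbers* in an unsigned long, not mask values. */
enum demo_node_bits {
	DEMO_NPR_ADISC,		/* stands in for NLP_NPR_ADISC */
	DEMO_DROPPED,		/* stands in for NLP_DROPPED   */
};

struct demo_node {
	spinlock_t lock;	/* still guards non-flag node state */
	unsigned long flags;	/* was a u32 holding mask bits */
	struct timer_list tmofunc;
};

/* Before: every flag update takes the node lock. */
static void demo_clear_adisc_old(struct demo_node *np)
{
	spin_lock_irq(&np->lock);
	np->flags &= ~BIT(DEMO_NPR_ADISC);
	spin_unlock_irq(&np->lock);
}

/* After: one atomic bitop, no lock needed for the flag word. */
static void demo_clear_adisc_new(struct demo_node *np)
{
	clear_bit(DEMO_NPR_ADISC, &np->flags);
}

/*
 * test_and_set_bit() turns "check the flag, then set it under the
 * lock" into a single race-free step: exactly one caller observes 0,
 * so only one thread drops the initial node reference.
 */
static bool demo_may_drop_ref(struct demo_node *np)
{
	return !test_and_set_bit(DEMO_DROPPED, &np->flags);
}

static void demo_rearm_tmo(struct demo_node *np, unsigned int tmo)
{
	/* secs_to_jiffies(n) replaces msecs_to_jiffies(1000 * n). */
	mod_timer(&np->tmofunc, jiffies + secs_to_jiffies(tmo));

	/* An unsigned long flag word prints with %lx, hence the
	 * x%x -> x%lx churn in the log strings throughout this diff.
	 */
	pr_info("demo node flags x%lx\n", np->flags);
}

The payoff is visible across the remaining hunks: three-line spin_lock_irq()/mask/spin_unlock_irq() sequences collapse to a single atomic call, test_and_set_bit() and test_and_clear_bit() replace check-then-set sequences (as in the NLP_DROPPED, NLP_IN_RECOV_POST_DEV_LOSS, and HBA_POST_RECEIVE_BUFFER hunks), and hbalock critical sections shrink to cover only the state the lock still has to protect. [End editor's note]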
@@ -6598,9 +6533,9 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS PLOGIs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + !test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -7096,7 +7031,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2171 Xmit RDP response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -7298,13 +7233,13 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); } mbox->vport = phba->pport; - - rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO); if (rc == MBX_NOT_FINISHED) { rc = 1; goto error; } - + if (rc == MBX_TIMEOUT) + goto error; if (phba->sli_rev == LPFC_SLI_REV4) mp = mbox->ctx_buf; else @@ -7357,7 +7292,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); } - rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO); + + if (rc == MBX_TIMEOUT) + goto error; if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { rc = 1; goto error; @@ -7368,8 +7306,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, DMP_SFF_PAGE_A2_SIZE); error: - mbox->ctx_buf = mpsave; - lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + if (mbox->mbox_flag & LPFC_MBX_WAKE) { + mbox->ctx_buf = mpsave; + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + } return rc; @@ -7415,7 +7355,8 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, goto error; } - if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { + if (phba->sli_rev < LPFC_SLI_REV4 || + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { rjt_err = LSRJT_UNABLE_TPC; rjt_expl = LSEXP_REQ_UNSUPPORTED; goto error; @@ -7738,7 +7679,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } if (phba->sli_rev < LPFC_SLI_REV4 || - phba->hba_flag & HBA_FCOE_MODE || + test_bit(HBA_FCOE_MODE, &phba->hba_flag) || (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2)) { rjt_err = LSRJT_CMD_UNSUPPORTED; @@ -8064,7 +8005,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, */ if (vport->port_state <= LPFC_NS_QRY) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", + "RCV RSCN ignore: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); @@ -8094,7 +8035,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_flag, payload_len, *lp, vport->fc_rscn_id_cnt); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", + "RCV RSCN vport: did:x%x/ste:x%x flg:x%lx", 
ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); @@ -8104,8 +8045,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { tmo = ((phba->fc_ratov * 3) + 3); mod_timer(&vport->fc_disctmo, - jiffies + - msecs_to_jiffies(1000 * tmo)); + jiffies + secs_to_jiffies(tmo)); } return 0; } @@ -8131,7 +8071,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", + "RCV RSCN defer: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); @@ -8140,7 +8080,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { tmo = ((phba->fc_ratov * 3) + 3); mod_timer(&vport->fc_disctmo, - jiffies + msecs_to_jiffies(1000 * tmo)); + jiffies + secs_to_jiffies(tmo)); } if ((rscn_cnt < FC_MAX_HOLD_RSCN) && !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) { @@ -8187,7 +8127,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 0; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN: did:x%x/ste:x%x flg:x%x", + "RCV RSCN: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_MODE, &vport->fc_flag); @@ -8443,22 +8383,28 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); /* Defer ACC response until AFTER we issue a FLOGI */ - if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { - phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, + if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) { + phba->defer_flogi_acc.rx_id = bf_get(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com); - phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, + phba->defer_flogi_acc.ox_id = bf_get(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com); vport->fc_myDID = did; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3344 Deferring FLOGI ACC: rx_id: x%x," - " ox_id: x%x, hba_flag x%x\n", - phba->defer_flogi_acc_rx_id, - phba->defer_flogi_acc_ox_id, phba->hba_flag); + " ox_id: x%x, hba_flag x%lx\n", + phba->defer_flogi_acc.rx_id, + phba->defer_flogi_acc.ox_id, phba->hba_flag); - phba->defer_flogi_acc_flag = true; + phba->defer_flogi_acc.flag = true; + /* This nlp_get is paired with nlp_puts that reset the + * defer_flogi_acc.flag back to false. We need to retain + * a kref on the ndlp until the deferred FLOGI ACC is + * processed or cancelled. 
+ */ + phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp); return 0; } @@ -8687,7 +8633,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8849,7 +8795,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x, " "Data: x%x x%x x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9046,7 +8992,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0120 Xmit ELS RPL ACC response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9564,7 +9510,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) if (!list_empty(&pring->txcmplq)) if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) mod_timer(&vport->els_tmofunc, - jiffies + msecs_to_jiffies(1000 * timeout)); + jiffies + secs_to_jiffies(timeout)); } /** @@ -9622,20 +9568,29 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); /* First we need to issue aborts to outstanding cmds on txcmpl */ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) - continue; - if (piocb->vport != vport) continue; - if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) - continue; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2243 iotag = 0x%x cmd_flag = 0x%x " + "ulp_command = 0x%x sli_flag = 0x%x\n", + piocb->iotag, piocb->cmd_flag, + get_job_cmnd(phba, piocb), + phba->sli.sli_flag); + + if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) { + if (piocb->cmd_flag & LPFC_IO_LIBDFC) + continue; + if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) + continue; + } - /* On the ELS ring we can have ELS_REQUESTs or - * GEN_REQUESTs waiting for a response. + /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs, + * or GEN_REQUESTs waiting for a CQE response. 
*/ ulp_command = get_job_cmnd(phba, piocb); - if (ulp_command == CMD_ELS_REQUEST64_CR) { + if (ulp_command == CMD_ELS_REQUEST64_WQE || + ulp_command == CMD_XMIT_ELS_RSP64_WQE) { list_add_tail(&piocb->dlist, &abort_list); /* If the link is down when flushing ELS commands @@ -9660,7 +9615,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { spin_lock_irqsave(&phba->hbalock, iflags); list_del_init(&piocb->dlist); - if (mbx_tmo_err) + if (mbx_tmo_err || !(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) list_move_tail(&piocb->list, &cancel_list); else lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); @@ -10390,14 +10345,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) { if (newnode) lpfc_nlp_put(ndlp); goto dropit; } - spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) @@ -10426,7 +10378,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PLOGI: did:x%x/ste:x%x flg:x%x", + "RCV PLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPLOGI++; @@ -10465,17 +10417,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); - lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); break; case ELS_CMD_FLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FLOGI: did:x%x/ste:x%x flg:x%x", + "RCV FLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFLOGI++; @@ -10494,7 +10442,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_els_rcv_flogi(vport, elsiocb, ndlp); /* retain node if our response is deferred */ - if (phba->defer_flogi_acc_flag) + if (phba->defer_flogi_acc.flag) break; if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, @@ -10502,7 +10450,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LOGO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LOGO: did:x%x/ste:x%x flg:x%x", + "RCV LOGO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLOGO++; @@ -10519,7 +10467,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLO: did:x%x/ste:x%x flg:x%x", + "RCV PRLO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLO++; @@ -10548,7 +10496,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ADISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ADISC: did:x%x/ste:x%x flg:x%x", + "RCV ADISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_send_els_event(vport, ndlp, payload); @@ -10563,7 +10511,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PDISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PDISC: did:x%x/ste:x%x flg:x%x", + "RCV PDISC: did:x%x/ste:x%x flg:x%lx", did, 
vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPDISC++; @@ -10577,7 +10525,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARPR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARPR: did:x%x/ste:x%x flg:x%x", + "RCV FARPR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARPR++; @@ -10585,7 +10533,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARP: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARP: did:x%x/ste:x%x flg:x%x", + "RCV FARP: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARP++; @@ -10593,7 +10541,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FAN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FAN: did:x%x/ste:x%x flg:x%x", + "RCV FAN: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFAN++; @@ -10602,7 +10550,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLI: did:x%x/ste:x%x flg:x%x", + "RCV PRLI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLI++; @@ -10616,7 +10564,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LIRR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LIRR: did:x%x/ste:x%x flg:x%x", + "RCV LIRR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLIRR++; @@ -10627,7 +10575,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RLS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RLS: did:x%x/ste:x%x flg:x%x", + "RCV RLS: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRLS++; @@ -10638,7 +10586,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RPL: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RPL: did:x%x/ste:x%x flg:x%x", + "RCV RPL: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRPL++; @@ -10649,7 +10597,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RNID: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RNID: did:x%x/ste:x%x flg:x%x", + "RCV RNID: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRNID++; @@ -10660,7 +10608,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RTV: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RTV: did:x%x/ste:x%x flg:x%x", + "RCV RTV: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRTV++; lpfc_els_rcv_rtv(vport, elsiocb, ndlp); @@ -10670,7 +10618,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RRQ: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RRQ: did:x%x/ste:x%x flg:x%x", + "RCV RRQ: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRRQ++; @@ -10681,7 +10629,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ECHO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV 
ECHO: did:x%x/ste:x%x flg:x%x", + "RCV ECHO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvECHO++; @@ -10697,7 +10645,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FPIN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FPIN: did:x%x/ste:x%x flg:x%x", + "RCV FPIN: did:x%x/ste:x%x " + "flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, @@ -10732,7 +10681,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; /* Unknown ELS command <elsCmd> received from NPORT <did> */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0115 Unknown ELS command x%x " "received from NPORT x%x\n", cmd, did); if (newnode) @@ -10947,7 +10896,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) "3334 Delay fc port discovery for %d secs\n", phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, - jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); + jiffies + secs_to_jiffies(phba->fc_ratov)); return; } @@ -11204,10 +11153,8 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) if (!ndlp) return; - mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1)); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; phba->pport->port_state = LPFC_FLOGI; return; @@ -11300,10 +11247,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; - /* FDISC failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "0126 FDISC failed. 
(x%x/x%x)\n", - ulp_status, ulp_word4); + /* Warn FDISC status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0126 FDISC cmpl status: x%x/x%x)\n", + ulp_status, ulp_word4); goto fdisc_failed; } @@ -11338,11 +11285,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -11545,7 +11490,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NPIV LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2928 NPIV LOGO completes to NPort x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%lx x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -11557,14 +11502,13 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_can_disctmo(vport); } - if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { + if (test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) { /* Wake up lpfc_vport_delete if waiting...*/ if (ndlp->logo_waitq) wake_up(ndlp->logo_waitq); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); - ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); + clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags); } /* Safe to release resources now. */ @@ -11612,13 +11556,11 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "Issue LOGO npiv did:x%x flg:x%x", + "Issue LOGO npiv did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -11634,9 +11576,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -12117,7 +12057,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " - "flags 0x%x\n", + "flag 0x%lx\n", shost->host_no, ndlp->nlp_DID, vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); @@ -12127,8 +12067,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, */ spin_lock_irqsave(&ndlp->lock, flags); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e42fa9c822b5..3d15a964f5c9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ 
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -100,6 +100,12 @@ lpfc_rport_invalid(struct fc_rport *rport) return -EINVAL; } + if (rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) { + pr_info("**** %s: devloss_callbk_done rport x%px SID x%x\n", + __func__, rport, rport->scsi_target_id); + return -EINVAL; + } + rdata = rport->dd_data; if (!rdata) { pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n", @@ -137,7 +143,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) ndlp = rdata->pnode; vport = ndlp->vport; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport terminate: sid:x%x did:x%x flg:x%x", + "rport terminate: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) @@ -155,6 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_hba *phba; struct lpfc_work_evt *evtp; unsigned long iflags; + bool drop_initial_node_ref = false; ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; if (!ndlp) @@ -164,56 +171,76 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport devlosscb: sid:x%x did:x%x flg:x%x", + "rport devlosscb: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3181 dev_loss_callbk x%06x, rport x%px flg x%x " + "3181 dev_loss_callbk x%06x, rport x%px flg x%lx " "load_flag x%lx refcnt %u state %d xpt x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref), ndlp->nlp_state, ndlp->fc4_xpt_flags); /* Don't schedule a worker thread event if the vport is going down. */ - if (test_bit(FC_UNLOADING, &vport->load_flag)) { + if (test_bit(FC_UNLOADING, &vport->load_flag) || + !test_bit(HBA_SETUP, &phba->hba_flag)) { + spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; + /* Only 1 thread can drop the initial node reference. + * If not registered for NVME and NLP_DROPPED flag is + * clear, remove the initial reference. + */ + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) + drop_initial_node_ref = true; + /* The scsi_transport is done with the rport so lpfc cannot - * call to unregister. Remove the scsi transport reference - * and clean up the SCSI transport node details. + * call to unregister. */ - if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) { + if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; - /* NVME transport-registered rports need the - * NLP_XPT_REGD flag to complete an unregister. + /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node, + * unregister calls were made to the scsi and nvme + * transports and refcnt was already decremented. Clear + * the NLP_XPT_REGD flag only if the NVME nrport is + * confirmed unregistered. 
*/ - if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) - ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; - spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); - spin_lock_irqsave(&ndlp->lock, iflags); - } + if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); - /* Only 1 thread can drop the initial node reference. If - * another thread has set NLP_DROPPED, this thread is done. - */ - if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) && - !(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; + /* Release scsi transport reference */ + lpfc_nlp_put(ndlp); + } else { + spin_unlock_irqrestore(&ndlp->lock, iflags); + } + } else { spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); - return; } - spin_unlock_irqrestore(&ndlp->lock, iflags); + if (drop_initial_node_ref) + lpfc_nlp_put(ndlp); return; } if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return; + /* Ignore callback for a mismatched (stale) rport */ + if (ndlp->rport != rport) { + lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE, + "6788 fc rport mismatch: d_id x%06x ndlp x%px " + "fc rport x%px node rport x%px state x%x " + "refcnt %u\n", + ndlp->nlp_DID, ndlp, rport, ndlp->rport, + ndlp->nlp_state, kref_read(&ndlp->kref)); + return; + } + if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6789 rport name %llx != node port name %llx", @@ -229,14 +256,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_IN_DEV_LOSS; + set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); + spin_lock_irqsave(&ndlp->lock, iflags); /* If there is a PLOGI in progress, and we are in a * NLP_NPR_2B_DISC state, don't turn off the flag. */ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* * The backend does not expect any more calls associated with this @@ -265,15 +292,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) } else { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3188 worker thread is stopped %s x%06x, " - " rport x%px flg x%x load_flag x%lx refcnt " + " rport x%px flg x%lx load_flag x%lx refcnt " "%d\n", __func__, ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref)); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { - spin_lock_irqsave(&ndlp->lock, iflags); /* Node is in dev loss. No further transaction. 
*/ - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -397,22 +422,15 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - unsigned long iflags; - - spin_lock_irqsave(&ndlp->lock, iflags); - if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) { - ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) { lpfc_nlp_get(ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8438 Devloss timeout reversed on DID x%x " - "refcnt %d ndlp %p flag x%x " + "refcnt %d ndlp %p flag x%lx " "port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); - return; } - spin_unlock_irqrestore(&ndlp->lock, iflags); } /** @@ -449,7 +467,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n", + "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n", __func__, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); @@ -463,9 +481,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); return fcf_inuse; } @@ -487,12 +503,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) recovering = true; } else { /* Physical port path. */ - if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) + if (test_bit(HBA_FLOGI_OUTSTANDING, + &phba->hba_flag)) recovering = true; } break; case Fabric_Cntl_DID: - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) recovering = true; break; case FDMI_DID: @@ -520,18 +537,18 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) * the following lpfc_nlp_put is necessary after fabric node is * recovered. */ + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); if (recovering) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8436 Devloss timeout marked on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags); + return fcf_inuse; } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Fabric node fully recovered before this dev_loss_tmo * queue work is processed. 
Thus, ignore the @@ -541,16 +558,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) LOG_DISCOVERY | LOG_NODE, "8437 Devloss timeout ignored on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); return fcf_inuse; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); return fcf_inuse; } @@ -564,7 +578,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x refcnt %d\n", + "NPort x%06x Data: x%lx x%x x%x refcnt %d\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, @@ -574,15 +588,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x\n", + "NPort x%06x Data: x%lx x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM event. @@ -652,14 +664,15 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, if (!fcf_inuse) return; - if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && + !lpfc_fcf_inuse(phba)) { spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { - if (phba->hba_flag & HBA_DEVLOSS_TMO) { + if (test_and_set_bit(HBA_DEVLOSS_TMO, + &phba->hba_flag)) { spin_unlock_irq(&phba->hbalock); return; } - phba->hba_flag |= HBA_DEVLOSS_TMO; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2847 Last remote node (x%x) using " "FCF devloss tmo\n", nlp_did); @@ -671,8 +684,9 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, "in progress\n"); return; } - if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->hbalock); + if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) && + !test_bit(FCF_RR_INPROG, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2869 Devloss tmo to idle FIP engine, " "unreg in-use FCF and rescan.\n"); @@ -680,11 +694,10 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, lpfc_unregister_fcf_rescan(phba); return; } - spin_unlock_irq(&phba->hbalock); - if (phba->hba_flag & FCF_TS_INPROG) + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2870 FCF table scan in progress\n"); - if (phba->hba_flag & FCF_RR_INPROG) + if (test_bit(FCF_RR_INPROG, &phba->hba_flag)) lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2871 FLOGI roundrobin FCF failover " "in progress\n"); @@ -978,18 +991,15 @@ lpfc_work_done(struct lpfc_hba *phba) /* Process SLI4 events */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { - if (phba->hba_flag & HBA_RRQ_ACTIVE) + if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag)) lpfc_handle_rrq_active(phba); - if (phba->hba_flag & 
ELS_XRI_ABORT_EVENT) + if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag)) lpfc_sli4_els_xri_abort_event_proc(phba); - if (phba->hba_flag & ASYNC_EVENT) + if (test_bit(ASYNC_EVENT, &phba->hba_flag)) lpfc_sli4_async_event_proc(phba); - if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; - spin_unlock_irq(&phba->hbalock); + if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER, + &phba->hba_flag)) lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); - } if (phba->fcf.fcf_flag & FCF_REDISC_EVT) lpfc_sli4_fcf_redisc_event_proc(phba); } @@ -1035,11 +1045,11 @@ lpfc_work_done(struct lpfc_hba *phba) status >>= (4*LPFC_ELS_RING); if (pring && (status & HA_RXMASK || pring->flag & LPFC_DEFERRED_RING_EVENT || - phba->hba_flag & HBA_SP_QUEUE_EVT)) { + test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Preserve legacy behavior. */ - if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) + if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag)) set_bit(LPFC_DATA_READY, &phba->data_flags); } else { /* Driver could have abort request completed in queue @@ -1220,7 +1230,7 @@ lpfc_linkdown_port(struct lpfc_vport *vport) /* Stop delayed Nport discovery */ clear_bit(FC_DISC_DELAYED, &vport->fc_flag); - del_timer_sync(&vport->delayed_disc_tmo); + timer_delete_sync(&vport->delayed_disc_tmo); if (phba->sli_rev == LPFC_SLI_REV4 && vport->port_type == LPFC_PHYSICAL_PORT && @@ -1247,7 +1257,14 @@ lpfc_linkdown(struct lpfc_hba *phba) lpfc_scsi_dev_block(phba); offline = pci_channel_offline(phba->pcidev); - phba->defer_flogi_acc_flag = false; + /* Decrement the held ndlp if there is a deferred flogi acc */ + if (phba->defer_flogi_acc.flag) { + if (phba->defer_flogi_acc.ndlp) { + lpfc_nlp_put(phba->defer_flogi_acc.ndlp); + phba->defer_flogi_acc.ndlp = NULL; + } + } + phba->defer_flogi_acc.flag = false; /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; @@ -1342,7 +1359,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device is * marked for PLOGI. 
*/ @@ -1369,7 +1386,7 @@ lpfc_linkup_port(struct lpfc_vport *vport) (vport != phba->pport)) return; - if (phba->defer_flogi_acc_flag) { + if (phba->defer_flogi_acc.flag) { clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag); clear_bit(FC_RSCN_MODE, &vport->fc_flag); clear_bit(FC_NLP_MORE, &vport->fc_flag); @@ -1403,7 +1420,7 @@ lpfc_linkup(struct lpfc_hba *phba) /* Unblock fabric iocbs if they are blocked */ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); - del_timer_sync(&phba->fabric_block_timer); + timer_delete_sync(&phba->fabric_block_timer); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) @@ -1420,7 +1437,8 @@ lpfc_linkup(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); /* reinitialize initial HBA flag */ - phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL); + clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); + clear_bit(HBA_RHBA_CMPL, &phba->hba_flag); return 0; } @@ -1505,7 +1523,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* don't perform discovery for SLI4 loopback diagnostic test */ if ((phba->sli_rev == LPFC_SLI_REV4) && - !(phba->hba_flag & HBA_FCOE_MODE) && + !test_bit(HBA_FCOE_MODE, &phba->hba_flag) && (phba->link_flag & LS_LOOPBACK_MODE)) return; @@ -1548,7 +1566,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto sparam_out; } - phba->hba_flag |= HBA_DEFER_FLOGI; + set_bit(HBA_DEFER_FLOGI, &phba->hba_flag); } else { lpfc_initial_flogi(vport); } @@ -1617,27 +1635,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); /* If there is a pending FCoE event, restart FCF table scan. */ - if ((!(phba->hba_flag & FCF_RR_INPROG)) && - lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) + if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) && + lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) goto fail_out; /* Mark successful completion of FCF table scan */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); - phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); if (vport->port_state != LPFC_FLOGI) { - phba->hba_flag |= FCF_RR_INPROG; - spin_unlock_irq(&phba->hbalock); + set_bit(FCF_RR_INPROG, &phba->hba_flag); lpfc_issue_init_vfi(vport); - goto out; } - spin_unlock_irq(&phba->hbalock); goto out; fail_out: - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_RR_INPROG; - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); out: mempool_free(mboxq, phba->mbox_mem_pool); } @@ -1867,32 +1881,31 @@ lpfc_register_fcf(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); /* If the FCF is not available do nothing. 
*/ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); return; } /* The FCF is already registered, start discovery */ if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); - phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); if (phba->pport->port_state != LPFC_FLOGI && test_bit(FC_FABRIC, &phba->pport->fc_flag)) { - phba->hba_flag |= FCF_RR_INPROG; - spin_unlock_irq(&phba->hbalock); + set_bit(FCF_RR_INPROG, &phba->hba_flag); lpfc_initial_flogi(phba->pport); return; } - spin_unlock_irq(&phba->hbalock); return; } spin_unlock_irq(&phba->hbalock); fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!fcf_mbxq) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); return; } @@ -1901,9 +1914,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); mempool_free(fcf_mbxq, phba->mbox_mem_pool); } @@ -1956,7 +1968,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) return 0; - if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { + if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { *boot_flag = 0; *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); @@ -2151,8 +2163,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2833 Stop FCF discovery process due to link " "state change (x%x)\n", phba->link_state); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } @@ -2380,9 +2393,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) int rc; if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & HBA_DEVLOSS_TMO) { - spin_unlock_irq(&phba->hbalock); + if (test_bit(HBA_DEVLOSS_TMO, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2872 Devloss tmo with no eligible " "FCF, unregister in-use FCF (x%x) " @@ -2392,8 +2403,9 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) goto stop_flogi_current_fcf; } /* Mark the end to FLOGI roundrobin failover */ - phba->hba_flag &= ~FCF_RR_INPROG; + clear_bit(FCF_RR_INPROG, &phba->hba_flag); /* Allow action to new fcf asynchronous event */ + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, @@ -2630,9 +2642,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) "2765 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); /* Let next new FCF event trigger fast failover */ - 
spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); lpfc_sli4_mbox_cmd_free(phba, mboxq); return; } @@ -2873,10 +2883,10 @@ read_next_fcf: phba->fcoe_eventtag_at_fcf_scan, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & HBA_DEVLOSS_TMO) { - phba->hba_flag &= ~FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + if (test_bit(HBA_DEVLOSS_TMO, + &phba->hba_flag)) { + clear_bit(FCF_TS_INPROG, + &phba->hba_flag); /* Unregister in-use FCF and rescan */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, @@ -2889,8 +2899,7 @@ read_next_fcf: /* * Let next new FCF event trigger fast failover */ - phba->hba_flag &= ~FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); return; } /* @@ -2996,8 +3005,8 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (phba->link_state < LPFC_LINK_UP) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; - phba->hba_flag &= ~FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); goto out; } @@ -3008,7 +3017,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2766 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record. " - "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, + "hba_flg x%lx fcf_flg x%x\n", phba->hba_flag, phba->fcf.fcf_flag); lpfc_unregister_fcf_rescan(phba); goto out; @@ -3471,9 +3480,9 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Check if sending the FLOGI is being deferred to after we get * up to date CSPs from MBX_READ_SPARAM. */ - if (phba->hba_flag & HBA_DEFER_FLOGI) { + if (test_bit(HBA_DEFER_FLOGI, &phba->hba_flag)) { lpfc_initial_flogi(vport); - phba->hba_flag &= ~HBA_DEFER_FLOGI; + clear_bit(HBA_DEFER_FLOGI, &phba->hba_flag); } return; @@ -3495,7 +3504,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) spin_lock_irqsave(&phba->hbalock, iflags); phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { case LPFC_LINK_SPEED_1GHZ: case LPFC_LINK_SPEED_2GHZ: @@ -3517,7 +3526,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) if (phba->fc_topology && phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "3314 Toplogy changed was 0x%x is 0x%x\n", + "3314 Topology changed was 0x%x is 0x%x\n", phba->fc_topology, bf_get(lpfc_mbx_read_top_topology, la)); phba->fc_topology_changed = 1; @@ -3611,7 +3620,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) goto out; } - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!cfglink_mbox) goto out; @@ -3631,7 +3640,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) * is phase 1 implementation that support FCF index 0 and driver * defaults. 
*/ - if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { + if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL); if (unlikely(!fcf_record)) { @@ -3661,12 +3670,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) * The driver is expected to do FIP/FCF. Call the port * and get the FCF Table. */ - spin_lock_irqsave(&phba->hbalock, iflags); - if (phba->hba_flag & FCF_TS_INPROG) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) return; - } /* This is the initial FCF discovery scan */ + spin_lock_irqsave(&phba->hbalock, iflags); phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, @@ -3861,14 +3868,13 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->ctx_ndlp = NULL; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, - "0002 rpi:%x DID:%x flg:%x %d x%px\n", + "0002 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); - if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || + if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { /* We rcvd a rscn after issuing this * mbox reg login, we may have cycled @@ -3878,16 +3884,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * there is another reg login in * process. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); /* * We cannot leave the RPI registered because * if we go thru discovery again for this ndlp * a subsequent REG_RPI will fail. */ - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } @@ -4200,7 +4204,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4331,9 +4335,7 @@ out: * reference. 
*/ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } @@ -4354,11 +4356,11 @@ out: if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0003 rpi:%x DID:%x flg:%x %d x%px\n", + "0003 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -4450,8 +4452,8 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) __func__, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_state); - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4485,7 +4487,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport add: did:x%x flg:x%x type x%x", + "rport add: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); /* Don't add the remote port if unloading. */ @@ -4553,7 +4555,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport delete: did:x%x flg:x%x type x%x", + "rport delete: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -4669,7 +4671,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "0999 %s Not regd: ndlp x%px rport x%px DID " - "x%x FLG x%x XPT x%x\n", + "x%x FLG x%lx XPT x%x\n", __func__, ndlp, ndlp->rport, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags); return; @@ -4685,7 +4687,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } else if (!ndlp->rport) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, - "1999 %s NDLP in devloss x%px DID x%x FLG x%x" + "1999 %s NDLP in devloss x%px DID x%x FLG x%lx" " XPT x%x refcnt %u\n", __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, @@ -4695,9 +4697,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { vport->phba->nport_event_cnt++; if (vport->phba->nvmet_support == 0) { - /* Start devloss if target. */ - if (ndlp->nlp_type & NLP_NVME_TARGET) - lpfc_nvme_unregister_port(vport, ndlp); + lpfc_nvme_unregister_port(vport, ndlp); } else { /* NVMET has no upcall. 
*/ lpfc_nlp_put(ndlp); @@ -4730,7 +4730,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type |= NLP_FC_NODE; fallthrough; case NLP_STE_MAPPED_NODE: - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); lpfc_nlp_reg_node(vport, ndlp); break; @@ -4741,7 +4741,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * backend, attempt it now */ case NLP_STE_NPR_NODE: - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); fallthrough; default: lpfc_nlp_unreg_node(vport, ndlp); @@ -4762,13 +4762,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (new_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FC_NODE; } if (new_state == NLP_STE_MAPPED_NODE) - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); if (new_state == NLP_STE_NPR_NODE) - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || @@ -4776,7 +4776,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* For nodes marked for ADISC, Handle unreg in ADISC cmpl * if linkup. In linkdown do unreg_node */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) || !lpfc_is_link_up(vport->phba)) lpfc_nlp_unreg_node(vport, ndlp); } @@ -4796,9 +4796,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } @@ -4830,7 +4828,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int state) { int old_state = ndlp->nlp_state; - int node_dropped = ndlp->nlp_flag & NLP_DROPPED; + bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag); char name1[16], name2[16]; unsigned long iflags; @@ -4846,7 +4844,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (node_dropped && old_state == NLP_STE_UNUSED_NODE && state != NLP_STE_UNUSED_NODE) { - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_get(ndlp); } @@ -4854,7 +4852,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, state != NLP_STE_NPR_NODE) lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; + clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); ndlp->nlp_type &= ~NLP_FC_NODE; } @@ -4951,14 +4949,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference from lpfc_nlp_init. If set, don't drop it again and * introduce an imbalance. 
*/ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); - return; - } - spin_unlock_irq(&ndlp->lock); } /* @@ -4987,7 +4979,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport) tmo, vport->port_state, vport->fc_flag); } - mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); + mod_timer(&vport->fc_disctmo, jiffies + secs_to_jiffies(tmo)); set_bit(FC_DISC_TMO, &vport->fc_flag); /* Start Discovery Timer state <hba_state> */ @@ -5018,7 +5010,7 @@ lpfc_can_disctmo(struct lpfc_vport *vport) if (test_bit(FC_DISC_TMO, &vport->fc_flag) || timer_pending(&vport->fc_disctmo)) { clear_bit(FC_DISC_TMO, &vport->fc_flag); - del_timer_sync(&vport->fc_disctmo); + timer_delete_sync(&vport->fc_disctmo); spin_lock_irqsave(&vport->work_port_lock, iflags); vport->work_port_events &= ~WORKER_DISC_TMO; spin_unlock_irqrestore(&vport->work_port_lock, iflags); @@ -5061,7 +5053,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, case CMD_GEN_REQUEST64_CR: if (iocb->ndlp == ndlp) return 1; - fallthrough; + break; case CMD_ELS_REQUEST64_CR: if (remote_id == ndlp->nlp_DID) return 1; @@ -5073,9 +5065,9 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (ndlp->nlp_flag & NLP_DELAY_TMO)) { + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return 0; - } + if (ulp_context == ndlp->nlp_rpi) return 1; } @@ -5145,7 +5137,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. */ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { if (phba->sli_rev != LPFC_SLI_REV4) lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); else @@ -5179,29 +5171,19 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_issue_els_logo(vport, ndlp, 0); /* Check to see if there are any deferred events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1434 UNREG cmpl deferred logo x%x " "on NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - /* NLP_RELEASE_RPI is only set for SLI4 ports. */ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irq(&ndlp->lock); - } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The node has an outstanding reference for the unreg. Now @@ -5221,8 +5203,6 @@ static void lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { - unsigned long iflags; - /* Driver always gets a reference on the mailbox job * in support of async jobs. 
*/ @@ -5230,9 +5210,8 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, if (!mbox->ctx_ndlp) return; - if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) { mbox->mbox_cmpl = lpfc_nlp_logo_unreg; - } else if (phba->sli_rev == LPFC_SLI_REV4 && !test_bit(FC_UNLOADING, &vport->load_flag) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= @@ -5240,13 +5219,6 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, (kref_read(&ndlp->kref) > 0)) { mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; } else { - if (test_bit(FC_UNLOADING, &vport->load_flag)) { - if (phba->sli_rev == LPFC_SLI_REV4) { - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - } mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } @@ -5268,13 +5240,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc, acc_plogi = 1; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || + test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) { + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " - "unregistered nlp_flag x%x " + "unregistered nlp_flag x%lx " "did x%x\n", ndlp->nlp_rpi, ndlp->nlp_flag, ndlp->nlp_DID); @@ -5282,11 +5254,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. */ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -5309,27 +5281,24 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 1; } + /* Accept PLOGIs after unreg_rpi_cmpl. */ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) - /* - * accept PLOGIs after unreg_rpi_cmpl - */ acc_plogi = 0; - if (((ndlp->nlp_DID & Fabric_DID_MASK) != - Fabric_DID_MASK) && - (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + + if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " - "NPort x%x deferred flg x%x " + "NPort x%x deferred flg x%lx " "Data:x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; lpfc_nlp_put(ndlp); @@ -5339,7 +5308,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LOG_NODE | LOG_DISCOVERY, "1444 Failed to allocate mempool " "unreg_rpi UNREG x%x, " - "DID x%x, flag x%x, " + "DID x%x, flag x%lx, " "ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); @@ -5349,7 +5318,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * not unloading. 
*/ if (!test_bit(FC_UNLOADING, &vport->load_flag)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, @@ -5362,13 +5331,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) out: if (phba->sli_rev != LPFC_SLI_REV4) ndlp->nlp_rpi = 0; - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (acc_plogi) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 0; } @@ -5396,7 +5365,7 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { /* The mempool_alloc might sleep */ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); @@ -5484,7 +5453,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* Cleanup node for NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0900 Cleanup node for NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_dequeue_node(vport, ndlp); @@ -5529,21 +5498,15 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_els_abort(phba, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = 0; - del_timer_sync(&ndlp->nlp_delayfunc); + timer_delete_sync(&ndlp->nlp_delayfunc); list_del_init(&ndlp->els_retry_evt.evt_listp); list_del_init(&ndlp->dev_loss_evt.evt_listp); list_del_init(&ndlp->recovery_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); - - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - return 0; } @@ -5607,6 +5570,7 @@ static struct lpfc_nodelist * __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *np = NULL; uint32_t data1; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { @@ -5617,18 +5581,24 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "0929 FIND node DID " - "Data: x%px x%x x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); - return ndlp; + + /* Check for new or potentially stale node */ + if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) + return ndlp; + np = ndlp; } } - /* FIND node did <did> NOT FOUND */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "0932 FIND node did x%x NOT FOUND.\n", did); - return NULL; + if (!np) + /* FIND node did <did> NOT FOUND */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0932 FIND node did x%x NOT FOUND.\n", did); + + return np; } struct lpfc_nodelist * @@ -5664,7 +5634,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport) iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "2025 FIND node DID MAPPED " - "Data: x%px x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, 
ndlp->active_rrqs_xri_bitmap); @@ -5698,13 +5668,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6453 Setup New Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp; } @@ -5723,7 +5691,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6455 Setup RSCN Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5736,18 +5704,16 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) return ndlp; if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && - ndlp->nlp_state < NLP_STE_PRLI_ISSUE) { + ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6456 Skip Setup RSCN Node x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); ndlp = NULL; @@ -5755,7 +5721,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6457 Setup Active Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5766,7 +5732,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || (!vport->phba->nvmet_support && - ndlp->nlp_flag & NLP_RCV_PLOGI)) + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))) return NULL; if (vport->phba->nvmet_support) @@ -5776,10 +5742,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) * allows for rediscovery */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } return ndlp; } @@ -6150,7 +6113,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device * is marked for PLOGI. 
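The __lpfc_findnode_did() rework above changes the lookup's selection policy: a DID match still in NLP_STE_UNUSED_NODE is only remembered, and is returned solely when no active match exists, so callers now prefer a live node over a potentially stale one. Condensed to its control flow (loop body simplified, with vport and did taken from the enclosing function; a sketch, not a verbatim copy):

        struct lpfc_nodelist *ndlp, *np = NULL;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (!lpfc_matchdid(vport, ndlp, did))
                        continue;
                /* An active match wins immediately. */
                if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
                        return ndlp;
                np = ndlp;      /* stale match: remember, keep looking */
        }
        return np;              /* NULL only when nothing matched */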
*/ @@ -6363,11 +6326,11 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0004 rpi:%x DID:%x flg:%x %d x%px\n", + "0004 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -6417,7 +6380,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) if (filter(ndlp, param)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "3185 FIND node filter %ps DID " - "ndlp x%px did x%x flg x%x st x%x " + "ndlp x%px did x%x flg x%lx st x%x " "xri x%x type x%x rpi x%x\n", filter, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6552,9 +6515,10 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0007 Init New ndlp x%px, rpi:x%x DID:%x " - "flg:x%x refcnt:%d\n", + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:x%x " + "flg:x%lx refcnt:%d\n", ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6586,7 +6550,7 @@ lpfc_nlp_release(struct kref *kref) struct lpfc_vport *vport = ndlp->vport; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node release: did:x%x flg:x%x type:x%x", + "node release: did:x%x flg:x%lx type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -6598,19 +6562,12 @@ lpfc_nlp_release(struct kref *kref) lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_cleanup_node(vport, ndlp); - /* Not all ELS transactions have registered the RPI with the port. - * In these cases the rpi usage is temporary and the node is - * released when the WQE is completed. Catch this case to free the - * RPI to the pool. Because this node is in the release path, a lock - * is unnecessary. All references are gone and the node has been - * dequeued. + /* All nodes are initialized with an RPI that needs to be released + * now. All references are gone and the node has been dequeued. 
*/ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && - !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - } + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } /* The node is not freed back to memory, it is released to a pool so @@ -6639,7 +6596,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp) if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node get: did:x%x flg:x%x refcnt:x%x", + "node get: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6671,7 +6628,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) { if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node put: did:x%x flg:x%x refcnt:x%x", + "node put: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); } else { @@ -6724,11 +6681,12 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); goto out; - } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + } else if (test_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag)) { ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "2624 RPI %x DID %x flag %x " + "2624 RPI %x DID %x flag %lx " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); @@ -6997,11 +6955,11 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) * registered, do nothing. */ spin_lock_irq(&phba->hbalock); - if (!(phba->hba_flag & HBA_FCOE_MODE) || + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) || !(phba->fcf.fcf_flag & FCF_REGISTERED) || - !(phba->hba_flag & HBA_FIP_SUPPORT) || + !test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) || (phba->fcf.fcf_flag & FCF_DISCOVERY) || - (phba->pport->port_state == LPFC_FLOGI)) { + phba->pport->port_state == LPFC_FLOGI) { spin_unlock_irq(&phba->hbalock); return; } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 2108b4cb7815..32298285ea5e 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -562,6 +562,27 @@ struct fc_vft_header { #include <uapi/scsi/fc/fc_els.h> /* + * Application Header + */ +struct fc_app_header { + uint32_t dst_app_id; + uint32_t src_app_id; +#define LOOPBACK_SRC_APPID 0x4321 + uint32_t word2; + uint32_t word3; +}; + +/* + * dfctl optional header definition + */ +enum lpfc_fc_dfctl { + LPFC_FC_NO_DEVICE_HEADER, + LPFC_FC_16B_DEVICE_HEADER, + LPFC_FC_32B_DEVICE_HEADER, + LPFC_FC_64B_DEVICE_HEADER, +}; + +/* * Extended Link Service LS_COMMAND codes (Payload Word 0) */ #ifdef __BIG_ENDIAN_BITFIELD @@ -703,6 +724,7 @@ struct ls_rjt { /* Structure is in Big Endian format */ #define LSEXP_OUT_OF_RESOURCE 0x29 #define LSEXP_CANT_GIVE_DATA 0x2A #define LSEXP_REQ_UNSUPPORTED 0x2C +#define LSEXP_AUTH_REQ 0x48 #define LSEXP_NO_RSRC_ASSIGN 0x52 uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */ } b; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 367e6b066d42..2dedb273b091 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1907,22 +1907,22 @@ struct lpfc_mbx_query_fw_config { uint32_t asic_revision; uint32_t physical_port; uint32_t function_mode; -#define LPFC_FCOE_INI_MODE 0x00000040 -#define LPFC_FCOE_TGT_MODE 0x00000080 +#define LPFC_FC_INI_MODE 0x00000040 +#define LPFC_FC_TGT_MODE 0x00000080 #define LPFC_DUA_MODE 0x00000800 - uint32_t ulp0_mode; -#define LPFC_ULP_FCOE_INIT_MODE 0x00000040 -#define LPFC_ULP_FCOE_TGT_MODE 0x00000080 - uint32_t ulp0_nap_words[12]; - uint32_t ulp1_mode; - uint32_t ulp1_nap_words[12]; + uint32_t oper_mode; + uint32_t rsvd9[2]; + uint32_t wqid_base; + uint32_t wqid_tot; + uint32_t rqid_base; + uint32_t rqid_tot; + uint32_t rsvd15[19]; uint32_t function_capabilities; uint32_t cqid_base; uint32_t cqid_tot; uint32_t eqid_base; uint32_t eqid_tot; - uint32_t ulp0_nap2_words[2]; - uint32_t ulp1_nap2_words[2]; + uint32_t rsvd39[4]; } rsp; }; @@ -2146,6 +2146,14 @@ struct sli4_sge { /* SLI-4 */ uint32_t sge_len; }; +struct sli4_sge_le { + __le32 addr_hi; + __le32 addr_lo; + + __le32 word2; + __le32 sge_len; +}; + struct sli4_hybrid_sgl { struct list_head list_node; struct sli4_sge *dma_sgl; @@ -3770,25 +3778,22 @@ struct lpfc_mbx_get_prof_cfg { struct lpfc_controller_attribute { uint32_t version_string[8]; uint32_t manufacturer_name[8]; - uint32_t supported_modes; + uint32_t rsvd16; uint32_t word17; -#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0 -#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff -#define lpfc_cntl_attr_eprom_ver_lo_WORD word17 -#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8 -#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff -#define lpfc_cntl_attr_eprom_ver_hi_WORD word17 #define lpfc_cntl_attr_flash_id_SHIFT 16 #define lpfc_cntl_attr_flash_id_MASK 0x000000ff #define lpfc_cntl_attr_flash_id_WORD word17 - uint32_t mbx_da_struct_ver; - uint32_t ep_fw_da_struct_ver; +#define lpfc_cntl_attr_boot_enable_SHIFT 24 +#define lpfc_cntl_attr_boot_enable_MASK 0x00000001 +#define lpfc_cntl_attr_boot_enable_WORD word17 + uint32_t rsvd18[2]; uint32_t ncsi_ver_str[3]; - uint32_t dflt_ext_timeout; + uint32_t rsvd23; uint32_t model_number[8]; uint32_t description[16]; uint32_t serial_number[8]; - uint32_t ip_ver_str[8]; + uint32_t ipl_name[5]; + uint32_t rsvd61[3]; uint32_t fw_ver_str[8]; uint32_t bios_ver_str[8]; uint32_t redboot_ver_str[8]; @@ -3796,53 +3801,31 @@ struct lpfc_controller_attribute { uint32_t flash_fw_ver_str[8]; uint32_t functionality; uint32_t word105; -#define lpfc_cntl_attr_max_cbd_len_SHIFT 0 -#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff -#define 
lpfc_cntl_attr_max_cbd_len_WORD word105 #define lpfc_cntl_attr_asic_rev_SHIFT 16 #define lpfc_cntl_attr_asic_rev_MASK 0x000000ff #define lpfc_cntl_attr_asic_rev_WORD word105 -#define lpfc_cntl_attr_gen_guid0_SHIFT 24 -#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff -#define lpfc_cntl_attr_gen_guid0_WORD word105 - uint32_t gen_guid1_12[3]; + uint32_t rsvd106[3]; uint32_t word109; -#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0 -#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff -#define lpfc_cntl_attr_gen_guid13_14_WORD word109 -#define lpfc_cntl_attr_gen_guid15_SHIFT 16 -#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff -#define lpfc_cntl_attr_gen_guid15_WORD word109 #define lpfc_cntl_attr_hba_port_cnt_SHIFT 24 #define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff #define lpfc_cntl_attr_hba_port_cnt_WORD word109 - uint32_t word110; -#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0 -#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff -#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110 -#define lpfc_cntl_attr_multi_func_dev_SHIFT 24 -#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff -#define lpfc_cntl_attr_multi_func_dev_WORD word110 + uint32_t rsvd110; uint32_t word111; -#define lpfc_cntl_attr_cache_valid_SHIFT 0 -#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff -#define lpfc_cntl_attr_cache_valid_WORD word111 #define lpfc_cntl_attr_hba_status_SHIFT 8 #define lpfc_cntl_attr_hba_status_MASK 0x000000ff #define lpfc_cntl_attr_hba_status_WORD word111 -#define lpfc_cntl_attr_max_domain_SHIFT 16 -#define lpfc_cntl_attr_max_domain_MASK 0x000000ff -#define lpfc_cntl_attr_max_domain_WORD word111 #define lpfc_cntl_attr_lnk_numb_SHIFT 24 #define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f #define lpfc_cntl_attr_lnk_numb_WORD word111 #define lpfc_cntl_attr_lnk_type_SHIFT 30 #define lpfc_cntl_attr_lnk_type_MASK 0x00000003 #define lpfc_cntl_attr_lnk_type_WORD word111 - uint32_t fw_post_status; - uint32_t hba_mtu[8]; + uint32_t rsvd112[9]; uint32_t word121; - uint32_t reserved1[3]; +#define lpfc_cntl_attr_asic_gen_SHIFT 8 +#define lpfc_cntl_attr_asic_gen_MASK 0x000000ff +#define lpfc_cntl_attr_asic_gen_WORD word121 + uint32_t rsvd122[3]; uint32_t word125; #define lpfc_cntl_attr_pci_vendor_id_SHIFT 0 #define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff @@ -3867,15 +3850,7 @@ struct lpfc_controller_attribute { #define lpfc_cntl_attr_pci_fnc_num_SHIFT 16 #define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff #define lpfc_cntl_attr_pci_fnc_num_WORD word127 -#define lpfc_cntl_attr_inf_type_SHIFT 24 -#define lpfc_cntl_attr_inf_type_MASK 0x000000ff -#define lpfc_cntl_attr_inf_type_WORD word127 - uint32_t unique_id[2]; - uint32_t word130; -#define lpfc_cntl_attr_num_netfil_SHIFT 0 -#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff -#define lpfc_cntl_attr_num_netfil_WORD word130 - uint32_t reserved2[4]; + uint32_t rsvd128[7]; }; struct lpfc_mbx_get_cntl_attributes { @@ -4839,6 +4814,7 @@ struct fcp_iwrite64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 @@ -4855,6 +4831,7 @@ struct fcp_iread64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 @@ -4871,6 +4848,7 
@@ struct fcp_icmnd64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f7a0aa3625f4..2400602a8561 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -567,7 +567,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); /* Initialize ERATT handling flag */ - phba->hba_flag &= ~HBA_ERATT_HANDLED; + clear_bit(HBA_ERATT_HANDLED, &phba->hba_flag); /* Enable appropriate host interrupts */ if (lpfc_readl(phba->HCregaddr, &status)) { @@ -595,17 +595,18 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; mod_timer(&vport->els_tmofunc, - jiffies + msecs_to_jiffies(1000 * timeout)); + jiffies + secs_to_jiffies(timeout)); /* Set up heart beat (HB) timer */ mod_timer(&phba->hb_tmofunc, - jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, - jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); + jiffies + secs_to_jiffies(phba->eratt_poll_interval)); - if (phba->hba_flag & LINK_DISABLED) { + if (test_bit(LINK_DISABLED, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2598 Adapter Link is disabled.\n"); lpfc_down_link(phba, pmb); @@ -925,9 +926,7 @@ lpfc_sli4_free_sp_events(struct lpfc_hba *phba) struct hbq_dmabuf *dmabuf; struct lpfc_cq_event *cq_event; - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~HBA_SP_QUEUE_EVT; - spin_unlock_irq(&phba->hbalock); + clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); while (!list_empty(&phba->sli4_hba.sp_queue_event)) { /* Get the response iocb from the head of work queue */ @@ -1228,18 +1227,15 @@ static void lpfc_rrq_timeout(struct timer_list *t) { struct lpfc_hba *phba; - unsigned long iflag; phba = from_timer(phba, t, rrq_tmr); - spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) - phba->hba_flag |= HBA_RRQ_ACTIVE; - else - phba->hba_flag &= ~HBA_RRQ_ACTIVE; - spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); + return; + } - if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) - lpfc_worker_wake_up(phba); + set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); + lpfc_worker_wake_up(phba); } /** @@ -1261,11 +1257,8 @@ lpfc_rrq_timeout(struct timer_list *t) static void lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { - unsigned long 
drvr_flag; - - spin_lock_irqsave(&phba->hbalock, drvr_flag); - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); - spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); /* Check and reset heart-beat timer if necessary */ mempool_free(pmboxq, phba->mbox_mem_pool); @@ -1274,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !test_bit(FC_UNLOADING, &phba->pport->load_flag)) mod_timer(&phba->hb_tmofunc, jiffies + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); return; } @@ -1457,7 +1450,7 @@ lpfc_issue_hb_mbox(struct lpfc_hba *phba) int retval; /* Is a Heartbeat mbox already in progress */ - if (phba->hba_flag & HBA_HBEAT_INP) + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) return 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1473,7 +1466,7 @@ lpfc_issue_hb_mbox(struct lpfc_hba *phba) mempool_free(pmboxq, phba->mbox_mem_pool); return -ENXIO; } - phba->hba_flag |= HBA_HBEAT_INP; + set_bit(HBA_HBEAT_INP, &phba->hba_flag); return 0; } @@ -1493,7 +1486,7 @@ lpfc_issue_hb_tmo(struct lpfc_hba *phba) { if (phba->cfg_enable_hba_heartbeat) return; - phba->hba_flag |= HBA_HBEAT_TMO; + set_bit(HBA_HBEAT_TMO, &phba->hba_flag); } /** @@ -1562,10 +1555,10 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ spin_lock_irq(&phba->pport->work_port_lock); if (time_after(phba->last_completion_time + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL), jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); - if (phba->hba_flag & HBA_HBEAT_INP) + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); else tmo = (1000 * LPFC_HB_MBOX_INTERVAL); @@ -1574,7 +1567,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_unlock_irq(&phba->pport->work_port_lock); /* Check if a MBX_HEARTBEAT is already in progress */ - if (phba->hba_flag & HBA_HBEAT_INP) { + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) { /* * If heart beat timeout called with HBA_HBEAT_INP set * we need to give the hb mailbox cmd a chance to @@ -1611,7 +1604,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) } } else { /* Check to see if we want to force a MBX_HEARTBEAT */ - if (phba->hba_flag & HBA_HBEAT_TMO) { + if (test_bit(HBA_HBEAT_TMO, &phba->hba_flag)) { retval = lpfc_issue_hb_mbox(phba); if (retval) tmo = (1000 * LPFC_HB_MBOX_INTERVAL); @@ -1699,9 +1692,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * since we cannot communicate with the pci card anyway. */ if (pci_channel_offline(phba->pcidev)) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); + clear_bit(DEFER_ERATT, &phba->hba_flag); return; } @@ -1752,9 +1743,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag)) phba->work_hs = old_host_status & ~HS_FFER1; - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); + clear_bit(DEFER_ERATT, &phba->hba_flag); phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); phba->work_status[1] = readl(phba->MBslimaddr + 0xac); } @@ -1798,9 +1787,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) * since we cannot communicate with the pci card anyway. 
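A second pattern repeated through these hunks: every msecs_to_jiffies(1000 * n) becomes secs_to_jiffies(n). The newer helper converts whole seconds straight to jiffies, dropping the error-prone factor-of-1000 arithmetic at each call site. For example, re-arming the heartbeat timer (values as in the patch):

        /* Before: seconds scaled to milliseconds by hand, then to jiffies. */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));

        /* After: the conversion helper states what the constant means. */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));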
*/ if (pci_channel_offline(phba->pcidev)) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); + clear_bit(DEFER_ERATT, &phba->hba_flag); return; } @@ -1811,7 +1798,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) /* Send an internal error event to mgmt application */ lpfc_board_errevt_to_mgmt(phba); - if (phba->hba_flag & DEFER_ERATT) + if (test_bit(DEFER_ERATT, &phba->hba_flag)) lpfc_handle_deferred_eratt(phba); if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { @@ -1920,6 +1907,9 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, uint32_t intr_mode; LPFC_MBOXQ_t *mboxq; + /* Notifying the transport that the targets are going offline. */ + lpfc_scsi_dev_block(phba); + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { /* @@ -1956,6 +1946,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, lpfc_offline_prep(phba, mbx_action); lpfc_sli_flush_io_rings(phba); + lpfc_nvmels_flush_cmd(phba); lpfc_offline(phba); /* release interrupt for possible resource change */ lpfc_sli4_disable_intr(phba); @@ -2026,7 +2017,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) /* consider PCI bus read error as pci_channel_offline */ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) return; - if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { + if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) { lpfc_sli4_offline_eratt(phba); return; } @@ -3105,7 +3096,8 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_DISCOVERY, "0282 did:x%x ndlp:x%px " - "refcnt:%d xflags x%x nflag x%x\n", + "refcnt:%d xflags x%x " + "nflag x%lx\n", ndlp->nlp_DID, (void *)ndlp, kref_read(&ndlp->kref), ndlp->fc4_xpt_flags, @@ -3131,8 +3123,8 @@ lpfc_cleanup(struct lpfc_vport *vport) void lpfc_stop_vport_timers(struct lpfc_vport *vport) { - del_timer_sync(&vport->els_tmofunc); - del_timer_sync(&vport->delayed_disc_tmo); + timer_delete_sync(&vport->els_tmofunc); + timer_delete_sync(&vport->delayed_disc_tmo); lpfc_can_disctmo(vport); return; } @@ -3151,7 +3143,7 @@ __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; /* Now, try to stop the timer */ - del_timer(&phba->fcf.redisc_wait); + timer_delete(&phba->fcf.redisc_wait); } /** @@ -3313,20 +3305,21 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) lpfc_stop_vport_timers(phba->pport); cancel_delayed_work_sync(&phba->eq_delay_work); cancel_delayed_work_sync(&phba->idle_stat_delay_work); - del_timer_sync(&phba->sli.mbox_tmo); - del_timer_sync(&phba->fabric_block_timer); - del_timer_sync(&phba->eratt_poll); - del_timer_sync(&phba->hb_tmofunc); + timer_delete_sync(&phba->sli.mbox_tmo); + timer_delete_sync(&phba->fabric_block_timer); + timer_delete_sync(&phba->eratt_poll); + timer_delete_sync(&phba->hb_tmofunc); if (phba->sli_rev == LPFC_SLI_REV4) { - del_timer_sync(&phba->rrq_tmr); - phba->hba_flag &= ~HBA_RRQ_ACTIVE; + timer_delete_sync(&phba->rrq_tmr); + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); } - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: /* Stop any LightPulse device specific driver timers */ - del_timer_sync(&phba->fcp_poll_timer); + timer_delete_sync(&phba->fcp_poll_timer); break; case LPFC_PCI_DEV_OC: /* Stop any OneConnect device specific driver timers */ @@ -3364,15 +3357,15 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int 
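The del_timer_sync() to timer_delete_sync() (and del_timer() to timer_delete()) changes in lpfc_stop_vport_timers(), __lpfc_sli4_stop_fcf_redisc_wait_timer() and lpfc_stop_hba_timers() are a pure rename to the current timer API; semantics are unchanged, and the _sync variant still waits for a concurrently running callback to finish before returning. Teardown ordering therefore stays the same, as in this condensed fragment from the heartbeat path:

        /* Stop the timer and wait out any in-flight callback before
         * clearing state the callback might still be reading.
         */
        timer_delete_sync(&phba->hb_tmofunc);
        clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
        clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);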
mbx_action) spin_unlock_irqrestore(&phba->hbalock, iflag); if (mbx_action == LPFC_MBX_NO_WAIT) return; - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. */ - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, - phba->sli.mbox_active) * 1000) + jiffies; + timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active)) + jiffies; } spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -3391,7 +3384,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) } /** - * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes. * @phba: pointer to lpfc hba data structure. * * Allocate RPIs for all active remote nodes. This is needed whenever @@ -3399,7 +3392,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) * is to fixup the temporary rpi assignments. **/ void -lpfc_sli4_node_prep(struct lpfc_hba *phba) +lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; @@ -3409,10 +3402,10 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) return; vports = lpfc_create_vport_work_array(phba); - if (vports == NULL) + if (!vports) return; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i]; i++) { if (test_bit(FC_UNLOADING, &vports[i]->load_flag)) continue; @@ -3421,14 +3414,20 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) nlp_listp) { rpi = lpfc_sli4_alloc_rpi(phba); if (rpi == LPFC_RPI_ALLOC_ERROR) { - /* TODO print log? */ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0099 RPI alloc error for " + "ndlp x%px DID:x%06x " + "flg:x%lx\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag); continue; } ndlp->nlp_rpi = rpi; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0009 Assign RPI x%x to ndlp x%px " - "DID:x%06x flg:x%x\n", + "DID:x%06x flg:x%lx\n", ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, ndlp->nlp_flag); } @@ -3832,35 +3831,12 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) &vports[i]->fc_nodes, nlp_listp) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); - + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (offline || hba_pci_err) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_UNREG_INP | - NLP_RPI_REGISTERED); - spin_unlock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - lpfc_sli_rpi_release(vports[i], - ndlp); - } else { - lpfc_unreg_rpi(vports[i], ndlp); - } - /* - * Whenever an SLI4 port goes offline, free the - * RPI. Get a new RPI when the adapter port - * comes back online. - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - lpfc_printf_vlog(vports[i], KERN_INFO, - LOG_NODE | LOG_DISCOVERY, - "0011 Free RPI x%x on " - "ndlp: x%px did x%x\n", - ndlp->nlp_rpi, ndlp, - ndlp->nlp_DID); - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag); } if (ndlp->nlp_type & NLP_FABRIC) { @@ -3874,8 +3850,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) * Otherwise, let dev_loss take care of * the node. 
*/ - if (!(ndlp->save_flags & - NLP_IN_RECOV_POST_DEV_LOSS) && + if (!test_bit(NLP_IN_RECOV_POST_DEV_LOSS, + &ndlp->save_flags) && !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) lpfc_disc_state_machine @@ -4711,6 +4687,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) uint64_t wwn; bool use_no_reset_hba = false; int rc; + u8 if_type; if (lpfc_no_hba_reset_cnt) { if (phba->sli_rev < LPFC_SLI_REV4 && @@ -4785,7 +4762,24 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; - shost->max_cmd_len = 16; + + /* Set max_cmd_len applicable to ASIC support */ + if (phba->sli_rev == LPFC_SLI_REV4) { + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_2: + fallthrough; + case LPFC_SLI_INTF_IF_TYPE_6: + shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; + break; + default: + shost->max_cmd_len = LPFC_FCP_CDB_LEN; + break; + } + } else { + shost->max_cmd_len = LPFC_FCP_CDB_LEN; + } if (phba->sli_rev == LPFC_SLI_REV4) { if (!phba->cfg_fcp_mq_threshold || @@ -4933,14 +4927,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= msecs_to_jiffies(30 * 1000)) { + if (time >= secs_to_jiffies(30)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= msecs_to_jiffies(15 * 1000) && + if (time >= secs_to_jiffies(15) && phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " @@ -4954,7 +4948,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; if (!atomic_read(&vport->fc_map_cnt) && - time < msecs_to_jiffies(2 * 1000)) + time < secs_to_jiffies(2)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -4976,7 +4970,7 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) * Avoid reporting supported link speed for FCoE as it can't be * controlled via FCoE. */ - if (phba->hba_flag & HBA_FCOE_MODE) + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) return; if (phba->lmt & LMT_256Gb) @@ -5188,8 +5182,8 @@ lpfc_vmid_poll(struct timer_list *t) lpfc_worker_wake_up(phba); /* restart the timer for the next iteration */ - mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * - LPFC_VMID_TIMER)); + mod_timer(&phba->inactive_vmid_poll, + jiffies + secs_to_jiffies(LPFC_VMID_TIMER)); } /** @@ -5490,7 +5484,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch * topology info. Note: Optional for non FC-AL ports. */ - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_pmb; @@ -6025,7 +6019,7 @@ lpfc_cmf_timer(struct hrtimer *timer) */ if (phba->cmf_active_mode == LPFC_CFG_MANAGED && phba->link_state != LPFC_LINK_DOWN && - phba->hba_flag & HBA_SETUP) { + test_bit(HBA_SETUP, &phba->hba_flag)) { mbpi = phba->cmf_last_sync_bw; phba->cmf_last_sync_bw = 0; extra = 0; @@ -6778,11 +6772,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, } /* If the FCF discovery is in progress, do nothing. 
*/ - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & FCF_TS_INPROG) { - spin_unlock_irq(&phba->hbalock); + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) break; - } + spin_lock_irq(&phba->hbalock); /* If fast FCF failover rescan event is pending, do nothing */ if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { spin_unlock_irq(&phba->hbalock); @@ -6920,10 +6912,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * re-instantiate the Vlink using FDISC. */ mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + jiffies + secs_to_jiffies(1)); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -7321,9 +7311,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) unsigned long iflags; /* First, declare the async event has been handled */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag &= ~ASYNC_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); + clear_bit(ASYNC_EVENT, &phba->hba_flag); /* Now, handle all the async events */ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); @@ -7967,11 +7955,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); /* CMF congestion timer */ - hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - phba->cmf_timer.function = lpfc_cmf_timer; + hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); /* CMF 1 minute stats collection timer */ - hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - phba->cmf_stats_timer.function = lpfc_cmf_stats_timer; + hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); /* * Control structure for handling external multi-buffer mailbox @@ -8247,7 +8234,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * our max amount and we need to limit lpfc_sg_seg_cnt * to minimize the risk of running out. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp) + max_buf_size; /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ @@ -8269,7 +8256,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * the FCP rsp, a SGE for each, and a SGE for up to * cfg_sg_seg_cnt data segments. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp) + ((phba->cfg_sg_seg_cnt + extra) * sizeof(struct sli4_sge)); @@ -8332,7 +8319,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->lpfc_cmd_rsp_buf_pool = dma_pool_create("lpfc_cmd_rsp_buf_pool", &phba->pcidev->dev, - sizeof(struct fcp_cmnd) + + sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp), i, 0); if (!phba->lpfc_cmd_rsp_buf_pool) { @@ -9869,41 +9856,38 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) return; } /* FW supports persistent topology - override module parameter value */ - phba->hba_flag |= HBA_PERSISTENT_TOPO; + set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag); /* if ASIC_GEN_NUM >= 0xC) */ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_6) || (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_FAMILY_G6)) { - if (!tf) { + if (!tf) phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) ? 
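The CMF timer hunk above swaps the two-step hrtimer_init() plus manual .function assignment for hrtimer_setup(), which takes the callback as an argument and initializes the timer in one call, so a timer can no longer be armed while its function pointer is still unset. The shape of the conversion:

        /* Before: two steps, callback assigned separately. */
        hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        phba->cmf_timer.function = lpfc_cmf_timer;

        /* After: one call, callback supplied up front. */
        hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer,
                      CLOCK_MONOTONIC, HRTIMER_MODE_REL);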
FLAGS_TOPOLOGY_MODE_LOOP : FLAGS_TOPOLOGY_MODE_PT_PT); - } else { - phba->hba_flag &= ~HBA_PERSISTENT_TOPO; - } + else + clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag); } else { /* G5 */ - if (tf) { + if (tf) /* If topology failover set - pt is '0' or '1' */ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : FLAGS_TOPOLOGY_MODE_LOOP_PT); - } else { + else phba->cfg_topology = ((pt == LINK_FLAGS_P2P) ? FLAGS_TOPOLOGY_MODE_PT_PT : FLAGS_TOPOLOGY_MODE_LOOP); - } } - if (phba->hba_flag & HBA_PERSISTENT_TOPO) { + if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag)) lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2020 Using persistent topology value [%s]", lpfc_topo_to_str[phba->cfg_topology]); - } else { + else lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2021 Invalid topology values from FW " "Using driver parameter defined value [%s]", lpfc_topo_to_str[phba->cfg_topology]); - } } /** @@ -10146,7 +10130,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) forced_link_speed = bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); if (forced_link_speed) { - phba->hba_flag |= HBA_FORCED_LINK_SPEED; + set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag); switch (forced_link_speed) { case LINK_SPEED_1G: @@ -10452,6 +10436,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) struct lpfc_vector_map_info *cpup; struct lpfc_vector_map_info *eqcpup; struct lpfc_eq_intr_info *eqi; + u32 wqesize; /* * Create HBA Record arrays. @@ -10671,9 +10656,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) * Create ELS Work Queues */ - /* Create slow-path ELS Work Queue */ + /* + * Create slow-path ELS Work Queue. + * Increase the ELS WQ size when WQEs contain an embedded cdb + */ + wqesize = (phba->fcp_embed_io) ? + LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.wq_esize, + wqesize, phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -11120,14 +11111,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.fw_func_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; - phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; - phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; phba->sli4_hba.physical_port = mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " - "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, - phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); + "3251 QUERY_FW_CFG: func_mode:x%x\n", + phba->sli4_hba.fw_func_mode); mempool_free(mboxq, phba->mbox_mem_pool); @@ -12241,7 +12229,7 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); if (retval) return intr_mode; - phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; + clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); if (cfg_mode == 2) { /* Now, try to enable MSI-X interrupt mode */ @@ -12776,7 +12764,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) * timer. Wait for the poll timer to retire. 
*/ synchronize_rcu(); - del_timer_sync(&phba->cpuhp_poll_timer); + timer_delete_sync(&phba->cpuhp_poll_timer); } static void lpfc_cpuhp_remove(struct lpfc_hba *phba) @@ -12887,7 +12875,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) if (offline) { /* Find next online CPU on original mask */ - cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); + cpu_next = cpumask_next_wrap(cpu, orig_mask); cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); /* Found a valid CPU */ @@ -13184,6 +13172,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) eqhdl = lpfc_get_eq_hdl(0); rc = pci_irq_vector(phba->pcidev, 0); if (rc < 0) { + free_irq(phba->pcidev->irq, phba); pci_free_irq_vectors(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0496 MSI pci_irq_vec failed (%d)\n", rc); @@ -13264,6 +13253,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) eqhdl = lpfc_get_eq_hdl(0); retval = pci_irq_vector(phba->pcidev, 0); if (retval < 0) { + free_irq(phba->pcidev->irq, phba); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0502 INTR pci_irq_vec failed (%d)\n", retval); @@ -13512,6 +13502,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Disable FW logging to host memory */ lpfc_ras_stop_fwlog(phba); + lpfc_sli4_queue_unset(phba); + /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); @@ -13877,12 +13869,7 @@ fcponly: if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; - rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "6400 Can't set dma maximum segment size\n"); - return rc; - } + dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); /* * Check whether the adapter supports an embedded copy of the @@ -14812,6 +14799,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_unset_pci_mem_s4; } + spin_lock_init(&phba->rrq_list_lock); INIT_LIST_HEAD(&phba->active_rrq_list); INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); @@ -15528,7 +15516,7 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; if (phba->link_state == LPFC_HBA_ERROR && - phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) return PCI_ERS_RESULT_NEED_RESET; switch (phba->pci_dev_grp) { diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index e98f1c2b2220..fb6dbcb86c09 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2524,8 +2524,10 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); /* addr mode is bit wise inverted value of fcf addr_mode */ - bf_set(lpfc_reg_fcfi_mam, reg_fcfi, - (~phba->fcf.addr_mode) & 0x3); + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { + bf_set(lpfc_reg_fcfi_mam, reg_fcfi, + (~phba->fcf.addr_mode) & 0x3); + } } else { /* This is ONLY for NVMET MRQ == 1 */ if (phba->cfg_nvmet_mrq != 1) diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index c4172791c267..a596b80d03d4 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -47,14 +47,23 @@ #include "lpfc_debugfs.h" +/* Called to clear RSCN discovery flags when driver is unloading. 
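The two added free_irq() calls in the lpfc_sli4_enable_msi()/lpfc_sli4_enable_intr() hunks above are leak fixes: request_irq() has already succeeded when pci_irq_vector() fails, so the error path must give the line back before releasing the vectors. A condensed sketch of the corrected MSI path (the trailing return is assumed from context, not shown in the hunk):

        rc = pci_irq_vector(phba->pcidev, 0);
        if (rc < 0) {
                /* Undo the earlier request_irq() first, then drop the
                 * allocated vectors; skipping free_irq() here leaked
                 * the IRQ on this path before the patch.
                 */
                free_irq(phba->pcidev->irq, phba);
                pci_free_irq_vectors(phba->pcidev);
                return rc;
        }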
*/ +static bool +lpfc_check_unload_and_clr_rscn(unsigned long *fc_flag) +{ + /* If unloading, then clear the FC_RSCN_DEFERRED flag */ + if (test_bit(FC_UNLOADING, fc_flag)) { + clear_bit(FC_RSCN_DEFERRED, fc_flag); + return false; + } + return test_bit(FC_RSCN_DEFERRED, fc_flag); +} + /* Called to verify a rcv'ed ADISC was intended for us. */ static int lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { - /* First, we MUST have a RPI registered */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) - return 0; /* Compare the ADISC rsp WWNN / WWPN matches our internal node * table entry for that node. @@ -213,8 +222,10 @@ void lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(abort_list); + LIST_HEAD(drv_cmpl_list); struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; + int retval = 0; pring = lpfc_phba_elsring(phba); @@ -225,7 +236,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); /* Clean up all fabric IOs first.*/ @@ -250,11 +261,20 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) /* Abort the targeted IOs and remove them from the abort list. */ list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) { - spin_lock_irq(&phba->hbalock); - list_del_init(&iocb->dlist); - lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); - spin_unlock_irq(&phba->hbalock); + spin_lock_irq(&phba->hbalock); + list_del_init(&iocb->dlist); + retval = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); + spin_unlock_irq(&phba->hbalock); + + if (retval && test_bit(FC_UNLOADING, &phba->pport->load_flag)) { + list_del_init(&iocb->list); + list_add_tail(&iocb->list, &drv_cmpl_list); + } } + + lpfc_sli_cancel_iocbs(phba, &drv_cmpl_list, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); @@ -317,7 +337,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox) /* Now process the REG_RPI cmpl */ lpfc_mbx_cmpl_reg_login(phba, login_mbox); - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); kfree(save_iocb); } @@ -381,7 +401,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "0114 PLOGI chkparm OK Data: x%x x%x x%lx " "x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi, vport->port_state, @@ -406,7 +426,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) break; fallthrough; case NLP_STE_REG_LOGIN_ISSUE: @@ -426,7 +446,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); @@ -457,7 +477,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist 
*ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); login_mbox = NULL; link_mbox = NULL; @@ -481,7 +501,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * must have ACCed the remote NPorts FLOGI to us * to make it here. */ - if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) + if (test_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag)) lpfc_els_abort_flogi(phba); ed_tov = be32_to_cpu(sp->cmn.e_d_tov); @@ -529,13 +549,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_can_disctmo(vport); } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -604,10 +624,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * this ELS request. The only way to do this is * to register, then unregister the RPI. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | - NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); } stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; @@ -642,9 +661,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Start the ball rolling by issuing REG_LOGIN here */ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); @@ -714,6 +732,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ADISC *ap; uint32_t *lp; uint32_t cmd; + int rc; pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *) pcmd->virt; @@ -738,21 +757,29 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * resume the RPI before the ACC goes out. */ if (vport->phba->sli_rev == LPFC_SLI_REV4) { - elsiocb = kmalloc(sizeof(struct lpfc_iocbq), - GFP_KERNEL); - if (elsiocb) { - /* Save info from cmd IOCB used in rsp */ - memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb, - sizeof(struct lpfc_iocbq)); - - /* Save the ELS cmd */ - elsiocb->drvrTimeout = cmd; - - if (lpfc_sli4_resume_rpi(ndlp, - lpfc_mbx_cmpl_resume_rpi, - elsiocb)) - kfree(elsiocb); - goto out; + /* Don't resume an unregistered RPI - unnecessary + * mailbox. Just send the ACC when the RPI is not + * registered. 
+ */ + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { + elsiocb = kmalloc(sizeof(*elsiocb), GFP_KERNEL); + if (elsiocb) { + /* Save info from cmd IOCB used in + * rsp + */ + memcpy(elsiocb, cmdiocb, + sizeof(*elsiocb)); + + elsiocb->drvrTimeout = cmd; + + rc = lpfc_sli4_resume_rpi(ndlp, + lpfc_mbx_cmpl_resume_rpi, + elsiocb); + if (rc) + kfree(elsiocb); + + goto out; + } } } @@ -774,7 +801,7 @@ out: */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } @@ -791,12 +818,9 @@ out: /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return 0; } @@ -812,9 +836,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); if (els_cmd == ELS_CMD_PRLO) lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else @@ -867,9 +889,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -891,15 +911,13 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE || ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + jiffies + secs_to_jiffies(1)); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3204 Start nlpdelay on DID x%06x " - "nflag x%x lastels x%x ref cnt %u", + "nflag x%lx lastels x%x ref cnt %u", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -912,9 +930,7 @@ out: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. 
Since part of processing includes an @@ -955,7 +971,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport, out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " - "state x%x flags x%x port_type: x%x " + "state x%x flags x%lx port_type: x%x " "npr->initfcn: x%x npr->tgtfcn: x%x\n", cmd, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag, vport->port_type, @@ -997,7 +1013,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); } if (npr->Retry && ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) @@ -1034,7 +1050,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, roles |= FC_RPORT_ROLE_FCP_TARGET; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport rolechg: role:x%x did:x%x flg:x%x", + "rport rolechg: role:x%x did:x%x flg:x%lx", roles, ndlp->nlp_DID, ndlp->nlp_flag); if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) @@ -1045,10 +1061,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, static uint32_t lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 0; } @@ -1058,16 +1072,12 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (test_bit(FC_RSCN_MODE, &vport->fc_flag) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && (ndlp->nlp_type & NLP_FCP_TARGET)))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 1; } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); return 0; } @@ -1092,10 +1102,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. 
*/ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -1120,11 +1130,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1437 release_rpi UNREG x%x " - "on NPort x%x flg x%x\n", + "on NPort x%x flg x%lx\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); @@ -1152,7 +1162,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0271 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); return ndlp->nlp_state; @@ -1167,13 +1177,12 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * working on the same NPortID, do nothing for this thread * to stop it. */ - if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + if (!test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0272 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); - } return ndlp->nlp_state; } @@ -1207,9 +1216,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; @@ -1267,11 +1274,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NULL); } else { if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (vport->num_disc_nodes)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + vport->num_disc_nodes) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* Check if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { @@ -1332,10 +1337,8 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1)); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -1366,7 +1369,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + if (test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { /* Recovery from PLOGI collision logic */ return ndlp->nlp_state; } @@ 
-1395,7 +1398,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, goto out; /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0121 PLOGI chkparm OK Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) @@ -1423,14 +1426,14 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ed_tov = (phba->fc_edtov + 999999) / 1000000; } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } /* @@ -1453,7 +1456,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, LOG_TRACE_EVENT, "0133 PLOGI: no memory " "for config_link " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1477,7 +1480,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0018 PLOGI: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1497,7 +1500,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; break; default: - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; } @@ -1512,8 +1515,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* decrement node reference count to the failed mbox * command */ @@ -1521,7 +1523,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0134 PLOGI: cannot issue reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } else { @@ -1529,7 +1531,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0135 PLOGI: cannot format reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } @@ -1582,18 +1584,15 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1604,10 +1603,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, { struct lpfc_hba *phba = 
vport->phba; - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; /* software abort outstanding PLOGI */ @@ -1615,9 +1612,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp->nlp_state; } @@ -1635,10 +1631,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { if (vport->num_disc_nodes) lpfc_more_adisc(vport); } @@ -1727,9 +1720,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; @@ -1768,18 +1759,15 @@ static uint32_t lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding ADISC */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding ADISC */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1790,10 +1778,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; /* software abort outstanding ADISC */ @@ -1801,9 +1787,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -1837,7 +1822,7 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, * transition to UNMAPPED provided the RPI has completed * registration. 
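Where the old code tested a flag and then cleared it as two separate steps under the lock, the combined atomic does both and returns the prior value, as lpfc_rcv_plogi_adisc_issue() now uses it:

	/* Before: check, then clear, with the lock held for the clear. */
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);
		if (vport->num_disc_nodes)
			lpfc_more_adisc(vport);
	}

	/* After: one atomic read-modify-write replaces the sequence. */
	if (test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
		if (vport->num_disc_nodes)
			lpfc_more_adisc(vport);
	}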
*/ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); } else { @@ -1876,7 +1861,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -1887,7 +1872,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); list_del(&mb->list); phba->sli.mboxq_cnt--; @@ -1956,10 +1941,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + jiffies + secs_to_jiffies(1)); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_issue_els_logo(vport, ndlp, 0); @@ -1970,7 +1953,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* Only if we are not a fabric nport do we issue PRLI */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -2042,15 +2025,12 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -2059,25 +2039,22 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, void *arg, uint32_t evt) { - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); /* If we are a target we won't immediately transition into PRLI, * so if REG_LOGIN already completed we don't need to ignore it. 
*/ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || !vport->phba->nvmet_support) - ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + set_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2211,7 +2188,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, + &ndlp->nlp_flag); } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; @@ -2255,7 +2233,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Both sides support FB. The target's first * burst size is a 512 byte encoded value. */ - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, nvpr); @@ -2270,7 +2248,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6029 NVME PRLI Cmpl w1 x%08x " - "w4 x%08x w5 x%08x flag x%x, " + "w4 x%08x w5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", be32_to_cpu(nvpr->word1), be32_to_cpu(nvpr->word4), @@ -2282,13 +2260,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { out: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_DISCOVERY | LOG_NODE, + "6228 Sending LOGO, determined nlp_type " + "0x%x nlp_flag x%lx refcnt %u\n", + ndlp->nlp_type, ndlp->nlp_flag, + kref_read(&ndlp->kref)); lpfc_issue_els_logo(vport, ndlp, 0); - - ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } @@ -2336,18 +2314,15 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } @@ -2375,10 +2350,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. 
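lpfc_check_unload_and_clr_rscn() is new in this series but defined outside these hunks. Judging from its name and its call sites, it folds an unload test into the old FC_RSCN_DEFERRED check; the sketch below is an inference from that contract, not code taken from the patch:

	/* Plausible shape only - the real body is not shown here. */
	static bool lpfc_check_unload_and_clr_rscn(unsigned long *fc_flag)
	{
		/* Assumed: when unloading, drop the deferral so node
		 * recovery can proceed instead of being skipped.
		 */
		if (test_bit(FC_UNLOADING, fc_flag)) {
			clear_bit(FC_RSCN_DEFERRED, fc_flag);
			return false;
		}
		/* Otherwise keep honoring a pending deferred RSCN. */
		return test_bit(FC_RSCN_DEFERRED, fc_flag);
	}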
*/ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; /* software abort outstanding PRLI */ @@ -2386,9 +2359,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2427,9 +2399,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } @@ -2468,9 +2438,8 @@ lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2576,8 +2545,9 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport, { ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); @@ -2637,8 +2607,24 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* flush the target */ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); - /* Treat like rcv logo */ - lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); + /* Send PRLO_ACC */ + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); + + /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR. + * lpfc_cmpl_els_logo_acc is expected to restart discovery. 
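The timer changes in these hunks are equivalences, not behavior changes: secs_to_jiffies(n) is the kernel's seconds-to-jiffies helper and matches msecs_to_jiffies(1000 * n) exactly. For the one-second retry delay used throughout:

	/* Both arm the same one-second delay: */
	mod_timer(&ndlp->nlp_delayfunc,
		  jiffies + msecs_to_jiffies(1000 * 1));	/* old */
	mod_timer(&ndlp->nlp_delayfunc,
		  jiffies + secs_to_jiffies(1));		/* new */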
+ */ + ndlp->nlp_last_elscmd = ELS_CMD_PRLO; + ndlp->nlp_prev_state = ndlp->nlp_state; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, + "3422 DID x%06x nflag x%lx lastels x%x ref cnt %u\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_last_elscmd, + kref_read(&ndlp->kref)); + + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + return ndlp->nlp_state; } @@ -2652,8 +2638,9 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -2666,16 +2653,16 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ - if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) return ndlp->nlp_state; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); - } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + } else if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* send PLOGI immediately, move to PLOGI issue state */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2696,14 +2683,14 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2734,15 +2721,15 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * or discovery in progress for this node. Starting discovery * here will affect the counting of discovery threads. */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && - !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. 
*/ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2757,24 +2744,18 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + jiffies + secs_to_jiffies(1)); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); } return ndlp->nlp_state; } @@ -2811,7 +2792,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2844,7 +2825,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2863,12 +2844,11 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, /* SLI4 ports have preallocated logical rpis. */ if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - if (ndlp->nlp_flag & NLP_LOGO_ACC) { + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); - } } else { - if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + if (test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2880,10 +2860,8 @@ static uint32_t lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; } lpfc_drop_node(vport, ndlp); @@ -2894,15 +2872,14 @@ static uint32_t lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. 
*/ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -3115,7 +3092,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " - "state %d rpi x%x Data: x%x x%x\n", + "state %d rpi x%x Data: x%lx x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, ndlp->nlp_flag, data1); @@ -3132,12 +3109,12 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ((uint32_t)ndlp->nlp_type)); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x " - "rpi x%x Data: x%x x%x\n", + "rpi x%x Data: x%lx x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, - "DSM out: ste:%d did:x%x flg:x%x", + "DSM out: ste:%d did:x%x flg:x%lx", rc, ndlp->nlp_DID, ndlp->nlp_flag); /* Decrement the ndlp reference count held for this function */ lpfc_nlp_put(ndlp); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index c5792eaf3f64..a6647dd360d1 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> @@ -95,7 +95,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, vport = lport->vport; if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) || - vport->phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -ENODEV; qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); @@ -242,7 +242,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) * @phba: pointer to lpfc hba data structure. * @axchg: pointer to exchange context for the NVME LS request * - * This routine is used for processing an asychronously received NVME LS + * This routine is used for processing an asynchronously received NVME LS * request. Any remaining validation is done and the LS is then forwarded * to the nvme-fc transport via nvme_fc_rcv_ls_req(). 
* @@ -272,7 +272,7 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, remoteport = lpfc_rport->remoteport; if (!vport->localport || - vport->phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -EINVAL; lport = vport->localport->private; @@ -569,7 +569,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_DID, ntype, nstate); return -ENODEV; } - if (vport->phba->hba_flag & HBA_IOQ_FLUSH) + if (test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -ENODEV; if (!vport->phba->sli4_hba.nvmels_wq) @@ -675,7 +675,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, vport = lport->vport; if (test_bit(FC_UNLOADING, &vport->load_flag) || - vport->phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -ENODEV; atomic_inc(&lport->fc4NvmeLsRequests); @@ -1232,7 +1232,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ if ((phba->cfg_nvme_enable_fb) && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { req_len = lpfc_ncmd->nvmeCmd->payload_length; if (req_len < pnode->nvme_fb_size) wqe->fcp_iwrite.initial_xfer_len = @@ -1568,7 +1568,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, phba = vport->phba; if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) || - phba->hba_flag & HBA_IOQ_FLUSH) { + test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6124 Fail IO, Driver unload\n"); atomic_inc(&lport->xmt_fcp_err); @@ -1909,24 +1909,19 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, return; } - /* Guard against IO completion being called at same time */ - spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); - - /* If the hba is getting reset, this flag is set. It is - * cleared when the reset is complete and rings reestablished. - */ - spin_lock(&phba->hbalock); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock(&phba->hbalock); - spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6139 Driver in reset cleanup - flushing " - "NVME Req now. hba_flag x%x\n", + "NVME Req now. hba_flag x%lx\n", phba->hba_flag); return; } + /* Guard against IO completion being called at same time */ + spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); + spin_lock(&phba->hbalock); + nvmereq_wqe = &lpfc_nbuf->cur_iocbq; /* @@ -2236,18 +2231,20 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hdw_queue *qp; int abts_scsi, abts_nvme; + u16 nvmels_cnt; /* Host transport has to clean up and confirm requiring an indefinite * wait. Print a message if a 10 second wait expires and renew the * wait. This is unexpected. 
*/ - wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); + wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO); while (true) { ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); if (unlikely(!ret)) { pending = 0; abts_scsi = 0; abts_nvme = 0; + nvmels_cnt = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { qp = &phba->sli4_hba.hdwq[i]; if (!vport->localport || !qp || !qp->io_wq) @@ -2260,6 +2257,11 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, abts_scsi += qp->abts_scsi_io_bufs; abts_nvme += qp->abts_nvme_io_bufs; } + if (phba->sli4_hba.nvmels_wq) { + pring = phba->sli4_hba.nvmels_wq->pring; + if (pring) + nvmels_cnt = pring->txcmplq_cnt; + } if (!vport->localport || test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || phba->link_state == LPFC_HBA_ERROR || @@ -2268,10 +2270,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6176 Lport x%px Localport x%px wait " - "timed out. Pending %d [%d:%d]. " + "timed out. Pending %d [%d:%d:%d]. " "Renewing.\n", lport, vport->localport, pending, - abts_scsi, abts_nvme); + abts_scsi, abts_nvme, nvmels_cnt); continue; } break; @@ -2506,7 +2508,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) "6031 RemotePort Registration failed " "err: %d, DID x%06x ref %u\n", ret, ndlp->nlp_DID, kref_read(&ndlp->kref)); - lpfc_nlp_put(ndlp); + + /* Only release reference if one was taken for this request */ + if (!oldrport) + lpfc_nlp_put(ndlp); } return ret; @@ -2612,7 +2617,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * clear any rport state until the transport calls back. */ - if (ndlp->nlp_type & NLP_NVME_TARGET) { + if ((ndlp->nlp_type & NLP_NVME_TARGET) || + (remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) { /* No concern about the role change on the nvme remoteport. * The transport will update it. */ @@ -2649,14 +2655,11 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference. Check if another thread has set * NLP_DROPPED. */ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, + &ndlp->nlp_flag)) { lpfc_nlp_put(ndlp); return; } - spin_unlock_irq(&ndlp->lock); } } } @@ -2846,3 +2849,43 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); #endif } + +/** + * lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port + * @phba: Pointer to HBA context object. 
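The NLP_DROPPED change in lpfc_nvme_unregister_port() above is the subtlest case of the conversion: the lock used to make check-then-set race-free, and test_and_set_bit() preserves that guarantee in a single step, so exactly one thread drops the node reference:

	/* Before: lock, check, set, unlock. */
	spin_lock_irq(&ndlp->lock);
	if (!(ndlp->nlp_flag & NLP_DROPPED)) {
		ndlp->nlp_flag |= NLP_DROPPED;
		spin_unlock_irq(&ndlp->lock);
		lpfc_nlp_put(ndlp);
		return;
	}
	spin_unlock_irq(&ndlp->lock);

	/* After: only the winner of the atomic RMW sees 0 returned. */
	if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) {
		lpfc_nlp_put(ndlp);
		return;
	}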
+ * + **/ +void +lpfc_nvmels_flush_cmd(struct lpfc_hba *phba) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + LIST_HEAD(cancel_list); + struct lpfc_sli_ring *pring = NULL; + struct lpfc_iocbq *piocb, *tmp_iocb; + unsigned long iflags; + + if (phba->sli4_hba.nvmels_wq) + pring = phba->sli4_hba.nvmels_wq->pring; + + if (unlikely(!pring)) + return; + + spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock(&pring->ring_lock); + list_splice_init(&pring->txq, &cancel_list); + pring->txq_cnt = 0; + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { + if (piocb->cmd_flag & LPFC_IO_NVME_LS) { + list_move_tail(&piocb->list, &cancel_list); + pring->txcmplq_cnt--; + piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; + } + } + spin_unlock(&pring->ring_lock); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + if (!list_empty(&cancel_list)) + lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); +#endif +} diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 561ced5503c6..fba2e62027b7 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> @@ -1363,6 +1363,16 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport, atomic_inc(&lpfc_nvmet->xmt_ls_abort); } +static int +lpfc_nvmet_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn) +{ + struct lpfc_nodelist *ndlp = hosthandle; + + *wwnn = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + *wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn); + return 0; +} + static void lpfc_nvmet_host_release(void *hosthandle) { @@ -1413,6 +1423,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .ls_req = lpfc_nvmet_ls_req, .ls_abort = lpfc_nvmet_ls_abort, .host_release = lpfc_nvmet_host_release, + .host_traddr = lpfc_nvmet_host_traddr, .max_hw_queues = 1, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, @@ -1811,7 +1822,9 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, ctxp->flag &= ~LPFC_NVME_XBUSY; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + spin_lock_irqsave(&phba->rrq_list_lock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflag); ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); if (ndlp && (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || @@ -2129,7 +2142,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * @axchg: pointer to exchange context for the NVME LS request * - * This routine is used for processing an asychronously received NVME LS + * This routine is used for processing an asynchronously received NVME LS * request. Any remaining validation is done and the LS is then forwarded * to the nvmet-fc transport via nvmet_fc_rcv_ls_req(). * @@ -2841,7 +2854,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { - if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) + if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag)) bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); } else { @@ -3393,14 +3406,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* If the hba is getting reset, this flag is set. It is * cleared when the reset is complete and rings reestablished. 
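Both abort paths (lpfc_nvme_fcp_abort() earlier and lpfc_nvmet_sol_fcp_issue_abort() here) hoist the flush check ahead of the lock acquisition; test_bit() is safe without hbalock, so the early-exit path no longer takes and drops locks just to fail fast:

	/* driver queued commands are in process of being flushed */
	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
		/* log and bail before any locks are taken */
		return 0;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	/* ...abort bookkeeping that genuinely needs the lock... */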
*/ - spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock_irqrestore(&phba->hbalock, flags); + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6163 Driver in reset cleanup - flushing " - "NVME Req now. hba_flag x%x oxid x%x\n", + "NVME Req now. hba_flag x%lx oxid x%x\n", phba->hba_flag, ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); spin_lock_irqsave(&ctxp->ctxlock, flags); @@ -3409,6 +3420,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, return 0; } + spin_lock_irqsave(&phba->hbalock, flags); /* Outstanding abort is in progress */ if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { spin_unlock_irqrestore(&phba->hbalock, flags); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 4a6e5223a224..9edf80b14b1a 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -25,7 +25,7 @@ #include <linux/interrupt.h> #include <linux/export.h> #include <linux/delay.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/t10-pi.h> #include <linux/crc-t10dif.h> #include <linux/blk-cgroup.h> @@ -474,9 +474,11 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, ndlp = psb->rdata->pnode; else ndlp = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflag); + spin_lock_irqsave(&phba->rrq_list_lock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); - spin_unlock_irqrestore(&phba->hbalock, iflag); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflag); if (ndlp && !offline) { lpfc_set_rrq_active(phba, ndlp, psb->cur_iocbq.sli4_lxritag, rxid, 1); @@ -598,7 +600,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, { struct lpfc_io_buf *lpfc_cmd; struct lpfc_sli4_hdw_queue *qp; - struct sli4_sge *sgl; + struct sli4_sge_le *sgl; dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_fcp_cmd; uint32_t cpu, idx; @@ -649,23 +651,23 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, * The balance are sg list bdes. Initialize the * first two and leave the rest for queuecommand. 
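The SGL setup in lpfc_get_scsi_buf_s4() now uses the little-endian view of the SGE (struct sli4_sge_le), letting bit fields be set in place; each three-step swap-modify-swap collapses to a single bf_set_le32():

	/* Before: convert word2 to host order, set the bit, convert back. */
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);

	/* After: operate directly on the little-endian layout. */
	bf_set_le32(lpfc_sli4_sge_last, sgl, 0);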
*/ - sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl; pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); + bf_set_le32(lpfc_sli4_sge_last, sgl, 0); + if (cmnd && cmnd->cmd_len > LPFC_FCP_CDB_LEN) + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd32)); + else + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); + sgl++; /* Setup the physical region for the FCP RSP */ - pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd32); sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 1); - sgl->word2 = cpu_to_le32(sgl->word2); + bf_set_le32(lpfc_sli4_sge_last, sgl, 1); sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); if (lpfc_ndlp_check_qdepth(phba, ndlp)) { @@ -2606,7 +2608,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, iocb_cmd->ulpLe = 1; fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + fcp_cmnd->fcpDl = cpu_to_be32(fcpdl); /* * Due to difference in data length between DIF/non-DIF paths, @@ -3223,14 +3225,18 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) * explicitly reinitialized. * all iocb memory resources are reused. */ - fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN) + ((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl = + cpu_to_be32(scsi_bufflen(scsi_cmnd)); + else + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); /* Set first-burst provided it was successfully negotiated */ - if (!(phba->hba_flag & HBA_FCOE_MODE) && + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && vport->cfg_first_burst_size && scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { u32 init_len, total_len; - total_len = be32_to_cpu(fcp_cmnd->fcpDl); + total_len = scsi_bufflen(scsi_cmnd); init_len = min(total_len, vport->cfg_first_burst_size); /* Word 4 & 5 */ @@ -3418,15 +3424,18 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + if (lpfc_cmd->pCmd->cmd_len > LPFC_FCP_CDB_LEN) + ((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl = cpu_to_be32(fcpdl); + else + fcp_cmnd->fcpDl = cpu_to_be32(fcpdl); /* Set first-burst provided it was successfully negotiated */ - if (!(phba->hba_flag & HBA_FCOE_MODE) && + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && vport->cfg_first_burst_size && scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { u32 init_len, total_len; - total_len = be32_to_cpu(fcp_cmnd->fcpDl); + total_len = fcpdl; init_len = min(total_len, vport->cfg_first_burst_size); /* Word 4 & 5 */ @@ -3434,8 +3443,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, wqe->fcp_iwrite.total_xfer_len = total_len; } else { /* Word 4 */ - wqe->fcp_iwrite.total_xfer_len = - be32_to_cpu(fcp_cmnd->fcpDl); + wqe->fcp_iwrite.total_xfer_len = fcpdl; } /* @@ -3892,7 +3900,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, fcprsp->rspInfo3); scsi_set_resid(cmnd, 0); - fcpDl = be32_to_cpu(fcpcmd->fcpDl); + if (cmnd->cmd_len > LPFC_FCP_CDB_LEN) + 
fcpDl = be32_to_cpu(((struct fcp_cmnd32 *)fcpcmd)->fcpDl); + else + fcpDl = be32_to_cpu(fcpcmd->fcpDl); if (resp_info & RESID_UNDER) { scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); @@ -4618,7 +4629,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; if (vport->cfg_first_burst_size && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { u32 xrdy_len; fcpdl = scsi_bufflen(scsi_cmnd); @@ -4721,6 +4732,14 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_NONE); } + + /* Additional fcp cdb length field calculation. + * LPFC_FCP_CDB_LEN_32 - normal 16 byte cdb length, + * then divide by 4 for the word count. + * shift 2 because of the RDDATA/WRDATA. + */ + if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN) + fcp_cmnd->fcpCntl3 |= 4 << 2; } else { /* From the icmnd template, initialize words 4 - 11 */ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], @@ -4796,7 +4815,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, int_to_scsilun(lpfc_cmd->pCmd->device->lun, &lpfc_cmd->fcp_cmnd->fcp_lun); - ptr = &fcp_cmnd->fcpCdb[0]; + ptr = &((struct fcp_cmnd32 *)fcp_cmnd)->fcpCdb[0]; memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { ptr += scsi_cmnd->cmd_len; @@ -5041,7 +5060,7 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba) /* Check for valid Emulex Device ID */ if (phba->sli_rev != LPFC_SLI_REV4 || - phba->hba_flag & HBA_FCOE_MODE) { + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8347 Incapable PCI reset device: " "0x%04x\n", ptr->device); @@ -5117,6 +5136,12 @@ lpfc_info(struct Scsi_Host *host) goto buffer_done; } + /* Support for BSG ioctls */ + scnprintf(tmp, sizeof(tmp), " BSG"); + if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= + sizeof(lpfcinfobuf)) + goto buffer_done; + /* PCI resettable */ if (!lpfc_check_pci_resettable(phba)) { scnprintf(tmp, sizeof(tmp), " PCI resettable"); @@ -5327,7 +5352,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) cmnd->cmnd[0], scsi_prot_ref_tag(cmnd), scsi_logical_block_count(cmnd), - (cmnd->cmnd[1]>>5)); + scsi_get_prot_type(cmnd)); } err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { @@ -5463,7 +5488,7 @@ void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) struct lpfc_vmid *cur; if (vport->port_type == LPFC_PHYSICAL_PORT) - del_timer_sync(&vport->phba->inactive_vmid_poll); + timer_delete_sync(&vport->phba->inactive_vmid_poll); kfree(vport->qfpa_res); kfree(vport->vmid_priority.vmid_range); @@ -5518,7 +5543,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) spin_lock(&phba->hbalock); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3168 SCSI Layer abort requested I/O has been " "flushed by LLD.\n"); @@ -5536,11 +5561,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) iocb = &lpfc_cmd->cur_iocbq; if (phba->sli_rev == LPFC_SLI_REV4) { - pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; - if (!pring_s4) { + /* if the io_wq & pring are gone, the port was reset. 
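Worked through, the fcpCntl3 adjustment for a 32-byte CDB is: LPFC_FCP_CDB_LEN_32 - LPFC_FCP_CDB_LEN = 16 extra CDB bytes = 4 words, and that word count sits above the RDDATA/WRDATA bits occupying bits 1:0, hence the shift by 2:

	/* (32 - 16) bytes / 4 = 4 additional CDB words; shifted past the
	 * two R/W data bits this is 4 << 2 == 0x10.
	 */
	if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
		fcp_cmnd->fcpCntl3 |= 4 << 2;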
*/ + if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq || + !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "2877 SCSI Layer I/O Abort Request " + "IO CMPL Status x%x ID %d LUN %llu " + "HBA_SETUP %d\n", FAILED, + cmnd->device->id, + (u64)cmnd->device->lun, + test_bit(HBA_SETUP, &phba->hba_flag)); ret = FAILED; goto out_unlock_hba; } + pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; spin_lock(&pring_s4->ring_lock); } /* the command is in process of being cancelled */ @@ -5611,9 +5645,8 @@ wait_for_cmpl: * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait * for abort to complete. */ - wait_event_timeout(waitq, - (lpfc_cmd->pCmd != cmnd), - msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); + wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), + secs_to_jiffies(2*vport->cfg_devloss_tmo)); spin_lock(&lpfc_cmd->buf_lock); @@ -5801,7 +5834,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %llu " - "rpi x%x nlp_flag x%x Data: x%x x%x\n", + "rpi x%x nlp_flag x%lx Data: x%x x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, iocbq->cmd_flag); @@ -5877,7 +5910,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport) * If target is not in a MAPPED state, delay until * target is rediscovered or devloss timeout expires. */ - later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies; while (time_after(later, jiffies)) { if (!pnode) return FAILED; @@ -5923,7 +5956,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, lpfc_sli_abort_taskmgmt(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], tgt_id, lun_id, context); - later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies; while (time_after(later, jiffies) && cnt) { schedule_timeout_uninterruptible(msecs_to_jiffies(20)); cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); @@ -6066,8 +6099,8 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { + clear_bit(NLP_NPR_ADISC, &pnode->nlp_flag); spin_lock_irqsave(&pnode->lock, flags); - pnode->nlp_flag &= ~NLP_NPR_ADISC; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; spin_unlock_irqrestore(&pnode->lock, flags); } @@ -6092,31 +6125,27 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) /* Issue LOGO, if no LOGO is outstanding */ spin_lock_irqsave(&pnode->lock, flags); - if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && + if (!test_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags) && !pnode->logo_waitq) { pnode->logo_waitq = &waitq; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - pnode->nlp_flag |= NLP_ISSUE_LOGO; - pnode->save_flags |= NLP_WAIT_FOR_LOGO; spin_unlock_irqrestore(&pnode->lock, flags); + set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag); + set_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags); lpfc_unreg_rpi(vport, pnode); wait_event_timeout(waitq, - (!(pnode->save_flags & - NLP_WAIT_FOR_LOGO)), - msecs_to_jiffies(dev_loss_tmo * - 1000)); + !test_bit(NLP_WAIT_FOR_LOGO, + &pnode->save_flags), + secs_to_jiffies(dev_loss_tmo)); - if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { + if (test_and_clear_bit(NLP_WAIT_FOR_LOGO, + &pnode->save_flags)) lpfc_printf_vlog(vport, KERN_ERR, logit, "0725 SCSI layer 
TGTRST " "failed & LOGO TMO (%d, %llu) " "return x%x\n", tgt_id, lun_id, status); - spin_lock_irqsave(&pnode->lock, flags); - pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; - } else { - spin_lock_irqsave(&pnode->lock, flags); - } + spin_lock_irqsave(&pnode->lock, flags); pnode->logo_waitq = NULL; spin_unlock_irqrestore(&pnode->lock, flags); status = SUCCESS; @@ -6198,7 +6227,7 @@ error: } /** - * lpfc_slave_alloc - scsi_host_template slave_alloc entry point + * lpfc_sdev_init - scsi_host_template sdev_init entry point * @sdev: Pointer to scsi_device. * * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's @@ -6211,7 +6240,7 @@ error: * 0 - Success **/ static int -lpfc_slave_alloc(struct scsi_device *sdev) +lpfc_sdev_init(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; @@ -6314,8 +6343,9 @@ lpfc_slave_alloc(struct scsi_device *sdev) } /** - * lpfc_slave_configure - scsi_host_template slave_configure entry point + * lpfc_sdev_configure - scsi_host_template sdev_configure entry point * @sdev: Pointer to scsi_device. + * @lim: Request queue limits. * * This routine configures following items * - Tag command queuing support for @sdev if supported. @@ -6325,7 +6355,7 @@ lpfc_slave_alloc(struct scsi_device *sdev) * 0 - Success **/ static int -lpfc_slave_configure(struct scsi_device *sdev) +lpfc_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; @@ -6343,13 +6373,13 @@ lpfc_slave_configure(struct scsi_device *sdev) } /** - * lpfc_slave_destroy - slave_destroy entry point of SHT data structure + * lpfc_sdev_destroy - sdev_destroy entry point of SHT data structure * @sdev: Pointer to scsi_device. * * This routine sets @sdev hostatdata filed to null. 
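The scsi_host_template hooks are renamed to track the midlayer API: slave_alloc/slave_configure/slave_destroy become sdev_init/sdev_configure/sdev_destroy, with the configure hook gaining a struct queue_limits * through which per-device limits are meant to be applied (lpfc's implementation does not need to touch it). The resulting template wiring:

	struct scsi_host_template lpfc_template = {
		/* ...unchanged fields elided... */
		.sdev_init	= lpfc_sdev_init,	/* was .slave_alloc     */
		.sdev_configure	= lpfc_sdev_configure,	/* was .slave_configure */
		.sdev_destroy	= lpfc_sdev_destroy,	/* was .slave_destroy   */
	};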
**/ static void -lpfc_slave_destroy(struct scsi_device *sdev) +lpfc_sdev_destroy(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; @@ -6394,7 +6424,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, { struct lpfc_device_data *lun_info; - int memory_flags; + gfp_t memory_flags; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || !(phba->cfg_fof)) @@ -6709,7 +6739,13 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) } static int -lpfc_no_slave(struct scsi_device *sdev) +lpfc_init_no_sdev(struct scsi_device *sdev) +{ + return -ENODEV; +} + +static int +lpfc_config_no_sdev(struct scsi_device *sdev, struct queue_limits *lim) { return -ENODEV; } @@ -6720,8 +6756,8 @@ struct scsi_host_template lpfc_template_nvme = { .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_no_command, - .slave_alloc = lpfc_no_slave, - .slave_configure = lpfc_no_slave, + .sdev_init = lpfc_init_no_sdev, + .sdev_configure = lpfc_config_no_sdev, .scan_finished = lpfc_scan_finished, .this_id = -1, .sg_tablesize = 1, @@ -6744,9 +6780,9 @@ struct scsi_host_template lpfc_template = { .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, .eh_host_reset_handler = lpfc_host_reset_handler, - .slave_alloc = lpfc_slave_alloc, - .slave_configure = lpfc_slave_configure, - .slave_destroy = lpfc_slave_destroy, + .sdev_init = lpfc_sdev_init, + .sdev_configure = lpfc_sdev_configure, + .sdev_destroy = lpfc_sdev_destroy, .scan_finished = lpfc_scan_finished, .this_id = -1, .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, @@ -6771,9 +6807,9 @@ struct scsi_host_template lpfc_vport_template = { .eh_target_reset_handler = lpfc_target_reset_handler, .eh_bus_reset_handler = NULL, .eh_host_reset_handler = NULL, - .slave_alloc = lpfc_slave_alloc, - .slave_configure = lpfc_slave_configure, - .slave_destroy = lpfc_slave_destroy, + .sdev_init = lpfc_sdev_init, + .sdev_configure = lpfc_sdev_configure, + .sdev_destroy = lpfc_sdev_destroy, .scan_finished = lpfc_scan_finished, .this_id = -1, .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index eae56944f31b..a05d203e4777 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -24,6 +24,7 @@ struct lpfc_hba; #define LPFC_FCP_CDB_LEN 16 +#define LPFC_FCP_CDB_LEN_32 32 #define list_remove_head(list, entry, type, member) \ do { \ @@ -99,17 +100,11 @@ struct fcp_rsp { #define SNSCOD_BADCMD 0x20 /* sense code is byte 13 ([12]) */ }; -struct fcp_cmnd { - struct scsi_lun fcp_lun; - - uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ - uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */ #define SIMPLE_Q 0x00 #define HEAD_OF_Q 0x01 #define ORDERED_Q 0x02 #define ACA_Q 0x04 #define UNTAGGED 0x05 - uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */ #define FCP_ABORT_TASK_SET 0x02 /* Bit 1 */ #define FCP_CLEAR_TASK_SET 0x04 /* bit 2 */ #define FCP_BUS_RESET 0x08 /* bit 3 */ @@ -117,12 +112,31 @@ struct fcp_cmnd { #define FCP_TARGET_RESET 0x20 /* bit 5 */ #define FCP_CLEAR_ACA 0x40 /* bit 6 */ #define FCP_TERMINATE_TASK 0x80 /* bit 7 */ - uint8_t fcpCntl3; #define WRITE_DATA 0x01 /* Bit 0 */ #define READ_DATA 0x02 /* Bit 1 */ +struct fcp_cmnd { + struct scsi_lun fcp_lun; + + uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ + uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */ + uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */ + uint8_t fcpCntl3; + uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */ - uint32_t fcpDl; /* Total transfer length */ + __be32 fcpDl; /* Total transfer length */ + +}; +struct fcp_cmnd32 { + struct scsi_lun fcp_lun; + + uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ + uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */ + uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */ + uint8_t fcpCntl3; + + uint8_t fcpCdb[LPFC_FCP_CDB_LEN_32]; /* SRB cdb field is copied here */ + __be32 fcpDl; /* Total transfer length */ }; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a028e008dd1e..2ebb073e4ef3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
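The lpfc_scsi.h reshuffle above hoists the FCP_CNTL task-code #defines out of the struct body so the same constants can serve both layouts, then adds struct fcp_cmnd32 with a 32-byte CDB field next to the original 16-byte form. Retyping fcpDl as __be32 also turns a missing byte swap into a sparse warning. A hedged sketch of how a caller might pick the layout and fill the wire-endian length; the helper names are hypothetical, and the structs are the ones defined above:

	#include <scsi/scsi_cmnd.h>
	#include "lpfc_scsi.h"	/* fcp_cmnd, fcp_cmnd32, LPFC_FCP_CDB_LEN */

	static size_t demo_fcp_cmnd_size(const struct scsi_cmnd *sc)
	{
		/* CDBs longer than 16 bytes need the larger payload. */
		return sc->cmd_len > LPFC_FCP_CDB_LEN ?
			sizeof(struct fcp_cmnd32) : sizeof(struct fcp_cmnd);
	}

	static void demo_set_fcpdl(struct fcp_cmnd *cmnd, struct scsi_cmnd *sc)
	{
		/* fcpDl is big-endian on the wire; the __be32 type makes
		 * an unswapped store visible to sparse.
		 */
		cmnd->fcpDl = cpu_to_be32(scsi_bufflen(sc));
	}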
* @@ -1024,9 +1024,9 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) unsigned long iflags; LIST_HEAD(send_rrq); - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); + next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1); + spin_lock_irqsave(&phba->rrq_list_lock, iflags); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) @@ -1034,7 +1034,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) else if (time_before(rrq->rrq_stop_time, next_time)) next_time = rrq->rrq_stop_time; } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflags); if ((!list_empty(&phba->active_rrq_list)) && (!test_bit(FC_UNLOADING, &phba->pport->load_flag))) mod_timer(&phba->rrq_tmr, next_time); @@ -1072,16 +1072,16 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) if (phba->sli_rev != LPFC_SLI_REV4) return NULL; - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irqsave(&phba->rrq_list_lock, iflags); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (rrq->vport == vport && rrq->xritag == xri && rrq->nlp_DID == did){ list_del(&rrq->list); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflags); return rrq; } } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflags); return NULL; } @@ -1109,7 +1109,7 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_sli4_vport_delete_els_xri_aborted(vport); lpfc_sli4_vport_delete_fcp_xri_aborted(vport); } - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irqsave(&phba->rrq_list_lock, iflags); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (rrq->vport != vport) continue; @@ -1118,7 +1118,7 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_move(&rrq->list, &rrq_list); } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflags); list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { list_del(&rrq->list); @@ -1179,12 +1179,12 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, if (!phba->cfg_enable_rrq) return -EINVAL; - spin_lock_irqsave(&phba->hbalock, iflags); if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { - phba->hba_flag &= ~HBA_RRQ_ACTIVE; - goto out; + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); + goto outnl; } + spin_lock_irqsave(&phba->hbalock, iflags); if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag)) goto out; @@ -1208,21 +1208,22 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, else rrq->send_rrq = 0; rrq->xritag = xritag; - rrq->rrq_stop_time = jiffies + - msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); + rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1); rrq->nlp_DID = ndlp->nlp_DID; rrq->vport = ndlp->vport; rrq->rxid = rxid; - spin_lock_irqsave(&phba->hbalock, iflags); + + spin_lock_irqsave(&phba->rrq_list_lock, iflags); empty = list_empty(&phba->active_rrq_list); list_add_tail(&rrq->list, &phba->active_rrq_list); - phba->hba_flag |= HBA_RRQ_ACTIVE; - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflags); + set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); if (empty) 
lpfc_worker_wake_up(phba); return 0; out: spin_unlock_irqrestore(&phba->hbalock, iflags); +outnl: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2921 Can't set rrq active xri:0x%x rxid:0x%x" " DID:0x%x Send:%d\n", @@ -1734,8 +1735,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, BUG_ON(!piocb->vport); if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag)) mod_timer(&piocb->vport->els_tmofunc, - jiffies + - msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); + jiffies + secs_to_jiffies(phba->fc_ratov << 1)); } return 0; @@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) union lpfc_wqe128 *wqe; struct lpfc_iocbq *sync_buf; unsigned long iflags; - u32 ret_val; + u32 ret_val, cgn_sig_freq; u32 atot, wtot, max; u8 warn_sync_period = 0; @@ -1938,12 +1938,15 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0); + spin_lock_irqsave(&phba->hbalock, iflags); + /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ if (phba->cmf_active_mode != LPFC_CFG_MANAGED || - phba->link_state == LPFC_LINK_DOWN) - return 0; + phba->link_state < LPFC_LINK_UP) { + ret_val = 0; + goto out_unlock; + } - spin_lock_irqsave(&phba->hbalock, iflags); sync_buf = __lpfc_sli_get_iocbq(phba); if (!sync_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, @@ -1982,8 +1985,10 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) } else if (wtot) { if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + cgn_sig_freq = phba->cgn_sig_freq ? phba->cgn_sig_freq : + lpfc_fabric_cgn_frequency; /* We hit an Signal warning condition */ - max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency * + max = LPFC_SEC_TO_MSEC / cgn_sig_freq * lpfc_acqe_cgn_frequency; bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); @@ -2837,27 +2842,6 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) return; } -static void -__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - unsigned long iflags; - - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - ndlp->nlp_flag &= ~NLP_UNREG_INP; -} - -void -lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - __lpfc_sli_rpi_release(vport, ndlp); -} - /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler * @phba: Pointer to HBA context object. 
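The pattern running through this file: hba_flag stops being a plain word guarded by hbalock and becomes an unsigned long updated with test_bit/set_bit/clear_bit, while the RRQ list gets its own narrower rrq_list_lock. Each bitop is atomic by itself, so flag-only updates drop the lock entirely and hbalock contention shrinks to the data that still needs it. A sketch of the shape, assuming hypothetical demo_ structures rather than lpfc's real ones:

	#include <linux/bitops.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	#define DEMO_RRQ_ACTIVE	0	/* bit number, not a mask */

	struct demo_hba {
		unsigned long	 flags;		/* atomic bitops only   */
		spinlock_t	 rrq_list_lock;	/* guards rrq_list only */
		struct list_head rrq_list;
	};

	static void demo_queue_rrq(struct demo_hba *hba, struct list_head *rrq)
	{
		unsigned long iflags;

		spin_lock_irqsave(&hba->rrq_list_lock, iflags);
		list_add_tail(rrq, &hba->rrq_list);
		spin_unlock_irqrestore(&hba->rrq_list_lock, iflags);

		/* Atomic on its own; no lock required. */
		set_bit(DEMO_RRQ_ACTIVE, &hba->flags);
	}

The same reasoning explains the test_and_clear_bit() in the UNREG_LOGIN completion below: reading and clearing NLP_UNREG_INP in one atomic step removes the window that a separate test_bit/clear_bit pair would leave.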
@@ -2927,18 +2911,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_DISCOVERY, "1438 UNREG cmpl deferred mbox x%x " - "on NPort x%x Data: x%x x%x x%px x%lx x%x\n", + "on NPort x%x Data: x%lx x%x x%px x%lx x%x\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp, vport->load_flag, kref_read(&ndlp->kref)); - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - __lpfc_sli_rpi_release(vport, ndlp); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The unreg_login mailbox is complete and had a @@ -2986,6 +2970,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_nodelist *ndlp; + bool unreg_inp; ndlp = pmb->ctx_ndlp; if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { @@ -2998,20 +2983,26 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, "0010 UNREG_LOGIN vpi:x%x " - "rpi:%x DID:%x defer x%x flg x%x " + "rpi:%x DID:%x defer x%x flg x%lx " "x%px\n", vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + + /* Cleanup the nlp_flag now that the UNREG RPI + * has completed. + */ + unreg_inp = test_and_clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); /* Check to see if there are any deferred * events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != - NLP_EVT_NOTHING_PENDING)) { + if (unreg_inp && + ndlp->nlp_defer_did != + NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog( vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, @@ -3020,14 +3011,12 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) "NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi( vport, ndlp->nlp_DID, 0); - } else { - __lpfc_sli_rpi_release(vport, ndlp); } + lpfc_nlp_put(ndlp); } } @@ -3937,12 +3926,19 @@ void lpfc_poll_eratt(struct timer_list *t) uint64_t sli_intr, cnt; phba = from_timer(phba, t, eratt_poll); - if (!(phba->hba_flag & HBA_SETUP)) - return; if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) return; + if (phba->sli_rev == LPFC_SLI_REV4 && + !test_bit(HBA_SETUP, &phba->hba_flag)) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0663 HBA still initializing 0x%lx, restart " + "timer\n", + phba->hba_flag); + goto restart_timer; + } + /* Here we will also keep track of interrupts per sec of the hba */ sli_intr = phba->sli.slistat.sli_intr; @@ -3961,14 +3957,16 @@ void lpfc_poll_eratt(struct timer_list *t) /* Check chip HA register for error event */ eratt = lpfc_sli_check_eratt(phba); - if (eratt) + if (eratt) { /* Tell the worker thread there is work to do */ lpfc_worker_wake_up(phba); - else - /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, - jiffies + - msecs_to_jiffies(1000 * phba->eratt_poll_interval)); + return; + } + +restart_timer: + /* Restart the timer for next eratt poll */ + mod_timer(&phba->eratt_poll, + jiffies + 
secs_to_jiffies(phba->eratt_poll_interval)); return; } @@ -4522,9 +4520,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, unsigned long iflag; int count = 0; - spin_lock_irqsave(&phba->hbalock, iflag); - phba->hba_flag &= ~HBA_SP_QUEUE_EVT; - spin_unlock_irqrestore(&phba->hbalock, iflag); + clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); while (!list_empty(&phba->sli4_hba.sp_queue_event)) { /* Get the response iocb from the head of work queue */ spin_lock_irqsave(&phba->hbalock, iflag); @@ -4681,14 +4677,23 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) uint32_t i; struct lpfc_iocbq *piocb, *next_iocb; - spin_lock_irq(&phba->hbalock); /* Indicate the I/O queues are flushed */ - phba->hba_flag |= HBA_IOQ_FLUSH; - spin_unlock_irq(&phba->hbalock); + set_bit(HBA_IOQ_FLUSH, &phba->hba_flag); /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (!phba->sli4_hba.hdwq || + !phba->sli4_hba.hdwq[i].io_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "7777 hdwq's deleted %lx " + "%lx %x %x\n", + phba->pport->load_flag, + phba->hba_flag, + phba->link_state, + phba->sli.sli_flag); + return; + } pring = phba->sli4_hba.hdwq[i].io_wq->pring; spin_lock_irq(&pring->ring_lock); @@ -4762,7 +4767,7 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) if (lpfc_readl(phba->HSregaddr, &status)) return 1; - phba->hba_flag |= HBA_NEEDS_CFG_PORT; + set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); /* * Check status register every 100ms for 5 retries, then every @@ -4841,7 +4846,7 @@ lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) } else phba->sli4_hba.intr_enable = 0; - phba->hba_flag &= ~HBA_SETUP; + clear_bit(HBA_SETUP, &phba->hba_flag); return retval; } @@ -5046,7 +5051,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) return 1; } - del_timer_sync(&psli->mbox_tmo); + timer_delete_sync(&psli->mbox_tmo); if (ha_copy & HA_ERATT) { writel(HA_ERATT, phba->HAregaddr); phba->pport->stopped = 1; @@ -5093,7 +5098,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) /* perform board reset */ phba->fc_eventTag = 0; phba->link_events = 0; - phba->hba_flag |= HBA_NEEDS_CFG_PORT; + set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); if (phba->pport) { phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; @@ -5153,7 +5158,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) /* Reset HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0295 Reset HBA Data: x%x x%x x%x\n", + "0295 Reset HBA Data: x%x x%x x%lx\n", phba->pport->port_state, psli->sli_flag, phba->hba_flag); @@ -5162,7 +5167,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->link_events = 0; phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; - phba->hba_flag &= ~HBA_SETUP; + clear_bit(HBA_SETUP, &phba->hba_flag); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~(LPFC_PROCESS_LA); @@ -5279,6 +5284,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) "0296 Restart HBA Data: x%x x%x\n", phba->pport->port_state, psli->sli_flag); + lpfc_sli4_queue_unset(phba); + rc = lpfc_sli4_brdreset(phba); if (rc) { phba->link_state = LPFC_HBA_ERROR; @@ -5406,7 +5413,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) return -EIO; } - phba->hba_flag |= HBA_NEEDS_CFG_PORT; + set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); @@ -5708,11 +5715,11 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) int longs; /* Enable ISR already does config_port because of config_msi mbx */ - if (phba->hba_flag & HBA_NEEDS_CFG_PORT) { + if 
(test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) { rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3); if (rc) return -EIO; - phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; + clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); } phba->fcp_embed_io = 0; /* SLI4 FC support only */ @@ -6006,9 +6013,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); - memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); - strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, + memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); + phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0'; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " @@ -7759,7 +7766,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) snprintf(mbox->u.mqe.un.set_host_data.un.data, LPFC_HOST_OS_DRIVER_VERSION_SIZE, "Linux %s v"LPFC_DRIVER_VERSION, - (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); + test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC"); } int @@ -8487,7 +8494,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); } } - phba->hba_flag &= ~HBA_SETUP; + clear_bit(HBA_SETUP, &phba->hba_flag); lpfc_sli4_dip(phba); @@ -8516,25 +8523,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) mqe = &mboxq->u.mqe; phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { - phba->hba_flag |= HBA_FCOE_MODE; + set_bit(HBA_FCOE_MODE, &phba->hba_flag); phba->fcp_embed_io = 0; /* SLI4 FC support only */ } else { - phba->hba_flag &= ~HBA_FCOE_MODE; + clear_bit(HBA_FCOE_MODE, &phba->hba_flag); } if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == LPFC_DCBX_CEE_MODE) - phba->hba_flag |= HBA_FIP_SUPPORT; + set_bit(HBA_FIP_SUPPORT, &phba->hba_flag); else - phba->hba_flag &= ~HBA_FIP_SUPPORT; + clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag); - phba->hba_flag &= ~HBA_IOQ_FLUSH; + clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag); if (phba->sli_rev != LPFC_SLI_REV4) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0376 READ_REV Error. SLI Level %d " "FCoE enabled %d\n", - phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); + phba->sli_rev, + test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 
1 : 0); rc = -EIO; kfree(vpd); goto out_free_mbox; @@ -8549,7 +8557,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) * to read FCoE param config regions, only read parameters if the * board is FCoE */ - if (phba->hba_flag & HBA_FCOE_MODE && + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) && lpfc_sli4_read_fcoe_params(phba)) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, "2570 Failed to read FCoE parameters\n"); @@ -8626,7 +8634,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc == MBX_SUCCESS) { - phba->hba_flag |= HBA_RECOVERABLE_UE; + set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag); /* Set 1Sec interval to detect UE */ phba->eratt_poll_interval = 1; phba->sli4_hba.ue_to_sr = bf_get( @@ -8677,7 +8685,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) } /* Performance Hints are ONLY for FCoE */ - if (phba->hba_flag & HBA_FCOE_MODE) { + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; else @@ -8735,6 +8743,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_sli_config_mbox_opcode_get( phba, mboxq), rc, dd); + /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. @@ -8747,6 +8756,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + lpfc_sli4_node_rpi_restore(phba); + lpfc_set_host_data(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); @@ -8808,7 +8819,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli4_queue_setup(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0381 Error %d during queue setup.\n ", rc); + "0381 Error %d during queue setup.\n", rc); goto out_stop_timers; } /* Initialize the driver internal SLI layer lists. 
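In the lpfc_sli4_get_ctl_attr() hunk above, the memset()+strlcat() pair becomes memcpy() plus an explicit terminator. strlcat() must scan the source for its NUL, and a firmware-supplied fixed-width field is not guaranteed to contain one, so the scan could run past the field. Copying the whole field and forcing termination avoids reading beyond it. A sketch of the idiom, with a hypothetical helper:

	#include <linux/string.h>

	/* Copy a fixed-width, possibly unterminated firmware string.
	 * Assumes the source field is at least dst_len bytes.
	 */
	static void demo_copy_fw_string(char *dst, size_t dst_len,
					const void *src)
	{
		memcpy(dst, src, dst_len);
		dst[dst_len - 1] = '\0';	/* source may lack a NUL */
	}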
*/ @@ -8934,9 +8945,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_iocblist; } - lpfc_sli4_node_prep(phba); - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { /* * The FC Port needs to register FCFI (index 0) @@ -9007,12 +9017,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, - jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); + jiffies + secs_to_jiffies(phba->fc_ratov * 2)); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); phba->last_completion_time = jiffies; /* start eq_delay heartbeat */ @@ -9025,7 +9036,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, - jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); + jiffies + secs_to_jiffies(phba->eratt_poll_interval)); /* * The port is ready, set the host's link state to LINK_DOWN @@ -9054,8 +9065,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Setup CMF after HBA is initialized */ lpfc_cmf_setup(phba); - if (!(phba->hba_flag & HBA_FCOE_MODE) && - (phba->hba_flag & LINK_DISABLED)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && + test_bit(LINK_DISABLED, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3103 Adapter Link is disabled.\n"); lpfc_down_link(phba, mboxq); @@ -9079,7 +9090,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Enable RAS FW log support */ lpfc_sli4_ras_setup(phba); - phba->hba_flag |= HBA_SETUP; + set_bit(HBA_SETUP, &phba->hba_flag); return rc; out_io_buff_free: @@ -9383,7 +9394,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, } /* If HBA has a deferred error attention, fail the iocb. */ - if (unlikely(phba->hba_flag & DEFER_ERATT)) { + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); goto out_not_finished; } @@ -9502,8 +9513,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } /* timeout active mbox command */ - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * - 1000); + timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)); mod_timer(&psli->mbox_tmo, jiffies + timeout); } @@ -9627,8 +9637,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, drvr_flag); goto out_not_finished; } - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * - 1000) + jiffies; + timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies; i = 0; /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || @@ -9754,9 +9763,8 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) * command to be gracefully completed by firmware. 
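Most of the timer churn in this file is the mechanical conversion of msecs_to_jiffies(1000 * n) to secs_to_jiffies(n). The two are equivalent for whole seconds; the newer helper simply drops the easy-to-miss manual multiply. A before/after sketch:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	static void demo_arm_timer(struct timer_list *t, unsigned int secs)
	{
		/* Old spelling: jiffies + msecs_to_jiffies(1000 * secs) */
		mod_timer(t, jiffies + secs_to_jiffies(secs));
	}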
*/ if (phba->sli.mbox_active) - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, - phba->sli.mbox_active) * - 1000) + jiffies; + timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active)) + jiffies; spin_unlock_irq(&phba->hbalock); /* Make sure the mailbox is really active */ @@ -9879,8 +9887,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) } } - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) - * 1000) + jiffies; + timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies; do { bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); @@ -10228,7 +10235,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + - msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); + secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " @@ -10447,7 +10454,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_ERROR; /* If HBA has a deferred error attention, fail the iocb. */ - if (unlikely(phba->hba_flag & DEFER_ERATT)) + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) return IOCB_ERROR; /* @@ -10579,10 +10586,11 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq; union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe; - struct sli4_sge *sgl; + struct sli4_sge_le *sgl; + u32 type_size; /* 128 byte wqe support here */ - sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl; if (phba->fcp_embed_io) { struct fcp_cmnd *fcp_cmnd; @@ -10591,24 +10599,24 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Word 0-2 - FCP_CMND */ - wqe->generic.bde.tus.f.bdeFlags = - BUFF_TYPE_BDE_IMMED; - wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; + type_size = le32_to_cpu(sgl->sge_len); + type_size |= ULP_BDE64_TYPE_BDE_IMMED; + wqe->generic.bde.tus.w = type_size; wqe->generic.bde.addrHigh = 0; - wqe->generic.bde.addrLow = 88; /* Word 22 */ + wqe->generic.bde.addrLow = 72; /* Word 18 */ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); - /* Word 22-29 FCP CMND Payload */ - ptr = &wqe->words[22]; - memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); + /* Word 18-29 FCP CMND Payload */ + ptr = &wqe->words[18]; + lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len)); } else { /* Word 0-2 - Inline BDE */ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; - wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd); - wqe->generic.bde.addrHigh = sgl->addr_hi; - wqe->generic.bde.addrLow = sgl->addr_lo; + wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len); + wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi); + wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo); /* Word 10 */ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); @@ -11078,9 +11086,17 @@ __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, /* Word 9 */ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); - /* Word 12 */ - if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) + if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) { + /* Word 10 */ + if (cmdiocbq->cmd_flag & LPFC_IO_VMID) { + bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1); + bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1); + wqe->words[31] = 
LOOPBACK_SRC_APPID; + } + + /* Word 12 */ wqe->xmit_sequence.xmit_len = full_size; + } else wqe->xmit_sequence.xmit_len = wqe->xmit_sequence.bde.tus.f.bdeSize; @@ -12070,7 +12086,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) local_bh_enable(); /* Return any active mbox cmds */ - del_timer_sync(&psli->mbox_tmo); + timer_delete_sync(&psli->mbox_tmo); spin_lock_irqsave(&phba->pport->work_port_lock, flags); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; @@ -12301,18 +12317,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto release_iocb; } } - - lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, - "0327 Cannot abort els iocb x%px " - "with io cmd xri %x abort tag : x%x, " - "abort status %x abort code %x\n", - cmdiocb, get_job_abtsiotag(phba, cmdiocb), - (phba->sli_rev == LPFC_SLI_REV4) ? - get_wqe_reqtag(cmdiocb) : - cmdiocb->iocb.un.acxri.abortContextTag, - ulp_status, ulp_word4); - } + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, + "0327 Abort els iocb complete x%px with io cmd xri %x " + "abort tag x%x abort status %x abort code %x\n", + cmdiocb, get_job_abtsiotag(phba, cmdiocb), + (phba->sli_rev == LPFC_SLI_REV4) ? + get_wqe_reqtag(cmdiocb) : + cmdiocb->iocb.ulpIoTag, + ulp_status, ulp_word4); release_iocb: lpfc_sli_release_iocbq(phba, cmdiocb); return; @@ -12361,10 +12375,10 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "0139 Ignoring ELS cmd code x%x completion Data: " + "0139 Ignoring ELS cmd code x%x ref cnt x%x Data: " "x%x x%x x%x x%px\n", - ulp_command, ulp_status, ulp_word4, iotag, - cmdiocb->ndlp); + ulp_command, kref_read(&cmdiocb->ndlp->kref), + ulp_status, ulp_word4, iotag, cmdiocb->ndlp); /* * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp * if exchange is busy. @@ -12460,7 +12474,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - if (phba->link_state < LPFC_LINK_UP || + /* Just close the exchange under certain conditions. */ + if (test_bit(FC_UNLOADING, &vport->load_flag) || + phba->link_state < LPFC_LINK_UP || (phba->sli_rev == LPFC_SLI_REV4 && phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) || (phba->link_flag & LS_EXTERNAL_LOOPBACK)) @@ -12472,8 +12488,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, cmdiocb->iocb.ulpClass, LPFC_WQE_CQ_ID_DEFAULT, ia, false); - abtsiocbp->vport = vport; - /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; if (cmdiocb->cmd_flag & LPFC_IO_FCP) @@ -12507,10 +12521,10 @@ abort_iotag_exit: lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0339 Abort IO XRI x%x, Original iotag x%x, " "abort tag x%x Cmdjob : x%px Abortjob : x%px " - "retval x%x\n", + "retval x%x : IA %d cmd_cmpl %ps\n", ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ? 
cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp, - retval); + retval, ia, abtsiocbp->cmd_cmpl); if (retval) { cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; __lpfc_sli_release_iocbq(phba, abtsiocbp); @@ -12775,7 +12789,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, int i; /* all I/Os are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) return errcnt; for (i = 1; i <= phba->sli.last_iotag; i++) { @@ -12845,15 +12859,13 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; bool ia; - spin_lock_irqsave(&phba->hbalock, iflags); - /* all I/Os are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) return 0; - } + sum = 0; + spin_lock_irqsave(&phba->hbalock, iflags); for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; @@ -13152,7 +13164,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = msecs_to_jiffies(timeout * 1000); + timeout_req = secs_to_jiffies(timeout); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); @@ -13268,8 +13280,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, /* now issue the command */ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if (retval == MBX_BUSY || retval == MBX_SUCCESS) { - wait_for_completion_timeout(&mbox_done, - msecs_to_jiffies(timeout * 1000)); + wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout)); spin_lock_irqsave(&phba->hbalock, flag); pmboxq->ctx_u.mbox_wait = NULL; @@ -13316,7 +13327,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) lpfc_sli_mbox_sys_flush(phba); return; } - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; /* Disable softirqs, including timers from obtaining phba->hbalock */ local_bh_disable(); @@ -13329,9 +13340,8 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) * command to be gracefully completed by firmware. 
*/ if (phba->sli.mbox_active) - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, - phba->sli.mbox_active) * - 1000) + jiffies; + timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active)) + jiffies; spin_unlock_irq(&phba->hbalock); /* Enable softirqs again, done with phba->hbalock */ @@ -13385,7 +13395,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) if ((HS_FFER1 & phba->work_hs) && ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { - phba->hba_flag |= DEFER_ERATT; + set_bit(DEFER_ERATT, &phba->hba_flag); /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); @@ -13394,7 +13404,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) /* Set the driver HA work bitmap */ phba->work_ha |= HA_ERATT; /* Indicate polling handles this ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); return 1; } return 0; @@ -13405,7 +13415,7 @@ unplug_err: /* Set the driver HA work bitmap */ phba->work_ha |= HA_ERATT; /* Indicate polling handles this ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); return 1; } @@ -13441,7 +13451,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) &uerr_sta_hi)) { phba->work_hs |= UNPLUG_ERR; phba->work_ha |= HA_ERATT; - phba->hba_flag |= HBA_ERATT_HANDLED; + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); return 1; } if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || @@ -13457,7 +13467,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) phba->work_status[0] = uerr_sta_lo; phba->work_status[1] = uerr_sta_hi; phba->work_ha |= HA_ERATT; - phba->hba_flag |= HBA_ERATT_HANDLED; + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); return 1; } break; @@ -13469,7 +13479,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) &portsmphr)){ phba->work_hs |= UNPLUG_ERR; phba->work_ha |= HA_ERATT; - phba->hba_flag |= HBA_ERATT_HANDLED; + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); return 1; } if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { @@ -13492,7 +13502,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) phba->work_status[0], phba->work_status[1]); phba->work_ha |= HA_ERATT; - phba->hba_flag |= HBA_ERATT_HANDLED; + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); return 1; } break; @@ -13529,22 +13539,18 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) return 0; /* Check if interrupt handler handles this ERATT */ - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & HBA_ERATT_HANDLED) { + if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) /* Interrupt handler has handled ERATT */ - spin_unlock_irq(&phba->hbalock); return 0; - } /* * If there is deferred error attention, do not check for error * attention */ - if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irq(&phba->hbalock); + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) return 0; - } + spin_lock_irq(&phba->hbalock); /* If PCI channel is offline, don't process it */ if (unlikely(pci_channel_offline(phba->pcidev))) { spin_unlock_irq(&phba->hbalock); @@ -13666,19 +13672,17 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) ha_copy &= ~HA_ERATT; /* Check the need for handling ERATT in interrupt handler */ if (ha_copy & HA_ERATT) { - if (phba->hba_flag & HBA_ERATT_HANDLED) + if (test_and_set_bit(HBA_ERATT_HANDLED, + &phba->hba_flag)) /* ERATT polling has handled ERATT */ ha_copy &= ~HA_ERATT; - else - /* Indicate interrupt handler handles ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; } /* * If there is deferred error attention, do not check for any * 
interrupt. */ - if (unlikely(phba->hba_flag & DEFER_ERATT)) { + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } @@ -13774,7 +13778,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { - phba->hba_flag |= DEFER_ERATT; + set_bit(DEFER_ERATT, &phba->hba_flag); /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); @@ -13808,7 +13812,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) phba->sli.mbox_active = NULL; spin_unlock_irqrestore(&phba->hbalock, iflag); phba->last_completion_time = jiffies; - del_timer(&phba->sli.mbox_tmo); + timer_delete(&phba->sli.mbox_tmo); if (pmb->mbox_cmpl) { lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE); @@ -13961,16 +13965,16 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) /* Need to read HA REG for FCP ring and other ring events */ if (lpfc_readl(phba->HAregaddr, &ha_copy)) return IRQ_HANDLED; - /* Clear up only attention source related to fast-path */ - spin_lock_irqsave(&phba->hbalock, iflag); + /* * If there is deferred error attention, do not check for * any interrupt. */ - if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irqrestore(&phba->hbalock, iflag); + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) return IRQ_NONE; - } + + /* Clear up only attention source related to fast-path */ + spin_lock_irqsave(&phba->hbalock, iflag); writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), phba->HAregaddr); readl(phba->HAregaddr); /* flush */ @@ -14053,18 +14057,15 @@ lpfc_sli_intr_handler(int irq, void *dev_id) spin_unlock(&phba->hbalock); return IRQ_NONE; } else if (phba->ha_copy & HA_ERATT) { - if (phba->hba_flag & HBA_ERATT_HANDLED) + if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) /* ERATT polling has handled ERATT */ phba->ha_copy &= ~HA_ERATT; - else - /* Indicate interrupt handler handles ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; } /* * If there is deferred error attention, do not check for any interrupt. 
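The interrupt-handler hunks above replace a test of HBA_ERATT_HANDLED followed by a separate set with a single test_and_set_bit(). Both the polling timer and the hard IRQ path may observe the same error attention; the atomic read-modify-write guarantees that exactly one of them claims it, where the old two-step left a window for both to proceed. A sketch of the claim pattern, with hypothetical names:

	#include <linux/bitops.h>

	#define DEMO_ERATT_HANDLED	0

	/* Returns true for exactly one claimant, false for the rest. */
	static bool demo_claim_eratt(unsigned long *flags)
	{
		return !test_and_set_bit(DEMO_ERATT_HANDLED, flags);
	}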
*/ - if (unlikely(phba->hba_flag & DEFER_ERATT)) { + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { spin_unlock(&phba->hbalock); return IRQ_NONE; } @@ -14135,9 +14136,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) unsigned long iflags; /* First, declare the els xri abort event has been handled */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); + clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag); /* Now, handle all the els xri abort events */ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); @@ -14263,9 +14262,7 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); /* Set the async event flag */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag |= ASYNC_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); + set_bit(ASYNC_EVENT, &phba->hba_flag); return true; } @@ -14315,7 +14312,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) /* Reset heartbeat timer */ phba->last_completion_time = jiffies; - del_timer(&phba->sli.mbox_tmo); + timer_delete(&phba->sli.mbox_tmo); /* Move mbox data to caller's mailbox region, do endian swapping */ if (pmb->mbox_cmpl && mbox) @@ -14346,9 +14343,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) * an unsolicited PLOGI from the same NPortId from * starting another mailbox transaction. */ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_UNREG_INP; - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; @@ -14505,8 +14500,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&irspiocbq->cq_event.list, &phba->sli4_hba.sp_queue_event); - phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); + set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); return true; } @@ -14580,7 +14575,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, list_add_tail(&cq_event->list, &phba->sli4_hba.sp_els_xri_aborted_work_queue); /* Set the els xri abort event flag */ - phba->hba_flag |= ELS_XRI_ABORT_EVENT; + set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag); spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); workposted = true; @@ -14667,9 +14662,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) /* save off the frame for the work thread to process */ list_add_tail(&dma_buf->cq_event.list, &phba->sli4_hba.sp_queue_event); - /* Frame received */ - phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); + /* Frame received */ + set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); workposted = true; break; case FC_STATUS_INSUFF_BUF_FRM_DISC: @@ -14689,9 +14684,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; - spin_unlock_irqrestore(&phba->hbalock, iflags); + set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag); workposted = true; break; case FC_STATUS_RQ_DMA_FAILURE: @@ -15706,7 +15699,7 @@ static inline void lpfc_sli4_remove_from_poll_list(struct 
lpfc_queue *eq) synchronize_rcu(); if (list_empty(&phba->poll_list)) - del_timer_sync(&phba->cpuhp_poll_timer); + timer_delete_sync(&phba->cpuhp_poll_timer); } void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba) @@ -17621,6 +17614,9 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) if (!eq) return -ENODEV; + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17647,10 +17643,12 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, eq->phba->mbox_mem_pool); +list_remove: /* Remove eq from any list */ list_del_init(&eq->list); - mempool_free(mbox, eq->phba->mbox_mem_pool); + return status; } @@ -17678,6 +17676,10 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) /* sanity check on queue memory */ if (!cq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17703,9 +17705,11 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, cq->phba->mbox_mem_pool); + +list_remove: /* Remove cq from any list */ list_del_init(&cq->list); - mempool_free(mbox, cq->phba->mbox_mem_pool); return status; } @@ -17733,6 +17737,10 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) /* sanity check on queue memory */ if (!mq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17758,9 +17766,11 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, mq->phba->mbox_mem_pool); + +list_remove: /* Remove mq from any list */ list_del_init(&mq->list); - mempool_free(mbox, mq->phba->mbox_mem_pool); return status; } @@ -17788,6 +17798,10 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) /* sanity check on queue memory */ if (!wq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17812,11 +17826,13 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, wq->phba->mbox_mem_pool); + +list_remove: /* Remove wq from any list */ list_del_init(&wq->list); kfree(wq->pring); wq->pring = NULL; - mempool_free(mbox, wq->phba->mbox_mem_pool); return status; } @@ -17846,6 +17862,10 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, /* sanity check on queue memory */ if (!hrq || !drq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17886,9 +17906,11 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, hrq->phba->mbox_mem_pool); + +list_remove: list_del_init(&hrq->list); list_del_init(&drq->list); - mempool_free(mbox, hrq->phba->mbox_mem_pool); return status; } @@ -18438,6 +18460,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) { /* make rctl_names static to save stack space */ struct fc_vft_header *fc_vft_hdr; + 
struct fc_app_header *fc_app_hdr; uint32_t *header = (uint32_t *) fc_hdr; #define FC_RCTL_MDS_DIAGS 0xF4 @@ -18493,6 +18516,32 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) goto drop; } + if (unlikely(phba->link_flag == LS_LOOPBACK_MODE && + phba->cfg_vmid_app_header)) { + /* Application header is 16B device header */ + if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) { + fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1); + if (be32_to_cpu(fc_app_hdr->src_app_id) != + LOOPBACK_SRC_APPID) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_LIBDFC, + "1932 Loopback src app id " + "not matched, app_id:x%x\n", + be32_to_cpu(fc_app_hdr->src_app_id)); + + goto drop; + } + } else { + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_LIBDFC, + "1933 Loopback df_ctl bit not set, " + "df_ctl:x%x\n", + fc_hdr->fh_df_ctl); + + goto drop; + } + } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "2538 Received frame rctl:x%x, type:x%x, " "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", @@ -19043,9 +19092,9 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * to free ndlp when transmit completes */ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE && - !(ndlp->nlp_flag & NLP_DROPPED) && + !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) { - ndlp->nlp_flag |= NLP_DROPPED; + set_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -19349,8 +19398,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&dmabuf->cq_event.list, &phba->sli4_hba.sp_queue_event); - phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); + set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); lpfc_worker_wake_up(phba); return; } @@ -20102,9 +20151,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; - spin_lock_irq(&phba->hbalock); - phba->hba_flag |= FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + set_bit(FCF_TS_INPROG, &phba->hba_flag); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) @@ -20120,9 +20167,7 @@ fail_fcf_scan: if (mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); /* FCF scan failed, clear FCF_TS_INPROG flag */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); } return error; } @@ -20779,7 +20824,7 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba) /* This HBA contains PORT_STE configured */ if (!rgn23_data[offset + 2]) - phba->hba_flag |= LINK_DISABLED; + set_bit(LINK_DISABLED, &phba->hba_flag); goto out; } @@ -21067,11 +21112,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Unregister the RPI when mailbox complete */ mb->mbox_flag |= LPFC_MBX_IMED_UNREG; restart_loop = 1; - spin_unlock_irq(&phba->hbalock); - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); - spin_lock_irq(&phba->hbalock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); break; } } @@ -21086,9 +21127,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) ndlp = mb->ctx_ndlp; mb->ctx_ndlp = NULL; if (ndlp) { - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21097,9 +21136,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Release the ndlp with the cleaned-up 
active mailbox command */ if (act_mbx_ndlp) { - spin_lock(&act_mbx_ndlp->lock); - act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&act_mbx_ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag); lpfc_nlp_put(act_mbx_ndlp); } } @@ -21160,7 +21197,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) if (!piocbq) { spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2823 txq empty and txq_cnt is %d\n ", + "2823 txq empty and txq_cnt is %d\n", txq_cnt); break; } @@ -22488,7 +22525,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, } tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd + - sizeof(struct fcp_cmnd)); + sizeof(struct fcp_cmnd32)); spin_lock_irqsave(&hdwq->hdwq_lock, iflags); list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list); @@ -22593,12 +22630,13 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job) u8 cmnd; u32 *pcmd; u32 if_type = 0; - u32 fip, abort_tag; + u32 abort_tag; + bool fip; struct lpfc_nodelist *ndlp = NULL; union lpfc_wqe128 *wqe = &job->wqe; u8 command_type = ELS_COMMAND_NON_FIP; - fip = phba->hba_flag & HBA_FIP_SUPPORT; + fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag); /* The fcp commands will set command type */ if (job->cmd_flag & LPFC_IO_FCP) command_type = FCP_COMMAND; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index c1e9ec0243ba..9be3da91c923 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -865,8 +865,6 @@ struct lpfc_sli4_hba { struct lpfc_name wwpn; uint32_t fw_func_mode; /* FW function protocol mode */ - uint32_t ulp0_mode; /* ULP0 protocol mode */ - uint32_t ulp1_mode; /* ULP1 protocol mode */ /* Optimized Access Storage specific queues/structures */ uint64_t oas_next_lun; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 915f2f11fb55..749688aa8a82 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.4.0.1" +#define LPFC_DRIVER_VERSION "14.4.0.9" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -32,6 +32,6 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \ +#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \ "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \ "and/or its subsidiaries." diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c index 773e02ae20c3..14dbfe954e42 100644 --- a/drivers/scsi/lpfc/lpfc_vmid.c +++ b/drivers/scsi/lpfc/lpfc_vmid.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. 
The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { mod_timer(&vport->phba->inactive_vmid_poll, jiffies + - msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); + secs_to_jiffies(LPFC_VMID_TIMER)); vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; } } @@ -321,6 +321,5 @@ lpfc_reinit_vmid(struct lpfc_vport *vport) if (!hash_empty(vport->hash_table)) hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode) hash_del(&cur->hnode); - vport->vmid_flag = 0; write_unlock(&vport->vmid_lock); } diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 4439167a5188..2797aa75a689 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -246,7 +246,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) * fabric RA_TOV value and dev_loss tmo. The driver's * devloss_tmo is 10 giving this loop a 3x multiplier minimally. */ - wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000); + wait_time_max = secs_to_jiffies((phba->fc_ratov * 3) + 3); wait_time_max += jiffies; start_time = jiffies; while (time_before(jiffies, wait_time_max)) { @@ -492,21 +492,22 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); spin_lock_irq(&ndlp->lock); - if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) && + if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags) && !ndlp->logo_waitq) { ndlp->logo_waitq = &waitq; ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; - ndlp->save_flags |= NLP_WAIT_FOR_LOGO; + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); + set_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags); } spin_unlock_irq(&ndlp->lock); rc = lpfc_issue_els_npiv_logo(vport, ndlp); if (!rc) { wait_event_timeout(waitq, - (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)), - msecs_to_jiffies(phba->fc_ratov * 2000)); + !test_bit(NLP_WAIT_FOR_LOGO, + &ndlp->save_flags), + secs_to_jiffies(phba->fc_ratov * 2)); - if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)) + if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) goto logo_cmpl; /* LOGO wait failed. Correct status. */ rc = -EINTR; @@ -515,10 +516,8 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } /* Error - clean up node flags. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; - ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); + clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags); logo_cmpl: lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, @@ -626,6 +625,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; int rc; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -679,21 +679,50 @@ lpfc_vport_delete(struct fc_vport *fc_vport) if (!ndlp) goto skip_logo; + /* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && phba->link_state >= LPFC_LINK_UP && phba->fc_topology != LPFC_TOPOLOGY_LOOP) { if (vport->cfg_enable_da_id) { - /* Send DA_ID and wait for a completion. 
*/ + /* Send DA_ID and wait for a completion. This is best + * effort. If the DA_ID fails, likely the fabric will + * "leak" NportIDs but at least the driver issued the + * command. + */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (!ndlp) + goto issue_logo; + + spin_lock_irq(&ndlp->lock); + ndlp->da_id_waitq = &waitq; + spin_unlock_irq(&ndlp->lock); + set_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags); + rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0); - if (rc) { - lpfc_printf_log(vport->phba, KERN_WARNING, - LOG_VPORT, - "1829 CT command failed to " - "delete objects on fabric, " - "rc %d\n", rc); + if (!rc) { + wait_event_timeout(waitq, + !test_bit(NLP_WAIT_FOR_DA_ID, + &ndlp->save_flags), + secs_to_jiffies(phba->fc_ratov * 2)); } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, + "1829 DA_ID issue status %d. " + "SFlag x%lx NState x%x, NFlag x%lx " + "Rpi x%x\n", + rc, ndlp->save_flags, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); + + /* Remove the waitq and save_flags. It no + * longer matters if the wake happened. + */ + spin_lock_irq(&ndlp->lock); + ndlp->da_id_waitq = NULL; + spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags); } +issue_logo: /* * If the vpi is not registered, then a valid FDISC doesn't * exist and there is no need for a ELS LOGO. Just cleanup diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 187ae0a65d40..ff0253d47a0e 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -432,7 +432,7 @@ static void esp_mac_remove(struct platform_device *dev) static struct platform_driver esp_mac_driver = { .probe = esp_mac_probe, - .remove_new = esp_mac_remove, + .remove = esp_mac_remove, .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 181f16899fdc..a86bd839d08e 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup); * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets * so bus errors are unavoidable. * - * If a MOVE.B instruction faults, we assume that zero bytes were transferred - * and simply retry. That assumption probably depends on target behaviour but - * seems to hold up okay. The NOP provides synchronization: without it the - * fault can sometimes occur after the program counter has moved past the - * offending instruction. Post-increment addressing can't be used. + * If a MOVE.B instruction faults during a receive operation, we assume the + * target sent nothing and try again. That assumption probably depends on + * target firmware but it seems to hold up okay. If a fault happens during a + * send operation, the target may or may not have seen /ACK and got the byte. + * It's uncertain so the whole SCSI command gets retried. + * + * The NOP is needed for synchronization because the fault address in the + * exception stack frame may or may not be the instruction that actually + * caused the bus error. Post-increment addressing can't be used. 
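The mac_scsi rework distinguishes "nothing moved, retry the byte" from "the target may have consumed data, retry the whole command". mac_pdma_send() now encodes the uncertain case as start - addr - 1, i.e. -(bytes + 1): negation flags uncertainty, and the extra -1 keeps "zero bytes, uncertain" distinct from "zero bytes, ok". If that reading of the encoding is right, a decoder would look like this sketch (names hypothetical):

	#include <linux/errno.h>

	static int demo_decode_pdma(int ret, int *done, int *maybe_done)
	{
		if (ret >= 0) {		/* definite byte count */
			*done = ret;
			*maybe_done = 0;
			return 0;
		}
		*done = 0;		/* fault: count is only a bound */
		*maybe_done = -ret - 1;
		return -EIO;
	}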
*/ #define MOVE_BYTE(operands) \ @@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup); ".previous \n" \ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) -#define MAC_PDMA_DELAY 32 - static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) { unsigned char *addr = start; @@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) if (n >= 1) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -1; } if (n >= 1 && ((unsigned long)addr & 1)) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -2; } while (n >= 32) MOVE_16_WORDS("%0@+,%3@"); while (n >= 2) MOVE_WORD("%0@+,%3@"); if (result) - return start - addr; /* Negated to indicate uncertain length */ + return start - addr - 1; /* Negated to indicate uncertain length */ if (n == 1) MOVE_BYTE("%0@,%3@"); -out: return addr - start; } @@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value) out_be32(hostdata->io + (CTRL_REG << 4), value); } +static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata) +{ + unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */ + unsigned char basr; + +again: + basr = NCR5380_read(BUS_AND_STATUS_REG); + + if (!(basr & BASR_PHASE_MATCH)) + return 1; + + if (basr & BASR_IRQ) + return -1; + + if (basr & BASR_DRQ) + return 0; + + if (n-- == 0) { + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: DRQ timeout\n", __func__); + return -1; + } + + NCR5380_poll_politely2(hostdata, + BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, + BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0); + goto again; +} + static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; - int result = 0; hostdata->pdma_residual = len; - while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes; if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE); - bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_recv(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); if (bytes > 0) { d += bytes; @@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, } if (hostdata->pdma_residual == 0) - goto out; + break; - if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue; - if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, d - dst, len, bytes, chunk_bytes); - if (bytes >= 0) + if (bytes == 0) continue; - dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, d - dst, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; } - 
scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; } static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, @@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, { unsigned char *s = src; u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); - int result = 0; hostdata->pdma_residual = len; - while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes; if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE); - bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_send(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); if (bytes > 0) { s += bytes; hostdata->pdma_residual -= bytes; } - if (hostdata->pdma_residual == 0) { - if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, - TCR_LAST_BYTE_SENT, - TCR_LAST_BYTE_SENT, - 0) < 0) { - scmd_printk(KERN_ERR, hostdata->connected, - "%s: Last Byte Sent timeout\n", __func__); - result = -1; - } - goto out; - } + if (hostdata->pdma_residual == 0) + break; - if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue; - if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, s - src, len, bytes, chunk_bytes); - if (bytes >= 0) + if (bytes == 0) continue; - dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, s - src, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; } - scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; } static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, @@ -432,7 +432,7 @@ static struct scsi_host_template mac_scsi_template = { .eh_host_reset_handler = macscsi_host_reset, .can_queue = 16, .this_id = 7, - .sg_tablesize = 1, + .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), @@ -470,6 +470,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev) if (setup_hostid >= 0) mac_scsi_template.this_id = setup_hostid & 7; + if (macintosh_config->ident == MAC_MODEL_IIFX) + mac_scsi_template.sg_tablesize = 1; + instance = scsi_host_alloc(&mac_scsi_template, sizeof(struct NCR5380_hostdata)); if (!instance) @@ -491,6 +494,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev) host_flags |= setup_toshiba_delay > 0 ? 
FLAG_TOSHIBA_DELAY : 0; + if (instance->sg_tablesize > 1) + host_flags |= FLAG_DMA_FIXUP; + error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP); if (error) goto fail_init; @@ -534,8 +540,14 @@ static void __exit mac_scsi_remove(struct platform_device *pdev) scsi_host_put(instance); } -static struct platform_driver mac_scsi_driver = { - .remove_new = __exit_p(mac_scsi_remove), +/* + * mac_scsi_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. + */ +static struct platform_driver mac_scsi_driver __refdata = { + .remove = __exit_p(mac_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, @@ -544,4 +556,5 @@ static struct platform_driver mac_scsi_driver = { module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe); MODULE_ALIAS("platform:" DRV_MODULE_NAME); +MODULE_DESCRIPTION("Macintosh NCR5380 SCSI driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 38976f94453e..2006094af418 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -855,8 +855,8 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) return scb; #if MEGA_HAVE_CLUSTERING - case RESERVE: - case RELEASE: + case RESERVE_6: + case RELEASE_6: /* * Do we support clustering and is the support enabled @@ -875,7 +875,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) } scb->raw_mbox[0] = MEGA_CLUSTER_CMD; - scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? + scb->raw_mbox[2] = *cmd->cmnd == RESERVE_6 ? MEGA_RESERVE_LD : MEGA_RELEASE_LD; scb->raw_mbox[3] = ldrv_num; @@ -1618,8 +1618,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) * failed or the input parameter is invalid */ if( status == 1 && - (cmd->cmnd[0] == RESERVE || - cmd->cmnd[0] == RELEASE) ) { + (cmd->cmnd[0] == RESERVE_6 || + cmd->cmnd[0] == RELEASE_6) ) { cmd->result |= (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT; @@ -4551,7 +4551,7 @@ megaraid_shutdown(struct pci_dev *pdev) __megaraid_shutdown(adapter); } -static struct pci_device_id megaraid_pci_tbl[] = { +static const struct pci_device_id megaraid_pci_tbl[] = { {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2, diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid index 3f2ce1eb081c..56b76d73895b 100644 --- a/drivers/scsi/megaraid/Kconfig.megaraid +++ b/drivers/scsi/megaraid/Kconfig.megaraid @@ -3,85 +3,84 @@ config MEGARAID_NEWGEN bool "LSI Logic New Generation RAID Device Drivers" depends on PCI && HAS_IOPORT && SCSI help - LSI Logic RAID Device Drivers + LSI Logic RAID Device Drivers config MEGARAID_MM tristate "LSI Logic Management Module (New Driver)" depends on PCI && HAS_IOPORT && SCSI && MEGARAID_NEWGEN help - Management Module provides ioctl, sysfs support for LSI Logic - RAID controllers. - To compile this driver as a module, choose M here: the - module will be called megaraid_mm + Management Module provides ioctl, sysfs support for LSI Logic + RAID controllers. 
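The __refdata arrangement described in the mac_scsi comment above is the general pattern for platform drivers registered with module_platform_driver_probe(). A minimal self-contained sketch, using the hypothetical name "foo":

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	/* one-shot probe: the device can never be rebound */
	return 0;
}

static void __exit foo_remove(struct platform_device *pdev)
{
	/* teardown only runs on module unload */
}

/*
 * foo_remove() is in .exit.text, which is discarded for built-in code.
 * That is safe because module_platform_driver_probe() drivers cannot
 * be unbound at runtime; __refdata silences the modpost section
 * mismatch warning for the .remove reference.
 */
static struct platform_driver foo_driver __refdata = {
	.remove = __exit_p(foo_remove),
	.driver = {
		.name = "foo",
	},
};
module_platform_driver_probe(foo_driver, foo_probe);

MODULE_DESCRIPTION("Sketch of the __refdata/.exit.text pattern");
MODULE_LICENSE("GPL");

Dropping __refdata from the sketch would make modpost warn that the driver struct references .exit.text.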
+ To compile this driver as a module, choose M here: the + module will be called megaraid_mm config MEGARAID_MAILBOX tristate "LSI Logic MegaRAID Driver (New Driver)" depends on PCI && SCSI && MEGARAID_MM help - List of supported controllers + List of supported controllers - OEM Product Name VID :DID :SVID:SSID - --- ------------ ---- ---- ---- ---- - Dell PERC3/QC 101E:1960:1028:0471 - Dell PERC3/DC 101E:1960:1028:0493 - Dell PERC3/SC 101E:1960:1028:0475 - Dell PERC3/Di 1028:000E:1028:0123 - Dell PERC4/SC 1000:1960:1028:0520 - Dell PERC4/DC 1000:1960:1028:0518 - Dell PERC4/QC 1000:0407:1028:0531 - Dell PERC4/Di 1028:000F:1028:014A - Dell PERC 4e/Si 1028:0013:1028:016c - Dell PERC 4e/Di 1028:0013:1028:016d - Dell PERC 4e/Di 1028:0013:1028:016e - Dell PERC 4e/Di 1028:0013:1028:016f - Dell PERC 4e/Di 1028:0013:1028:0170 - Dell PERC 4e/DC 1000:0408:1028:0002 - Dell PERC 4e/SC 1000:0408:1028:0001 - LSI MegaRAID SCSI 320-0 1000:1960:1000:A520 - LSI MegaRAID SCSI 320-1 1000:1960:1000:0520 - LSI MegaRAID SCSI 320-2 1000:1960:1000:0518 - LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530 - LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532 - LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531 - LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001 - LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002 - LSI MegaRAID SATA 150-4 1000:1960:1000:4523 - LSI MegaRAID SATA 150-6 1000:1960:1000:0523 - LSI MegaRAID SATA 300-4X 1000:0409:1000:3004 - LSI MegaRAID SATA 300-8X 1000:0409:1000:3008 - INTEL RAID Controller SRCU42X 1000:0407:8086:0532 - INTEL RAID Controller SRCS16 1000:1960:8086:0523 - INTEL RAID Controller SRCU42E 1000:0408:8086:0002 - INTEL RAID Controller SRCZCRX 1000:0407:8086:0530 - INTEL RAID Controller SRCS28X 1000:0409:8086:3008 - INTEL RAID Controller SROMBU42E 1000:0408:8086:3431 - INTEL RAID Controller SROMBU42E 1000:0408:8086:3499 - INTEL RAID Controller SRCU51L 1000:1960:8086:0520 - FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065 - ACER MegaRAID ROMB-2E 1000:0408:1025:004D - NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287 + OEM Product Name VID :DID :SVID:SSID + --- ------------ ---- ---- ---- ---- + Dell PERC3/QC 101E:1960:1028:0471 + Dell PERC3/DC 101E:1960:1028:0493 + Dell PERC3/SC 101E:1960:1028:0475 + Dell PERC3/Di 1028:000E:1028:0123 + Dell PERC4/SC 1000:1960:1028:0520 + Dell PERC4/DC 1000:1960:1028:0518 + Dell PERC4/QC 1000:0407:1028:0531 + Dell PERC4/Di 1028:000F:1028:014A + Dell PERC 4e/Si 1028:0013:1028:016c + Dell PERC 4e/Di 1028:0013:1028:016d + Dell PERC 4e/Di 1028:0013:1028:016e + Dell PERC 4e/Di 1028:0013:1028:016f + Dell PERC 4e/Di 1028:0013:1028:0170 + Dell PERC 4e/DC 1000:0408:1028:0002 + Dell PERC 4e/SC 1000:0408:1028:0001 + LSI MegaRAID SCSI 320-0 1000:1960:1000:A520 + LSI MegaRAID SCSI 320-1 1000:1960:1000:0520 + LSI MegaRAID SCSI 320-2 1000:1960:1000:0518 + LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530 + LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532 + LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531 + LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001 + LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002 + LSI MegaRAID SATA 150-4 1000:1960:1000:4523 + LSI MegaRAID SATA 150-6 1000:1960:1000:0523 + LSI MegaRAID SATA 300-4X 1000:0409:1000:3004 + LSI MegaRAID SATA 300-8X 1000:0409:1000:3008 + INTEL RAID Controller SRCU42X 1000:0407:8086:0532 + INTEL RAID Controller SRCS16 1000:1960:8086:0523 + INTEL RAID Controller SRCU42E 1000:0408:8086:0002 + INTEL RAID Controller SRCZCRX 1000:0407:8086:0530 + INTEL RAID Controller SRCS28X 1000:0409:8086:3008 + INTEL RAID Controller SROMBU42E 1000:0408:8086:3431 + INTEL RAID 
Controller SROMBU42E 1000:0408:8086:3499 + INTEL RAID Controller SRCU51L 1000:1960:8086:0520 + FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065 + ACER MegaRAID ROMB-2E 1000:0408:1025:004D + NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287 - To compile this driver as a module, choose M here: the - module will be called megaraid_mbox + To compile this driver as a module, choose M here: the + module will be called megaraid_mbox config MEGARAID_LEGACY tristate "LSI Logic Legacy MegaRAID Driver" depends on PCI && HAS_IOPORT && SCSI help - This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490 - and 467 SCSI host adapters. This driver also support the all U320 - RAID controllers + This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490 + and 467 SCSI host adapters. This driver also support the all U320 + RAID controllers - To compile this driver as a module, choose M here: the - module will be called megaraid + To compile this driver as a module, choose M here: the + module will be called megaraid config MEGARAID_SAS tristate "LSI Logic MegaRAID SAS RAID Module" depends on PCI && SCSI select IRQ_POLL help - Module for LSI Logic's SAS based RAID controllers. - To compile this driver as a module, choose 'm' here. - Module will be called megaraid_sas - + Module for LSI Logic's SAS based RAID controllers. + To compile this driver as a module, choose 'm' here. + Module will be called megaraid_sas diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index bc867da650b6..b75f46c30759 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -199,7 +199,7 @@ MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)"); /* * PCI table for all supported controllers. */ -static struct pci_device_id pci_id_table_g[] = { +static const struct pci_device_id pci_id_table_g[] = { { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI_DISCOVERY, @@ -621,7 +621,7 @@ megaraid_io_attach(adapter_t *adapter) host = scsi_host_alloc(&megaraid_template_g, 8); if (!host) { con_log(CL_ANN, (KERN_WARNING - "megaraid mbox: scsi_register failed\n")); + "megaraid mbox: scsi_host_alloc failed\n")); return -1; } @@ -1725,8 +1725,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) return scb; - case RESERVE: - case RELEASE: + case RESERVE_6: + case RELEASE_6: /* * Do we support clustering and is the support enabled */ @@ -1748,7 +1748,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) scb->dev_channel = 0xFF; scb->dev_target = target; ccb->raw_mbox[0] = CLUSTER_CMD; - ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ? + ccb->raw_mbox[2] = scp->cmnd[0] == RESERVE_6 ? 
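/* RESERVE_6/RELEASE_6 are the renamed 6-byte CDB opcodes (0x16/0x17),
 * distinguishing them from the 10-byte RESERVE_10/RELEASE_10 variants. */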
RESERVE_LD : RELEASE_LD; ccb->raw_mbox[3] = target; @@ -2334,8 +2334,8 @@ megaraid_mbox_dpc(unsigned long devp) * Error code returned is 1 if Reserve or Release * failed or the input parameter is invalid */ - if (status == 1 && (scp->cmnd[0] == RESERVE || - scp->cmnd[0] == RELEASE)) { + if (status == 1 && (scp->cmnd[0] == RESERVE_6 || + scp->cmnd[0] == RELEASE_6)) { scp->result = DID_ERROR << 16 | SAM_STAT_RESERVATION_CONFLICT; @@ -3951,8 +3951,8 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter) } - del_timer_sync(&timeout.timer); - destroy_timer_on_stack(&timeout.timer); + timer_delete_sync(&timeout.timer); + timer_destroy_on_stack(&timeout.timer); mutex_unlock(&raid_dev->sysfs_mtx); diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index c509440bd161..fd7fa7640a5e 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -703,8 +703,8 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) */ wait_event(wait_q, (kioc->status != -ENODATA)); if (timeout.timer.function) { - del_timer_sync(&timeout.timer); - destroy_timer_on_stack(&timeout.timer); + timer_delete_sync(&timeout.timer); + timer_destroy_on_stack(&timeout.timer); } /* diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 56624cbf7fa5..8ee2bfe47571 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -23,8 +23,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.727.03.00-rc1" -#define MEGASAS_RELDATE "Oct 03, 2023" +#define MEGASAS_VERSION "07.734.00.00-rc1" +#define MEGASAS_RELDATE "Apr 03, 2025" #define MEGASAS_MSIX_NAME_LEN 32 @@ -814,12 +814,12 @@ struct MR_HOST_DEVICE_LIST { __le32 size; __le32 count; __le32 reserved[2]; - struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1]; + struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[] __counted_by_le(count); } __packed; #define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \ (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \ - (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1))) + (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))) /* @@ -2473,7 +2473,7 @@ struct MR_LD_VF_MAP { union MR_LD_REF ref; u8 ldVfCount; u8 reserved[6]; - u8 policy[1]; + u8 policy[]; }; struct MR_LD_VF_AFFILIATION { @@ -2701,7 +2701,7 @@ int megasas_get_ctrl_info(struct megasas_instance *instance); int megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend); void megasas_set_dynamic_target_properties(struct scsi_device *sdev, - bool is_target_prop); + struct queue_limits *lim, bool is_target_prop); int megasas_get_target_prop(struct megasas_instance *instance, struct scsi_device *sdev); void megasas_get_snapdump_properties(struct megasas_instance *instance); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 3d4f13da1ae8..5e33d411fa3d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -29,7 +29,7 @@ #include <linux/uio.h> #include <linux/slab.h> #include <linux/uaccess.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> @@ -37,7 +37,6 @@ #include <linux/poll.h> #include <linux/vmalloc.h> #include <linux/irq_poll.h> -#include <linux/blk-mq-pci.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -94,7 +93,7 @@ static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; module_param(scmd_timeout, int, 0444); MODULE_PARM_DESC(scmd_timeout, 
"scsi command timeout (10-90s), default 90s. See megasas_reset_timer."); -int perf_mode = -1; +static int perf_mode = -1; module_param(perf_mode, int, 0444); MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t" "0 - balanced: High iops and low latency queues are allocated &\n\t\t" @@ -106,15 +105,15 @@ MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options: "default mode is 'balanced'" ); -int event_log_level = MFI_EVT_CLASS_CRITICAL; +static int event_log_level = MFI_EVT_CLASS_CRITICAL; module_param(event_log_level, int, 0644); MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)"); -unsigned int enable_sdev_max_qd; +static unsigned int enable_sdev_max_qd; module_param(enable_sdev_max_qd, int, 0444); MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0"); -int poll_queues; +static int poll_queues; module_param(poll_queues, int, 0444); MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" "This parameter is effective only if host_tagset_enable=1 &\n\t\t" @@ -123,7 +122,7 @@ MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode "High iops queues are not allocated &\n\t\t" ); -int host_tagset_enable = 1; +static int host_tagset_enable = 1; module_param(host_tagset_enable, int, 0444); MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)"); @@ -147,7 +146,7 @@ megasas_set_ld_removed_by_fw(struct megasas_instance *instance); /* * PCI ID table for all supported controllers */ -static struct pci_device_id megasas_pci_table[] = { +static const struct pci_device_id megasas_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, /* xscale IOP */ @@ -1888,7 +1887,7 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no) * Returns void */ void megasas_set_dynamic_target_properties(struct scsi_device *sdev, - bool is_target_prop) + struct queue_limits *lim, bool is_target_prop) { u16 pd_index = 0, ld; u32 device_id; @@ -1915,8 +1914,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev, return; raid = MR_LdRaidGet(ld, local_map_ptr); - if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { + if (lim) + lim->dma_alignment = 0x7; + } mr_device_priv_data->is_tm_capable = raid->capability.tmCapable; @@ -1967,7 +1968,8 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev, * */ static inline void -megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) +megasas_set_nvme_device_properties(struct scsi_device *sdev, + struct queue_limits *lim, u32 max_io_size) { struct megasas_instance *instance; u32 mr_nvme_pg_size; @@ -1976,10 +1978,8 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); - blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); - - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); - blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); + lim->max_hw_sectors = max_io_size / 512; + lim->virt_boundary_mask = mr_nvme_pg_size - 1; } /* @@ -2041,7 +2041,7 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, * @is_target_prop true, if 
fw provided target properties. */ static void megasas_set_static_target_properties(struct scsi_device *sdev, - bool is_target_prop) + struct queue_limits *lim, bool is_target_prop) { u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; struct megasas_instance *instance; @@ -2060,13 +2060,15 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev, max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); if (instance->nvme_page_size && max_io_size_kb) - megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); + megasas_set_nvme_device_properties(sdev, lim, + max_io_size_kb << 10); megasas_set_fw_assisted_qd(sdev, is_target_prop); } -static int megasas_slave_configure(struct scsi_device *sdev) +static int megasas_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { u16 pd_index = 0; struct megasas_instance *instance; @@ -2096,17 +2098,20 @@ static int megasas_slave_configure(struct scsi_device *sdev) ret_target_prop = megasas_get_target_prop(instance, sdev); is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; - megasas_set_static_target_properties(sdev, is_target_prop); + megasas_set_static_target_properties(sdev, lim, is_target_prop); /* This sdev property may change post OCR */ - megasas_set_dynamic_target_properties(sdev, is_target_prop); + megasas_set_dynamic_target_properties(sdev, lim, is_target_prop); + + if (!MEGASAS_IS_LOGICAL(sdev)) + sdev->no_vpd_size = 1; mutex_unlock(&instance->reset_mutex); return 0; } -static int megasas_slave_alloc(struct scsi_device *sdev) +static int megasas_sdev_init(struct scsi_device *sdev) { u16 pd_index = 0, ld_tgt_id; struct megasas_instance *instance ; @@ -2151,7 +2156,7 @@ scan_target: return 0; } -static void megasas_slave_destroy(struct scsi_device *sdev) +static void megasas_sdev_destroy(struct scsi_device *sdev) { u16 ld_tgt_id; struct megasas_instance *instance; @@ -3190,7 +3195,7 @@ static void megasas_map_queues(struct Scsi_Host *shost) map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; map->nr_queues = instance->msix_vectors - offset; map->queue_offset = 0; - blk_mq_pci_map_queues(map, instance->pdev, offset); + blk_mq_map_hw_queues(map, &instance->pdev->dev, offset); qoff += map->nr_queues; offset += map->nr_queues; @@ -3507,9 +3512,9 @@ static const struct scsi_host_template megasas_template = { .module = THIS_MODULE, .name = "Avago SAS based MegaRAID driver", .proc_name = "megaraid_sas", - .slave_configure = megasas_slave_configure, - .slave_alloc = megasas_slave_alloc, - .slave_destroy = megasas_slave_destroy, + .sdev_configure = megasas_sdev_configure, + .sdev_init = megasas_sdev_init, + .sdev_destroy = megasas_sdev_destroy, .queuecommand = megasas_queue_command, .eh_target_reset_handler = megasas_reset_target, .eh_abort_handler = megasas_task_abort, @@ -3660,8 +3665,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - cmd->scmd->result = - (DID_ERROR << 16) | hdr->scsi_status; + if (hdr->scsi_status == 0xf0) + cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: @@ -6300,7 +6307,7 @@ static int megasas_init_fw(struct megasas_instance *instance) } if (!instance->msix_vectors) { - i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_INTX); if (i < 0) goto fail_init_adapter; } @@ -6377,7 
+6384,7 @@ static int megasas_init_fw(struct megasas_instance *instance) GFP_KERNEL); if (!fusion->stream_detect_by_ld[i]) { dev_err(&instance->pdev->dev, - "unable to allocate stream detect by LD\n "); + "unable to allocate stream detect by LD\n"); for (j = 0; j < i; ++j) kfree(fusion->stream_detect_by_ld[j]); kfree(fusion->stream_detect_by_ld); @@ -6519,7 +6526,7 @@ static int megasas_init_fw(struct megasas_instance *instance) fail_start_watchdog: if (instance->requestorId && !instance->skip_heartbeat_timer_del) - del_timer_sync(&instance->sriov_heartbeat_timer); + timer_delete_sync(&instance->sriov_heartbeat_timer); fail_get_ld_pd_list: instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); @@ -7601,7 +7608,7 @@ fail_io_attach: megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; if (instance->requestorId && !instance->skip_heartbeat_timer_del) - del_timer_sync(&instance->sriov_heartbeat_timer); + timer_delete_sync(&instance->sriov_heartbeat_timer); instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); @@ -7741,7 +7748,7 @@ megasas_suspend(struct device *dev) /* Shutdown SR-IOV heartbeat timer */ if (instance->requestorId && !instance->skip_heartbeat_timer_del) - del_timer_sync(&instance->sriov_heartbeat_timer); + timer_delete_sync(&instance->sriov_heartbeat_timer); /* Stop the FW fault detection watchdog */ if (instance->adapter_type != MFI_SERIES) @@ -7839,7 +7846,7 @@ megasas_resume(struct device *dev) if (!instance->msix_vectors) { rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, - PCI_IRQ_LEGACY); + PCI_IRQ_INTX); if (rval < 0) goto fail_reenable_msix; } @@ -7905,7 +7912,7 @@ megasas_resume(struct device *dev) fail_start_watchdog: if (instance->requestorId && !instance->skip_heartbeat_timer_del) - del_timer_sync(&instance->sriov_heartbeat_timer); + timer_delete_sync(&instance->sriov_heartbeat_timer); fail_init_mfi: megasas_free_ctrl_dma_buffers(instance); megasas_free_ctrl_mem(instance); @@ -7969,7 +7976,7 @@ static void megasas_detach_one(struct pci_dev *pdev) /* Shutdown SR-IOV heartbeat timer */ if (instance->requestorId && !instance->skip_heartbeat_timer_del) - del_timer_sync(&instance->sriov_heartbeat_timer); + timer_delete_sync(&instance->sriov_heartbeat_timer); /* Stop the FW fault detection watchdog */ if (instance->adapter_type != MFI_SERIES) @@ -8904,8 +8911,11 @@ megasas_aen_polling(struct work_struct *work) (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), 0); - if (sdev1) + if (sdev1) { + mutex_unlock(&instance->reset_mutex); megasas_remove_scsi_device(sdev1); + mutex_lock(&instance->reset_mutex); + } event_type = SCAN_VD_CHANNEL; break; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index c60014e07b44..a6794f49e9fa 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1988,8 +1988,8 @@ megasas_fusion_start_watchdog(struct megasas_instance *instance) sizeof(instance->fault_handler_work_q_name), "poll_megasas%d_status", instance->host->host_no); - instance->fw_fault_work_q = - create_singlethread_workqueue(instance->fault_handler_work_q_name); + instance->fw_fault_work_q = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, instance->fault_handler_work_q_name); if (!instance->fw_fault_work_q) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); @@ -2043,7 +2043,10 @@ map_cmd_status(struct fusion_context *fusion, case MFI_STAT_SCSI_IO_FAILED: 
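	/*
	 * 0xf0 is not a defined SAM status code; it is presumably an
	 * internal firmware sentinel. The translation below maps it to
	 * SAM_STAT_CHECK_CONDITION (0x02) so scmd->result stays within
	 * statuses the SCSI midlayer's error handling understands.
	 */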
case MFI_STAT_LD_INIT_IN_PROGRESS: - scmd->result = (DID_ERROR << 16) | ext_status; + if (ext_status == 0xf0) + scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + scmd->result = (DID_ERROR << 16) | ext_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: @@ -4969,7 +4972,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) } if (instance->requestorId && !instance->skip_heartbeat_timer_del) - del_timer_sync(&instance->sriov_heartbeat_timer); + timer_delete_sync(&instance->sriov_heartbeat_timer); set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); @@ -5119,7 +5122,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) ret_target_prop = megasas_get_target_prop(instance, sdev); is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; - megasas_set_dynamic_target_properties(sdev, is_target_prop); + megasas_set_dynamic_target_properties(sdev, NULL, + is_target_prop); } status_reg = instance->instancet->read_fw_status_reg diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h index 35f81af40f51..96401eb7e231 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h @@ -19,6 +19,7 @@ #define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31) #define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33) #define MPI3_CONFIG_PAGEATTR_MASK (0xf0) +#define MPI3_CONFIG_PAGEATTR_SHIFT (4) #define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00) #define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10) #define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20) @@ -29,10 +30,13 @@ #define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04) #define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05) #define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000) +#define MPI3_DEVICE_PGAD_FORM_SHIFT (28) #define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) #define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000) #define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff) +#define MPI3_DEVICE_PGAD_HANDLE_SHIFT (0) #define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000) +#define MPI3_SAS_EXPAND_PGAD_FORM_SHIFT (28) #define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) #define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000) #define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000) @@ -67,6 +71,7 @@ #define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00) #define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8) #define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff) +#define MPI3_INSTANCE_PGAD_INSTANCE_MASK (0x0000ffff) struct mpi3_config_request { __le16 host_tag; u8 ioc_use_only02; @@ -75,7 +80,8 @@ struct mpi3_config_request { u8 ioc_use_only06; u8 msg_flags; __le16 change_count; - __le16 reserved0a; + u8 proxy_ioc_number; + u8 reserved0b; u8 page_version; u8 page_number; u8 page_type; @@ -206,6 +212,9 @@ struct mpi3_config_page_header { #define MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5) #define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6) #define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8) +#define MPI3_MFGPAGE_DEVID_SAS5248_MPI (0x00f0) +#define MPI3_MFGPAGE_DEVID_SAS5248_MPI_NS (0x00f1) +#define MPI3_MFGPAGE_DEVID_SAS5248_PCIE_SWITCH (0x00f2) struct mpi3_man_page0 { struct mpi3_config_page_header header; u8 chip_revision[8]; @@ -309,6 +318,7 @@ struct mpi3_man6_gpio_entry { #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00) #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10) #define 
MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_ACK_REQUIRED (0x02) #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01) #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00) #define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01) @@ -1073,6 +1083,8 @@ struct mpi3_io_unit_page8 { #define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04) #define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02) #define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01) +#define MPI3_IOUNIT8_SBMODE_CURRENT_KEY_IOUNIT17 (0x10) +#define MPI3_IOUNIT8_SBMODE_HARD_SECURE_RECERTIFIED (0x08) struct mpi3_io_unit_page9 { struct mpi3_config_page_header header; __le32 flags; @@ -1088,6 +1100,8 @@ struct mpi3_io_unit_page9 { #define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004) #define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001) #define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff) +#define MPI3_IOUNIT9_FIRSTDEVICE_IN_DRIVER_PAGE_0 (0xfffe) + struct mpi3_io_unit_page10 { struct mpi3_config_page_header header; u8 flags; @@ -1223,6 +1237,19 @@ struct mpi3_io_unit_page15 { #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01) #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02) #define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00) + +struct mpi3_io_unit_page17 { + struct mpi3_config_page_header header; + u8 num_instances; + u8 instance; + __le16 reserved0a; + __le32 reserved0c[4]; + __le16 key_length; + u8 encryption_algorithm; + u8 reserved1f; + __le32 current_key[]; +}; +#define MPI3_IOUNIT17_PAGEVERSION (0x00) struct mpi3_ioc_page0 { struct mpi3_config_page_header header; __le32 reserved08; @@ -1310,21 +1337,26 @@ struct mpi3_driver_page0 { u8 tur_interval; u8 reserved10; u8 security_key_timeout; - __le16 reserved12; + __le16 first_device; __le32 reserved14; __le32 reserved18; }; #define MPI3_DRIVER0_PAGEVERSION (0x00) +#define MPI3_DRIVER0_BSDOPTS_DEVICEEXPOSURE_DISABLE (0x00000020) +#define MPI3_DRIVER0_BSDOPTS_WRITECACHE_DISABLE (0x00000010) #define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008) #define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004) #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003) #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000) #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001) #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002) +#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE1 (0x0000) +#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE2 (0xffff) struct mpi3_driver_page1 { struct mpi3_config_page_header header; __le32 flags; - __le32 reserved0c; + u8 time_stamp_update; + u8 reserved0d[3]; __le16 host_diag_trace_max_size; __le16 host_diag_trace_min_size; __le16 host_diag_trace_decrement_size; @@ -1562,16 +1594,13 @@ struct mpi3_sas_io_unit0_phy_data { __le32 reserved10; }; -#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX -#define MPI3_SAS_IO_UNIT0_PHY_MAX (1) -#endif struct mpi3_sas_io_unit_page0 { struct mpi3_config_page_header header; __le32 reserved08; u8 num_phys; u8 init_status; __le16 reserved0e; - struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX]; + struct mpi3_sas_io_unit0_phy_data phy_data[]; }; #define MPI3_SASIOUNIT0_PAGEVERSION (0x00) @@ -1603,9 +1632,6 @@ struct mpi3_sas_io_unit1_phy_data { __le32 reserved08; }; -#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX -#define MPI3_SAS_IO_UNIT1_PHY_MAX (1) -#endif struct mpi3_sas_io_unit_page1 { struct mpi3_config_page_header 
header; __le16 control_flags; @@ -1615,7 +1641,7 @@ struct mpi3_sas_io_unit_page1 { u8 num_phys; u8 sata_max_q_depth; __le16 reserved12; - struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX]; + struct mpi3_sas_io_unit1_phy_data phy_data[]; }; #define MPI3_SASIOUNIT1_PAGEVERSION (0x00) @@ -2350,6 +2376,10 @@ struct mpi3_device0_vd_format { #define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001) #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000) #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK (0x0003) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_HDD (0x0000) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD (0x0001) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_NO_GUIDANCE (0x0002) union mpi3_device0_dev_spec_format { struct mpi3_device0_sas_sata_format sas_sata_format; struct mpi3_device0_pcie_format pcie_format; diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h index 47035b811902..8d824107a678 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h @@ -66,7 +66,12 @@ struct mpi3_component_image_header { #define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53) #define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350) #define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) +#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_MASK (0x00000300) +#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_SHIFT (8) +#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_MASK (0x000000c0) +#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_SHIFT (6) #define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030) +#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_SHIFT (4) #define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000) #define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010) #define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008) @@ -198,25 +203,29 @@ struct mpi3_supported_devices_data { struct mpi3_supported_device supported_device[MPI3_SUPPORTED_DEVICE_MAX]; }; -#ifndef MPI3_ENCRYPTED_HASH_MAX -#define MPI3_ENCRYPTED_HASH_MAX (1) +#ifndef MPI3_PUBLIC_KEY_MAX +#define MPI3_PUBLIC_KEY_MAX (1) #endif struct mpi3_encrypted_hash_entry { u8 hash_image_type; u8 hash_algorithm; u8 encryption_algorithm; - u8 reserved03; - __le32 reserved04; - __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX]; + u8 flags; + __le16 public_key_size; + __le16 signature_size; + __le32 public_key[MPI3_PUBLIC_KEY_MAX]; }; - -#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03) +#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH (0x03) +#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04) +#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05) #define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0) +#define MPI3_HASH_ALGORITHM_VERSION_SHIFT (5) #define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00) #define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20) #define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40) #define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60) #define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f) +#define MPI3_HASH_ALGORITHM_SIZE_SHIFT (0) #define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00) #define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01) #define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02) @@ -228,17 +237,13 @@ struct mpi3_encrypted_hash_entry { #define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04) #define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05) #define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06) -#ifndef MPI3_PUBLIC_KEY_MAX -#define MPI3_PUBLIC_KEY_MAX (1) -#endif -struct mpi3_encrypted_key_with_hash_entry { - u8 
hash_image_type; - u8 hash_algorithm; - u8 encryption_algorithm; - u8 reserved03; - __le32 reserved04; - __le32 public_key[MPI3_PUBLIC_KEY_MAX]; -}; + +/* hierarchical signature system (hss) */ +#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_87 (0x0b) +#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c) +#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d) +#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f) +#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_SHIFT (0) #ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX #define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1) diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h index af86d12c8e49..bbef5bac92ed 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h @@ -38,23 +38,31 @@ struct mpi3_scsi_io_request { #define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80) #define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40) #define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000) +#define MPI3_SCSIIO_FLAGS_LARGE_CDB_MASK (0x60000000) +#define MPI3_SCSIIO_FLAGS_LARGE_CDB_SHIFT (29) +#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_MASK (0x18000000) +#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_SHIFT (27) #define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000) #define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000) #define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000) #define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000) +#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SHIFT (24) +#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000) +#define MPI3_SCSIIO_FLAGS_DATADIRECTION_SHIFT (18) #define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000) #define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000) #define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000) #define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000) #define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000) #define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20) -#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000) #define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000) #define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000) #define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000) #define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000) +#define MPI3_SCSIIO_FLAGS_DMAOPERATION_SHIFT (16) #define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000) #define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0) +#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_SHIFT (4) #define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010) #define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020) #define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080) @@ -99,6 +107,7 @@ struct mpi3_scsi_io_reply { #define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30) #define MPI3_SCSI_STATUS_TASK_ABORTED (0x40) #define MPI3_SCSI_STATE_SENSE_MASK (0x03) +#define MPI3_SCSI_STATE_SENSE_SHIFT (0) #define MPI3_SCSI_STATE_SENSE_VALID (0x00) #define MPI3_SCSI_STATE_SENSE_FAILED (0x01) #define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02) diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h index 0cb24fc03620..b42933fcd423 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h @@ -27,9 +27,10 @@ struct mpi3_ioc_init_request { __le64 sense_buffer_free_queue_address; __le64 driver_information_address; }; - +#define MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED (0x08) #define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04) #define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03) +#define 
MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SHIFT (0) #define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00) #define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01) #define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02) @@ -39,6 +40,13 @@ struct mpi3_ioc_init_request { #define MPI3_WHOINIT_HOST_DRIVER (0x03) #define MPI3_WHOINIT_MANUFACTURER (0x04) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_SHIFT (0) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_SSD (0x00000003) + struct mpi3_ioc_facts_request { __le16 host_tag; u8 ioc_use_only02; @@ -101,20 +109,24 @@ struct mpi3_ioc_facts_data { __le16 max_io_throttle_group; __le16 io_throttle_low; __le16 io_throttle_high; + __le32 diag_fdl_size; + __le32 diag_tty_size; }; #define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000) +#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_SHIFT (31) #define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000) #define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000) #define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600) +#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_SHIFT (9) #define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000) #define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200) -#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100) -#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080) -#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040) -#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020) -#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010) -#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008) -#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002) +#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_SUPPORTED (0x00000100) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED (0x00000080) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_SUPPORTED (0x00000040) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_SUPPORTED (0x00000020) +#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_SUPPORTED (0x00000010) +#define MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED (0x00000008) +#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED (0x00000002) #define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001) #define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000) #define MPI3_IOCFACTS_PID_TYPE_SHIFT (12) @@ -126,6 +138,7 @@ struct mpi3_ioc_facts_data { #define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000) #define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800) #define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_SHIFT (8) #define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000) #define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100) #define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200) @@ -138,7 +151,10 @@ struct mpi3_ioc_facts_data { #define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020) #define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010) #define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008) +#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002) #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001) +#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SHIFT (0) #define 
MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000) #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001) #define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010) @@ -151,14 +167,18 @@ struct mpi3_ioc_facts_data { #define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00) #define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8) #define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030) +#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_SHIFT (4) #define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000) #define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010) #define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020) #define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f) +#define MPI3_IOCFACTS_FLAGS_PERSONALITY_SHIFT (0) #define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000) #define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002) #define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000) #define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000) +#define MPI3_IOCFACTS_DIAGFDLSIZE_NOT_SUPPORTED (0x00000000) +#define MPI3_IOCFACTS_DIAGTTYSIZE_NOT_SUPPORTED (0x00000000) struct mpi3_mgmt_passthrough_request { __le16 host_tag; u8 ioc_use_only02; @@ -192,6 +212,7 @@ struct mpi3_create_request_queue_request { }; #define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80) +#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SHIFT (7) #define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80) #define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00) #define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2) @@ -225,10 +246,12 @@ struct mpi3_create_reply_queue_request { }; #define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SHIFT (7) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_SHIFT (0) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00) #define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01) #define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2) @@ -314,9 +337,11 @@ struct mpi3_event_notification_reply { }; #define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01) +#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_SHIFT (0) #define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01) #define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00) #define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02) +#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_SHIFT (1) #define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00) #define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02) struct mpi3_event_data_gpio_interrupt { @@ -449,9 +474,6 @@ struct mpi3_event_data_sas_notify_primitive { #define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02) #define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03) #define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04) -#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT -#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1) -#endif struct mpi3_event_sas_topo_phy_entry { __le16 attached_dev_handle; u8 link_rate; @@ -478,6 +500,7 @@ struct mpi3_event_sas_topo_phy_entry { #define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40) #define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80) #define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f) +#define MPI3_EVENT_SAS_TOPO_PHY_RC_SHIFT (0) #define 
MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02) #define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03) #define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04) @@ -492,7 +515,7 @@ struct mpi3_event_data_sas_topology_change_list { u8 start_phy_num; u8 exp_status; u8 io_unit_port; - struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT]; + struct mpi3_event_sas_topo_phy_entry phy_entry[] __counted_by(num_entries); }; #define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) @@ -541,9 +564,6 @@ struct mpi3_event_data_pcie_enumeration { #define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) #define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) #define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) -#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT -#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1) -#endif struct mpi3_event_pcie_topo_port_entry { __le16 attached_dev_handle; u8 port_status; @@ -560,6 +580,7 @@ struct mpi3_event_pcie_topo_port_entry { #define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05) #define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06) #define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_SHIFT (4) #define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00) #define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10) #define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20) @@ -567,6 +588,7 @@ struct mpi3_event_pcie_topo_port_entry { #define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40) #define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50) #define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_SHIFT (0) #define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00) #define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01) #define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02) @@ -584,7 +606,7 @@ struct mpi3_event_data_pcie_topology_change_list { u8 switch_status; u8 io_unit_port; __le32 reserved0c; - struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT]; + struct mpi3_event_pcie_topo_port_entry port_entry[] __counted_by(num_entries); }; #define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) @@ -875,6 +897,7 @@ struct mpi3_pel_req_action_acknowledge { }; #define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_SHIFT (0) #define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00) #define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01) #define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02) @@ -918,6 +941,7 @@ struct mpi3_ci_download_request { #define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40) #define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20) #define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03) +#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SHIFT (0) #define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00) #define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01) #define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02) @@ -947,6 +971,7 @@ struct mpi3_ci_download_reply { #define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20) #define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10) #define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e) +#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_SHIFT (1) #define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00) #define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02) #define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04) @@ -970,9 +995,11 @@ struct mpi3_ci_upload_request { }; 
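/*
 * Most of the new *_SHIFT constants in these headers simply pair an
 * existing *_MASK with its bit offset, so field extraction needs no
 * open-coded magic numbers. A minimal sketch using the LOCATION pair
 * defined just below (hypothetical helper, not part of the patch):
 */
static inline u8 mpi3_ci_upload_msgflags_location(u8 msg_flags)
{
	return (msg_flags & MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK) >>
	       MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SHIFT;
}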
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01) +#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SHIFT (0) #define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00) #define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01) #define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02) +#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_SHIFT (1) #define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00) #define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02) #define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01) diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h new file mode 100644 index 000000000000..50a65b16a818 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016-2024 Broadcom Inc. All rights reserved. + */ +#ifndef MPI30_TOOL_H +#define MPI30_TOOL_H 1 + +#define MPI3_DIAG_BUFFER_TYPE_TRACE (0x01) +#define MPI3_DIAG_BUFFER_TYPE_FW (0x02) +#define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01) + +#define MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED (0x01) +struct mpi3_diag_buffer_post_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + u8 type; + u8 reserved0d; + __le16 reserved0e; + __le64 address; + __le32 length; + __le32 reserved1c; +}; + +struct mpi3_diag_buffer_manage_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + u8 type; + u8 action; + __le16 reserved0e; +}; + + +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h index 1e0a3dcaf723..5c522e2531c3 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h @@ -18,7 +18,7 @@ union mpi3_version_union { #define MPI3_VERSION_MAJOR (3) #define MPI3_VERSION_MINOR (0) -#define MPI3_VERSION_UNIT (28) +#define MPI3_VERSION_UNIT (35) #define MPI3_VERSION_DEV (0) #define MPI3_DEVHANDLE_INVALID (0xffff) struct mpi3_sysif_oper_queue_indexes { @@ -80,6 +80,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20) #define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000) #define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16) +#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_SHIFT (14) #define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000) #define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000) #define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000) @@ -97,6 +98,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_IOC_STATUS_READY (0x00000001) #define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024) #define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff) +#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_SHIFT (0) #define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026) #define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000) #define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16) @@ -106,6 +108,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034) #define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040) #define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000) +#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_SHIFT (30) #define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000) #define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000) #define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000) @@ -124,6 +127,7 @@ struct 
mpi3_sysif_registers { #define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8)) #define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04) #define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_SHIFT (0) #define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0) #define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf) #define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4) @@ -133,6 +137,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd) #define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SHIFT (8) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100) #define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200) @@ -151,6 +156,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24) #define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000) #define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff) +#define MPI3_SYSIF_FAULT_CODE_SHIFT (0) #define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000) #define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001) #define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002) @@ -158,6 +164,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004) #define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005) #define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006) +#define MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER (0x0000f007) #define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14) #define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18) #define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c) @@ -175,17 +182,20 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c) #define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60) #define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030) +#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_SHIFT (4) #define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000) #define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010) #define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020) #define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030) #define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004) #define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002) +#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_SHIFT (1) #define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000) #define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002) #define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001) #define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62) #define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e) +#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SHIFT (1) #define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000) #define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002) #define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004) @@ -206,7 +216,9 @@ struct mpi3_default_reply_descriptor { }; #define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001) +#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_SHIFT (0) #define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000) +#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SHIFT (12) #define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000) #define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000) #define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000) @@ -300,6 +312,7 @@ union mpi3_sge_union { }; #define 
MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0) +#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SHIFT (4) #define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00) #define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10) #define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20) @@ -308,6 +321,7 @@ union mpi3_sge_union { #define MPI3_SGE_FLAGS_END_OF_LIST (0x08) #define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04) #define MPI3_SGE_FLAGS_DLAS_MASK (0x03) +#define MPI3_SGE_FLAGS_DLAS_SHIFT (0) #define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00) #define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01) #define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02) @@ -321,15 +335,18 @@ union mpi3_sge_union { #define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200) #define MPI3_EEDPFLAGS_CHK_GUARD (0x0100) #define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0) +#define MPI3_EEDPFLAGS_ESC_MODE_SHIFT (6) #define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040) #define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080) #define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0) #define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030) +#define MPI3_EEDPFLAGS_HOST_GUARD_SHIFT (4) #define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000) #define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010) #define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020) #define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008) #define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007) +#define MPI3_EEDPFLAGS_EEDP_OP_SHIFT (0) #define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001) #define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002) #define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003) @@ -402,6 +419,7 @@ struct mpi3_default_reply { #define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000) #define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000) #define MPI3_IOCSTATUS_STATUS_MASK (0x7fff) +#define MPI3_IOCSTATUS_STATUS_SHIFT (0) #define MPI3_IOCSTATUS_SUCCESS (0x0000) #define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001) #define MPI3_IOCSTATUS_BUSY (0x0002) @@ -410,6 +428,7 @@ struct mpi3_default_reply { #define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) #define MPI3_IOCSTATUS_INVALID_FIELD (0x0007) #define MPI3_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI3_IOCSTATUS_SHUTDOWN_ACTIVE (0x0009) #define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a) #define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b) #define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c) @@ -467,4 +486,5 @@ struct mpi3_default_reply { #define MPI3_IOCLOGINFO_TYPE_NONE (0x0) #define MPI3_IOCLOGINFO_TYPE_SAS (0x3) #define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff) +#define MPI3_IOCLOGINFO_LOG_DATA_SHIFT (0) #endif diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 3de1ee05c44e..9bbc7cb98ca3 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -12,7 +12,6 @@ #include <linux/blkdev.h> #include <linux/blk-mq.h> -#include <linux/blk-mq-pci.h> #include <linux/delay.h> #include <linux/dmapool.h> #include <linux/errno.h> @@ -23,6 +22,7 @@ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/aer.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> @@ -30,7 +30,7 @@ #include <linux/uaccess.h> #include <linux/utsname.h> #include <linux/workqueue.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> @@ -47,6 +47,7 @@ #include "mpi/mpi30_ioc.h" #include "mpi/mpi30_sas.h" #include "mpi/mpi30_pci.h" +#include "mpi/mpi30_tool.h" #include "mpi3mr_debug.h" /* Global list and lock for storing multiple adapters managed by the driver */ @@ -55,15 +56,15 @@ extern struct 
list_head mrioc_list; extern int prot_mask; extern atomic64_t event_counter; -#define MPI3MR_DRIVER_VERSION "8.5.1.0.0" -#define MPI3MR_DRIVER_RELDATE "5-December-2023" +#define MPI3MR_DRIVER_VERSION "8.13.0.5.50" +#define MPI3MR_DRIVER_RELDATE "20-February-2025" #define MPI3MR_DRIVER_NAME "mpi3mr" #define MPI3MR_DRIVER_LICENSE "GPL" #define MPI3MR_DRIVER_AUTHOR "Broadcom Inc. <mpi3mr-linuxdrv.pdl@broadcom.com>" #define MPI3MR_DRIVER_DESC "MPI3 Storage Controller Device Driver" -#define MPI3MR_NAME_LENGTH 32 +#define MPI3MR_NAME_LENGTH 64 #define IOCNAME "%s: " #define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024) @@ -79,13 +80,14 @@ extern atomic64_t event_counter; /* Admin queue management definitions */ #define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K) -#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K) +#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K) #define MPI3MR_ADMIN_REQ_FRAME_SZ 128 #define MPI3MR_ADMIN_REPLY_FRAME_SZ 16 /* Operational queue management definitions */ #define MPI3MR_OP_REQ_Q_QD 512 #define MPI3MR_OP_REP_Q_QD 1024 +#define MPI3MR_OP_REP_Q_QD2K 2048 #define MPI3MR_OP_REP_Q_QD4K 4096 #define MPI3MR_OP_REQ_Q_SEG_SIZE 4096 #define MPI3MR_OP_REP_Q_SEG_SIZE 4096 @@ -128,11 +130,10 @@ extern atomic64_t event_counter; #define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180 #define MPI3MR_RESET_ACK_TIMEOUT 30 #define MPI3MR_MUR_TIMEOUT 120 +#define MPI3MR_RESET_TIMEOUT 510 #define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milliseconds */ -#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */ - #define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10 #define MPI3MR_SCMD_TIMEOUT (60 * HZ) @@ -175,7 +176,7 @@ extern atomic64_t event_counter; #define MPI3MR_DEFAULT_SDEV_QD 32 /* Definitions for Threaded IRQ poll*/ -#define MPI3MR_IRQ_POLL_SLEEP 2 +#define MPI3MR_IRQ_POLL_SLEEP 20 #define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8 /* Definitions for the controller security status*/ @@ -187,6 +188,31 @@ extern atomic64_t event_counter; #define MPI3MR_HARD_SECURE_DEVICE 0x08 #define MPI3MR_TAMPERED_DEVICE 0x0C +#define MPI3MR_DEFAULT_HDB_MAX_SZ (4 * 1024 * 1024) +#define MPI3MR_DEFAULT_HDB_DEC_SZ (1 * 1024 * 1024) +#define MPI3MR_DEFAULT_HDB_MIN_SZ (2 * 1024 * 1024) +#define MPI3MR_MAX_NUM_HDB 2 + +#define MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN 0 +#define MPI3MR_HDB_TRIGGER_TYPE_FAULT 1 +#define MPI3MR_HDB_TRIGGER_TYPE_ELEMENT 2 +#define MPI3MR_HDB_TRIGGER_TYPE_GLOBAL 3 +#define MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET 4 +#define MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED 5 + +#define MPI3MR_HDB_REFRESH_TYPE_RESERVED 0 +#define MPI3MR_HDB_REFRESH_TYPE_CURRENT 1 +#define MPI3MR_HDB_REFRESH_TYPE_DEFAULT 2 +#define MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT 3 + +#define MPI3MR_DEFAULT_HDB_SZ (4 * 1024 * 1024) + +#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_INDEX 0 +#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA 1 + +#define MPI3MR_THRESHOLD_REPLY_COUNT 100 + /* SGE Flag definition */ #define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \ (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \ @@ -210,6 +236,8 @@ extern atomic64_t event_counter; #define MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS 256 #define MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS 2048 +#define MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER (0xFFFD) + /** * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe * Encapsulated commands. 
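A large share of the header churn above pairs each existing *_MASK define with a *_SHIFT companion so field extraction no longer hard-codes shift constants at the call sites. A sketch of the resulting decode pattern, using the PCIe topology defines from mpi30_ioc.h (port_info is an assumed local holding the port_info field of struct mpi3_event_pcie_topo_port_entry):

	u8 lanes, rate;

	/* isolate each field with its mask, then normalize with the new shift */
	lanes = (port_info & MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK) >>
		MPI3_EVENT_PCIE_TOPO_PI_LANES_SHIFT;
	rate = (port_info & MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK) >>
	       MPI3_EVENT_PCIE_TOPO_PI_RATE_SHIFT;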
@@ -289,11 +317,19 @@ enum mpi3mr_reset_reason { MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22, MPI3MR_RESET_FROM_SYSFS = 23, MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24, + MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT = 25, + MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT = 26, MPI3MR_RESET_FROM_FIRMWARE = 27, MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29, MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30, + MPI3MR_RESET_FROM_TRIGGER = 31, }; +#define MPI3MR_RESET_REASON_OSTYPE_LINUX 1 +#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28 +#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20 + + /* Queue type definitions */ enum queue_type { MPI3MR_DEFAULT_QUEUE = 0, @@ -323,6 +359,9 @@ struct mpi3mr_ioc_facts { u32 ioc_capabilities; struct mpi3mr_compimg_ver fw_ver; u32 mpi_version; + u32 diag_trace_sz; + u32 diag_fw_sz; + u32 diag_drvr_sz; u16 max_reqs; u16 product_id; u16 op_req_sz; @@ -350,6 +389,7 @@ struct mpi3mr_ioc_facts { u16 max_msix_vectors; u8 personality; u8 dma_mask; + bool max_req_limit; u8 protocol_flags; u8 sge_mod_mask; u8 sge_mod_value; @@ -419,6 +459,8 @@ struct op_req_qinfo { * @enable_irq_poll: Flag to indicate polling is enabled * @in_use: Queue is handled by poll/ISR * @qtype: Type of queue (types defined in enum queue_type) + * @qfull_watermark: Watermark defined in reply queue to avoid + * reply queue full */ struct op_reply_qinfo { u16 ci; @@ -434,6 +476,7 @@ struct op_reply_qinfo { bool enable_irq_poll; atomic_t in_use; enum queue_type qtype; + u16 qfull_watermark; }; /** @@ -480,6 +523,7 @@ struct mpi3mr_throttle_group_info { /* HBA port flags */ #define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01 +#define MPI3MR_HBA_PORT_FLAG_NEW 0x02 /* IOCTL data transfer sge*/ #define MPI3MR_NUM_IOCTL_SGE 256 @@ -501,8 +545,8 @@ struct mpi3mr_hba_port { * @port_list: List of ports belonging to a SAS node * @num_phys: Number of phys associated with port * @marked_responding: used while refreshing the sas ports - * @lowest_phy: lowest phy ID of current sas port - * @phy_mask: phy_mask of current sas port + * @lowest_phy: lowest phy ID of current sas port, valid for controller port + * @phy_mask: phy_mask of current sas port, valid for controller port * @hba_port: HBA port entry * @remote_identify: Attached device identification * @rphy: SAS transport layer rphy object @@ -849,6 +893,63 @@ struct mpi3mr_drv_cmd { }; /** + * union mpi3mr_trigger_data - Trigger data information + * @fault: Fault code + * @global: Global trigger data + * @element: Element trigger data + */ +union mpi3mr_trigger_data { + u16 fault; + u64 global; + union mpi3_driver2_trigger_element element; +}; + +/** + * struct trigger_event_data - store trigger related + * information. + * + * @trace_hdb: Trace diag buffer descriptor reference + * @fw_hdb: FW diag buffer descriptor reference + * @trigger_type: Trigger type + * @trigger_specific_data: Trigger specific data + * @snapdump: Snapdump enable or disable flag + */ +struct trigger_event_data { + struct diag_buffer_desc *trace_hdb; + struct diag_buffer_desc *fw_hdb; + u8 trigger_type; + union mpi3mr_trigger_data trigger_specific_data; + bool snapdump; +}; + +/** + * struct diag_buffer_desc - memory descriptor structure to + * store virtual, dma addresses, size, buffer status for host + * diagnostic buffers. 
+ * + * @type: Buffer type + * @trigger_data: Trigger data + * @trigger_type: Trigger type + * @status: Buffer status + * @size: Buffer size + * @addr: Virtual address + * @dma_addr: Buffer DMA address + * @is_segmented: The buffer is segmented or not + * @disabled_after_reset: The buffer is disabled after reset + */ +struct diag_buffer_desc { + u8 type; + union mpi3mr_trigger_data trigger_data; + u8 trigger_type; + u8 status; + u32 size; + void *addr; + dma_addr_t dma_addr; + bool is_segmented; + bool disabled_after_reset; +}; + +/** * struct dma_memory_desc - memory descriptor structure to store * virtual address, dma address and size for any generic dma * memory allocations in the driver. @@ -931,6 +1032,8 @@ struct scmd_priv { * @admin_reply_base: Admin reply queue base virtual address * @admin_reply_dma: Admin reply queue base dma address * @admin_reply_q_in_use: Queue is handled by poll/ISR + * @admin_pend_isr: Count of unprocessed admin ISR/poll calls + * due to another thread processing replies * @ready_timeout: Controller ready timeout * @intr_info: Interrupt cookie pointer * @intr_info_count: Number of interrupt cookies @@ -966,7 +1069,6 @@ struct scmd_priv { * @sbq_lock: Sense buffer queue lock * @sbq_host_index: Sense buffer queue host index * @event_masks: Event mask bitmap - * @fwevt_worker_name: Firmware event worker thread name * @fwevt_worker_thread: Firmware event worker thread * @fwevt_lock: Firmware event lock * @fwevt_list: Firmware event list @@ -997,8 +1099,10 @@ struct scmd_priv { * @evtack_cmds_bitmap: Event Ack bitmap * @delayed_evtack_cmds_list: Delayed event acknowledgment list * @ts_update_counter: Timestamp update counter + * @ts_update_interval: Timestamp update interval * @reset_in_progress: Reset in progress flag * @unrecoverable: Controller unrecoverable flag + * @io_admin_reset_sync: Manage state of I/O ops during an admin reset process * @prev_reset_result: Result of previous reset * @reset_mutex: Controller reset mutex * @reset_waitq: Controller reset wait queue @@ -1039,9 +1143,6 @@ struct scmd_priv { * @io_throttle_low: I/O size to stop throttle in 512b blocks * @num_io_throttle_group: Maximum number of throttle groups * @throttle_groups: Pointer to throttle group info structures - * @cfg_page: Default memory for configuration pages - * @cfg_page_dma: Configuration page DMA address - * @cfg_page_sz: Default configuration page memory size * @sas_transport_enabled: SAS transport enabled or not * @scsi_device_channel: Channel ID for SCSI devices * @transport_cmds: Command tracker for SAS transport commands @@ -1050,11 +1151,27 @@ struct scmd_priv { * @sas_node_lock: Lock to protect SAS node list * @hba_port_table_list: List of HBA Ports * @enclosure_list: List of Enclosure objects + * @diag_buffers: Host diagnostic buffers + * @driver_pg2: Driver page 2 pointer + * @reply_trigger_present: Reply trigger present flag + * @event_trigger_present: Event trigger present flag + * @scsisense_trigger_present: SCSI sense trigger present flag * @ioctl_dma_pool: DMA pool for IOCTL data buffers * @ioctl_sge: DMA buffer descriptors for IOCTL data * @ioctl_chain_sge: DMA buffer descriptor for IOCTL chain * @ioctl_resp_sge: DMA buffer descriptor for Mgmt cmd response * @ioctl_sges_allocated: Flag for IOCTL SGEs allocated or not + * @trace_release_trigger_active: Trace trigger active flag + * @fw_release_trigger_active: FW release trigger active flag + * @snapdump_trigger_active: Snapdump trigger active flag + * @pci_err_recovery: PCI error recovery in progress + * 
@block_on_pci_err: Block IO during PCI error recovery + * @reply_qfull_count: Occurrences of reply queue full avoidance kicking in + * @prevent_reply_qfull: Enable reply queue full prevention + * @seg_tb_support: Segmented trace buffer support + * @num_tb_segs: Number of Segments in Trace buffer + * @trace_buf_pool: DMA pool for Segmented trace buffer segments + * @trace_buf: Trace buffer segments memory descriptor */ struct mpi3mr_ioc { struct list_head list; @@ -1091,6 +1208,7 @@ struct mpi3mr_ioc { void *admin_reply_base; dma_addr_t admin_reply_dma; atomic_t admin_reply_q_in_use; + atomic_t admin_pend_isr; u32 ready_timeout; @@ -1137,12 +1255,11 @@ struct mpi3mr_ioc { u32 sbq_host_index; u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS]; - char fwevt_worker_name[MPI3MR_NAME_LENGTH]; struct workqueue_struct *fwevt_worker_thread; spinlock_t fwevt_lock; struct list_head fwevt_list; - char watchdog_work_q_name[20]; + char watchdog_work_q_name[50]; struct workqueue_struct *watchdog_work_q; struct delayed_work watchdog_work; spinlock_t watchdog_lock; @@ -1175,9 +1292,11 @@ struct mpi3mr_ioc { unsigned long *evtack_cmds_bitmap; struct list_head delayed_evtack_cmds_list; - u32 ts_update_counter; + u16 ts_update_counter; + u16 ts_update_interval; u8 reset_in_progress; u8 unrecoverable; + u8 io_admin_reset_sync; int prev_reset_result; struct mutex reset_mutex; wait_queue_head_t reset_waitq; @@ -1228,10 +1347,6 @@ struct mpi3mr_ioc { u16 num_io_throttle_group; struct mpi3mr_throttle_group_info *throttle_groups; - void *cfg_page; - dma_addr_t cfg_page_dma; - u16 cfg_page_sz; - u8 sas_transport_enabled; u8 scsi_device_channel; struct mpi3mr_drv_cmd transport_cmds; @@ -1246,6 +1361,24 @@ struct mpi3mr_ioc { struct dma_memory_desc ioctl_chain_sge; struct dma_memory_desc ioctl_resp_sge; bool ioctl_sges_allocated; + bool reply_trigger_present; + bool event_trigger_present; + bool scsisense_trigger_present; + struct diag_buffer_desc diag_buffers[MPI3MR_MAX_NUM_HDB]; + struct mpi3_driver_page2 *driver_pg2; + spinlock_t trigger_lock; + bool snapdump_trigger_active; + bool trace_release_trigger_active; + bool fw_release_trigger_active; + bool pci_err_recovery; + bool block_on_pci_err; + atomic_t reply_qfull_count; + bool prevent_reply_qfull; + bool seg_tb_support; + u32 num_tb_segs; + struct dma_pool *trace_buf_pool; + struct segments *trace_buf; + }; /** @@ -1336,7 +1469,7 @@ struct mpi3mr_ioc { void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc); void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc); int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, - u32 reset_reason, u8 snapdump); + u16 reset_reason, u8 snapdump); void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc); void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc); @@ -1348,7 +1481,6 @@ void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout); void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc); void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc); void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc); -void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc); void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc); void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code); void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc); @@ -1403,6 +1535,8 @@ int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz); int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, struct mpi3_driver_page1 *driver_pg1, u16 pg_sz); +int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc 
*mrioc, + struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_type); u8 mpi3mr_is_expander_device(u16 device_info); int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle); @@ -1436,4 +1570,28 @@ void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc); int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc); void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_node *sas_expander); +void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc); +int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc); +int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc, + struct diag_buffer_desc *diag_buffer); +void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action); +void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb, + u8 type, union mpi3mr_trigger_data *trigger_data, bool force); +int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_type); +struct diag_buffer_desc *mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, + u8 buf_type); +int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc, + struct diag_buffer_desc *diag_buffer); +void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc, + u8 type, union mpi3mr_trigger_data *trigger_data, bool force); +void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 iocstatus, + u32 iocloginfo); +void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc, + struct trigger_event_data *event_data); +void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc, + u8 ascq); +void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event); +void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data); +void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply); #endif /*MPI3MR_H_INCLUDED*/ diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 55d590b91947..f36663613950 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -12,6 +12,912 @@ #include <uapi/scsi/scsi_bsg_mpi3mr.h> /** + * mpi3mr_alloc_trace_buffer - Allocate segmented trace buffer + * @mrioc: Adapter instance reference + * @trace_size: Trace buffer size + * + * Allocate either segmented memory pools or a contiguous buffer + * based on the controller capability for the host trace + * buffer. + * + * Return: 0 on success, non-zero on failure. 
+ */ +static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size) +{ + struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0]; + int i, sz; + u64 *diag_buffer_list = NULL; + dma_addr_t diag_buffer_list_dma; + u32 seg_count; + + if (mrioc->seg_tb_support) { + seg_count = (trace_size) / MPI3MR_PAGE_SIZE_4K; + trace_size = seg_count * MPI3MR_PAGE_SIZE_4K; + + diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev, + sizeof(u64) * seg_count, + &diag_buffer_list_dma, GFP_KERNEL); + if (!diag_buffer_list) + return -1; + + mrioc->num_tb_segs = seg_count; + + sz = sizeof(struct segments) * seg_count; + mrioc->trace_buf = kzalloc(sz, GFP_KERNEL); + if (!mrioc->trace_buf) + goto trace_buf_failed; + + mrioc->trace_buf_pool = dma_pool_create("trace_buf pool", + &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K, + 0); + if (!mrioc->trace_buf_pool) { + ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n"); + goto trace_buf_pool_failed; + } + + for (i = 0; i < seg_count; i++) { + mrioc->trace_buf[i].segment = + dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL, + &mrioc->trace_buf[i].segment_dma); + diag_buffer_list[i] = + (u64) mrioc->trace_buf[i].segment_dma; + if (!diag_buffer_list[i]) + goto tb_seg_alloc_failed; + } + + diag_buffer->addr = diag_buffer_list; + diag_buffer->dma_addr = diag_buffer_list_dma; + diag_buffer->is_segmented = true; + + dprint_init(mrioc, "segmented trace diag buffer is allocated successfully, seg_count:%d\n", + seg_count); + return 0; + } else { + diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev, + trace_size, &diag_buffer->dma_addr, GFP_KERNEL); + if (diag_buffer->addr) { + dprint_init(mrioc, "trace diag buffer is allocated successfully\n"); + return 0; + } + return -1; + } + +tb_seg_alloc_failed: + if (mrioc->trace_buf_pool) { + for (i = 0; i < mrioc->num_tb_segs; i++) { + if (mrioc->trace_buf[i].segment) { + dma_pool_free(mrioc->trace_buf_pool, + mrioc->trace_buf[i].segment, + mrioc->trace_buf[i].segment_dma); + mrioc->trace_buf[i].segment = NULL; + } + } + dma_pool_destroy(mrioc->trace_buf_pool); + mrioc->trace_buf_pool = NULL; + } +trace_buf_pool_failed: + kfree(mrioc->trace_buf); + mrioc->trace_buf = NULL; +trace_buf_failed: + if (diag_buffer_list) + dma_free_coherent(&mrioc->pdev->dev, + sizeof(u64) * mrioc->num_tb_segs, + diag_buffer_list, diag_buffer_list_dma); + return -1; +}
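mpi3mr_alloc_diag_bufs() below sizes each host diag buffer with a retry-with-decrement policy: start from the maximum size advertised in driver page 1 and shrink by the decrement size until an allocation succeeds or the size falls below the minimum. The same policy in isolation, as a hedged sketch (all identifiers are hypothetical and dec is assumed to be non-zero):

	static void *alloc_shrinking(struct device *dev, u32 *sz, u32 dec,
				     u32 min, dma_addr_t *dma)
	{
		void *buf = NULL;

		while (*sz >= min) {
			buf = dma_alloc_coherent(dev, *sz, dma, GFP_KERNEL);
			if (buf)
				break;
			if (*sz < dec)		/* avoid u32 underflow */
				break;
			*sz -= dec;		/* retry with a smaller buffer */
		}
		return buf;
	}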
+ +/** + * mpi3mr_alloc_diag_bufs - Allocate memory for diag buffers + * @mrioc: Adapter instance reference + * + * This function checks whether the driver defined buffer sizes + * are greater than the controller local buffer sizes provided + * by IOCFacts and, if the driver defined sizes are larger, + * allocates the specific buffer after reading driver page 1 + * + * Return: Nothing. + */ +void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc) +{ + struct diag_buffer_desc *diag_buffer; + struct mpi3_driver_page1 driver_pg1; + u32 trace_dec_size, trace_min_size, fw_dec_size, fw_min_size, + trace_size, fw_size; + u16 pg_sz = sizeof(driver_pg1); + int retval = 0; + bool retry = false; + + if (mrioc->diag_buffers[0].addr || mrioc->diag_buffers[1].addr) + return; + + retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz); + if (retval) { + ioc_warn(mrioc, + "%s: driver page 1 read failed, allocating trace\n" + "and firmware diag buffers of default size\n", __func__); + trace_size = fw_size = MPI3MR_DEFAULT_HDB_MAX_SZ; + trace_dec_size = fw_dec_size = MPI3MR_DEFAULT_HDB_DEC_SZ; + trace_min_size = fw_min_size = MPI3MR_DEFAULT_HDB_MIN_SZ; + + } else { + trace_size = driver_pg1.host_diag_trace_max_size * 1024; + trace_dec_size = driver_pg1.host_diag_trace_decrement_size + * 1024; + trace_min_size = driver_pg1.host_diag_trace_min_size * 1024; + fw_size = driver_pg1.host_diag_fw_max_size * 1024; + fw_dec_size = driver_pg1.host_diag_fw_decrement_size * 1024; + fw_min_size = driver_pg1.host_diag_fw_min_size * 1024; + dprint_init(mrioc, + "%s:trace diag buffer sizes read from driver\n" + "page1: maximum size = %dKB, decrement size = %dKB\n" + ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_trace_max_size, + driver_pg1.host_diag_trace_decrement_size, + driver_pg1.host_diag_trace_min_size); + dprint_init(mrioc, + "%s:firmware diag buffer sizes read from driver\n" + "page1: maximum size = %dKB, decrement size = %dKB\n" + ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_fw_max_size, + driver_pg1.host_diag_fw_decrement_size, + driver_pg1.host_diag_fw_min_size); + if ((trace_size == 0) && (fw_size == 0)) + return; + } + + +retry_trace: + diag_buffer = &mrioc->diag_buffers[0]; + diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_TRACE; + diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED; + if ((mrioc->facts.diag_trace_sz < trace_size) && (trace_size >= + trace_min_size)) { + if (!retry) + dprint_init(mrioc, + "trying to allocate trace diag buffer of size = %dKB\n", + trace_size / 1024); + if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) || + mpi3mr_alloc_trace_buffer(mrioc, trace_size)) { + + retry = true; + trace_size -= trace_dec_size; + dprint_init(mrioc, "trace diag buffer allocation failed,\n" + "retrying smaller size %dKB\n", trace_size / 1024); + goto retry_trace; + } else + diag_buffer->size = trace_size; + } + + retry = false; +retry_fw: + + diag_buffer = &mrioc->diag_buffers[1]; + + diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW; + diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED; + if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) { + if (get_order(fw_size) <= MAX_PAGE_ORDER) { + diag_buffer->addr + = dma_alloc_coherent(&mrioc->pdev->dev, fw_size, + &diag_buffer->dma_addr, + GFP_KERNEL); + } + if (!retry) + dprint_init(mrioc, + "%s:trying to allocate firmware diag buffer of size = %dKB\n", + __func__, fw_size / 1024); + if (diag_buffer->addr) { + dprint_init(mrioc, "%s:firmware diag buffer allocated successfully\n", + __func__); + diag_buffer->size = fw_size; + } else { + retry = true; + fw_size -= fw_dec_size; + dprint_init(mrioc, "%s:firmware diag buffer allocation failed,\n" + "retrying smaller size %dKB\n", + __func__, fw_size / 1024); + goto retry_fw; + } + } +}
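Taken together, the allocation helper above and the post helpers below are meant to be wired into controller bring-up roughly as follows (a hypothetical call site; mrioc is an assumed, initialized adapter instance). Note that mpi3mr_post_diag_bufs() skips any buffer that was never allocated, so the pair is safe to call even when driver page 1 reports zero sizes:

	mpi3mr_alloc_diag_bufs(mrioc);
	if (mpi3mr_post_diag_bufs(mrioc))
		ioc_warn(mrioc, "failed to post host diag buffers\n");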
+ +/** + * mpi3mr_issue_diag_buf_post - Send diag buffer post req + * @mrioc: Adapter instance reference + * @diag_buffer: Diagnostic buffer descriptor + * + * Issue diagnostic buffer post MPI request through admin queue + * and wait for the completion of it or time out. + * + * Return: 0 on success, non-zero on failures. + */ +int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc, + struct diag_buffer_desc *diag_buffer) +{ + struct mpi3_diag_buffer_post_request diag_buf_post_req; + u8 prev_status; + int retval = 0; + + if (diag_buffer->disabled_after_reset) { + dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n" + "as it is disabled after reset\n", __func__); + return -1; + } + + memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->init_cmds.mutex); + return -1; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + diag_buf_post_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + diag_buf_post_req.function = MPI3_FUNCTION_DIAG_BUFFER_POST; + diag_buf_post_req.type = diag_buffer->type; + diag_buf_post_req.address = cpu_to_le64(diag_buffer->dma_addr); + diag_buf_post_req.length = cpu_to_le32(diag_buffer->size); + + if (diag_buffer->is_segmented) + diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED; + + dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__, + diag_buffer->type, diag_buffer->is_segmented); + + prev_status = diag_buffer->status; + diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED; + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &diag_buf_post_req, + sizeof(diag_buf_post_req), 1); + if (retval) { + dprint_bsg_err(mrioc, "%s: admin request post failed\n", + __func__); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + mrioc->init_cmds.is_waiting = 0; + dprint_bsg_err(mrioc, "%s: command timed out\n", __func__); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + dprint_bsg_err(mrioc, + "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, diag_buffer->type, + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + dprint_bsg_info(mrioc, "%s: diag buffer type %d posted successfully\n", + __func__, diag_buffer->type); + +out_unlock: + if (retval) + diag_buffer->status = prev_status; + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); + return retval; +} + +/** + * mpi3mr_post_diag_bufs - Post diag buffers to the controller + * @mrioc: Adapter instance reference + * + * This function calls a helper function to post both trace and + * firmware buffers to the controller. 
+ * + * Return: 0 on success, non-zero on failures. + */ +int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc) +{ + u8 i; + struct diag_buffer_desc *diag_buffer; + + for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { + diag_buffer = &mrioc->diag_buffers[i]; + if (!(diag_buffer->addr)) + continue; + if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) + return -1; + } + return 0; +} + +/** + * mpi3mr_issue_diag_buf_release - Send diag buffer release req + * @mrioc: Adapter instance reference + * @diag_buffer: Diagnostic buffer descriptor + * + * Issue diagnostic buffer manage MPI request with release + * action request through admin queue and wait for the + * completion of it or time out. + * + * Return: 0 on success, non-zero on failures. + */ +int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc, + struct diag_buffer_desc *diag_buffer) +{ + struct mpi3_diag_buffer_manage_request diag_buf_manage_req; + int retval = 0; + + if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && + (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) + return retval; + + memset(&diag_buf_manage_req, 0, sizeof(diag_buf_manage_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + dprint_reset(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->init_cmds.mutex); + return -1; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + diag_buf_manage_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + diag_buf_manage_req.function = MPI3_FUNCTION_DIAG_BUFFER_MANAGE; + diag_buf_manage_req.type = diag_buffer->type; + diag_buf_manage_req.action = MPI3_DIAG_BUFFER_ACTION_RELEASE; + + + dprint_reset(mrioc, "%s: releasing diag buffer type %d\n", __func__, + diag_buffer->type); + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &diag_buf_manage_req, + sizeof(diag_buf_manage_req), 1); + if (retval) { + dprint_reset(mrioc, "%s: admin request post failed\n", __func__); + mpi3mr_set_trigger_data_in_hdb(diag_buffer, + MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + mrioc->init_cmds.is_waiting = 0; + dprint_reset(mrioc, "%s: command timed out\n", __func__); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + dprint_reset(mrioc, + "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, diag_buffer->type, + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + dprint_reset(mrioc, "%s: diag buffer type %d released successfully\n", + __func__, diag_buffer->type); + +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); + return retval; +} + +/** + * mpi3mr_process_trigger - Generic HDB Trigger handler + * @mrioc: Adapter instance reference + * @trigger_type: Trigger type + * @trigger_data: Trigger data + * @trigger_flags: Trigger flags + * + * This function checks the validity of the HDBs and triggers + * and, based on the trigger information, creates an event to be + * processed in the firmware event worker thread. 
+ * + * This function should be called with trigger spinlock held + * + * Return: Nothing + */ +static void mpi3mr_process_trigger(struct mpi3mr_ioc *mrioc, u8 trigger_type, + union mpi3mr_trigger_data *trigger_data, u8 trigger_flags) +{ + struct trigger_event_data event_data; + struct diag_buffer_desc *trace_hdb = NULL; + struct diag_buffer_desc *fw_hdb = NULL; + u64 global_trigger; + + trace_hdb = mpi3mr_diag_buffer_for_type(mrioc, + MPI3_DIAG_BUFFER_TYPE_TRACE); + if (trace_hdb && + (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && + (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) + trace_hdb = NULL; + + fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW); + + if (fw_hdb && + (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && + (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) + fw_hdb = NULL; + + if (mrioc->snapdump_trigger_active || (mrioc->fw_release_trigger_active + && mrioc->trace_release_trigger_active) || + (!trace_hdb && !fw_hdb) || (!mrioc->driver_pg2) || + ((trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) + && (!mrioc->driver_pg2->num_triggers))) + return; + + memset(&event_data, 0, sizeof(event_data)); + event_data.trigger_type = trigger_type; + memcpy(&event_data.trigger_specific_data, trigger_data, + sizeof(*trigger_data)); + global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger); + + if (global_trigger & MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED) { + event_data.snapdump = true; + event_data.trace_hdb = trace_hdb; + event_data.fw_hdb = fw_hdb; + mrioc->snapdump_trigger_active = true; + } else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_GLOBAL) { + if ((trace_hdb) && (global_trigger & + MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE) && + (!mrioc->trace_release_trigger_active)) { + event_data.trace_hdb = trace_hdb; + mrioc->trace_release_trigger_active = true; + } + if ((fw_hdb) && (global_trigger & + MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE) && + (!mrioc->fw_release_trigger_active)) { + event_data.fw_hdb = fw_hdb; + mrioc->fw_release_trigger_active = true; + } + } else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) { + if ((trace_hdb) && (trigger_flags & + MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE) && + (!mrioc->trace_release_trigger_active)) { + event_data.trace_hdb = trace_hdb; + mrioc->trace_release_trigger_active = true; + } + if ((fw_hdb) && (trigger_flags & + MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE) && + (!mrioc->fw_release_trigger_active)) { + event_data.fw_hdb = fw_hdb; + mrioc->fw_release_trigger_active = true; + } + } + + if (event_data.trace_hdb || event_data.fw_hdb) + mpi3mr_hdb_trigger_data_event(mrioc, &event_data); +} + +/** + * mpi3mr_global_trigger - Global HDB trigger handler + * @mrioc: Adapter instance reference + * @trigger_data: Trigger data + * + * This function checks whether the given global trigger is + * enabled in the driver page 2 and if so calls generic trigger + * handler to queue event for HDB release. 
+ * + * Return: Nothing + */ +void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data) +{ + unsigned long flags; + union mpi3mr_trigger_data trigger_specific_data; + + spin_lock_irqsave(&mrioc->trigger_lock, flags); + if (le64_to_cpu(mrioc->driver_pg2->global_trigger) & trigger_data) { + memset(&trigger_specific_data, 0, + sizeof(trigger_specific_data)); + trigger_specific_data.global = trigger_data; + mpi3mr_process_trigger(mrioc, MPI3MR_HDB_TRIGGER_TYPE_GLOBAL, + &trigger_specific_data, 0); + } + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); +} + +/** + * mpi3mr_scsisense_trigger - SCSI sense HDB trigger handler + * @mrioc: Adapter instance reference + * @sensekey: Sense Key + * @asc: Additional Sense Code + * @ascq: Additional Sense Code Qualifier + * + * This function compares SCSI sense trigger values with driver + * page 2 values and calls generic trigger handler to release + * HDBs if match found + * + * Return: Nothing + */ +void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc, + u8 ascq) +{ + struct mpi3_driver2_trigger_scsi_sense *scsi_sense_trigger = NULL; + u64 i = 0; + unsigned long flags; + u8 num_triggers, trigger_flags; + + if (mrioc->scsisense_trigger_present) { + spin_lock_irqsave(&mrioc->trigger_lock, flags); + scsi_sense_trigger = (struct mpi3_driver2_trigger_scsi_sense *) + mrioc->driver_pg2->trigger; + num_triggers = mrioc->driver_pg2->num_triggers; + for (i = 0; i < num_triggers; i++, scsi_sense_trigger++) { + if (scsi_sense_trigger->type != + MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE) + continue; + if (!(scsi_sense_trigger->sense_key == + MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL + || scsi_sense_trigger->sense_key == sensekey)) + continue; + if (!(scsi_sense_trigger->asc == + MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL || + scsi_sense_trigger->asc == asc)) + continue; + if (!(scsi_sense_trigger->ascq == + MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL || + scsi_sense_trigger->ascq == ascq)) + continue; + trigger_flags = scsi_sense_trigger->flags; + mpi3mr_process_trigger(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_ELEMENT, + (union mpi3mr_trigger_data *)scsi_sense_trigger, + trigger_flags); + break; + } + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); + } +} + +/** + * mpi3mr_event_trigger - MPI event HDB trigger handler + * @mrioc: Adapter instance reference + * @event: MPI Event + * + * This function compares event trigger values with driver page + * 2 values and calls generic trigger handler to release + * HDBs if match found. 
+ * + * Return: Nothing + */ +void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event) +{ + struct mpi3_driver2_trigger_event *event_trigger = NULL; + u64 i = 0; + unsigned long flags; + u8 num_triggers, trigger_flags; + + if (mrioc->event_trigger_present) { + spin_lock_irqsave(&mrioc->trigger_lock, flags); + event_trigger = (struct mpi3_driver2_trigger_event *) + mrioc->driver_pg2->trigger; + num_triggers = mrioc->driver_pg2->num_triggers; + + for (i = 0; i < num_triggers; i++, event_trigger++) { + if (event_trigger->type != + MPI3_DRIVER2_TRIGGER_TYPE_EVENT) + continue; + if (event_trigger->event != event) + continue; + trigger_flags = event_trigger->flags; + mpi3mr_process_trigger(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_ELEMENT, + (union mpi3mr_trigger_data *)event_trigger, + trigger_flags); + break; + } + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); + } +} + +/** + * mpi3mr_reply_trigger - MPI Reply HDB trigger handler + * @mrioc: Adapter instance reference + * @ioc_status: Masked value of IOC Status from MPI Reply + * @ioc_loginfo: IOC Log Info from MPI Reply + * + * This function compares IOC status and IOC log info trigger + * values with driver page 2 values and calls generic trigger + * handler to release HDBs if match found. + * + * Return: Nothing + */ +void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 ioc_status, + u32 ioc_loginfo) +{ + struct mpi3_driver2_trigger_reply *reply_trigger = NULL; + u64 i = 0; + unsigned long flags; + u8 num_triggers, trigger_flags; + + if (mrioc->reply_trigger_present) { + spin_lock_irqsave(&mrioc->trigger_lock, flags); + reply_trigger = (struct mpi3_driver2_trigger_reply *) + mrioc->driver_pg2->trigger; + num_triggers = mrioc->driver_pg2->num_triggers; + for (i = 0; i < num_triggers; i++, reply_trigger++) { + if (reply_trigger->type != + MPI3_DRIVER2_TRIGGER_TYPE_REPLY) + continue; + if ((le16_to_cpu(reply_trigger->ioc_status) != + ioc_status) + && (le16_to_cpu(reply_trigger->ioc_status) != + MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL)) + continue; + if ((le32_to_cpu(reply_trigger->ioc_log_info) != + (le32_to_cpu(reply_trigger->ioc_log_info_mask) & + ioc_loginfo))) + continue; + trigger_flags = reply_trigger->flags; + mpi3mr_process_trigger(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_ELEMENT, + (union mpi3mr_trigger_data *)reply_trigger, + trigger_flags); + break; + } + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); + } +} + +/** + * mpi3mr_get_num_trigger - Gets number of HDB triggers + * @mrioc: Adapter instance reference + * @num_triggers: Number of triggers + * @page_action: Page action + * + * This function reads number of triggers by reading driver page + * 2 + * + * Return: 0 on success and proper error codes on failure + */ +static int mpi3mr_get_num_trigger(struct mpi3mr_ioc *mrioc, u8 *num_triggers, + u8 page_action) +{ + struct mpi3_driver_page2 drvr_page2; + int retval = 0; + + *num_triggers = 0; + + retval = mpi3mr_cfg_get_driver_pg2(mrioc, &drvr_page2, + sizeof(struct mpi3_driver_page2), page_action); + + if (retval) { + dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__); + return retval; + } + *num_triggers = drvr_page2.num_triggers; + return retval; +} + +/** + * mpi3mr_refresh_trigger - Handler for Refresh trigger BSG + * @mrioc: Adapter instance reference + * @page_action: Page action + * + * This function caches the driver page 2 in the driver's memory + * by reading driver page 2 from the controller for a given page + * type and updates the HDB trigger values + * + * Return: 0 on success and proper error 
codes on failure + */ +int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_action) +{ + u16 pg_sz = sizeof(struct mpi3_driver_page2); + struct mpi3_driver_page2 *drvr_page2 = NULL; + u8 trigger_type, num_triggers; + int retval; + int i = 0; + unsigned long flags; + + retval = mpi3mr_get_num_trigger(mrioc, &num_triggers, page_action); + + if (retval) + goto out; + + pg_sz = offsetof(struct mpi3_driver_page2, trigger) + + (num_triggers * sizeof(union mpi3_driver2_trigger_element)); + drvr_page2 = kzalloc(pg_sz, GFP_KERNEL); + if (!drvr_page2) { + retval = -ENOMEM; + goto out; + } + + retval = mpi3mr_cfg_get_driver_pg2(mrioc, drvr_page2, pg_sz, page_action); + if (retval) { + dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__); + kfree(drvr_page2); + goto out; + } + spin_lock_irqsave(&mrioc->trigger_lock, flags); + kfree(mrioc->driver_pg2); + mrioc->driver_pg2 = drvr_page2; + mrioc->reply_trigger_present = false; + mrioc->event_trigger_present = false; + mrioc->scsisense_trigger_present = false; + + for (i = 0; (i < mrioc->driver_pg2->num_triggers); i++) { + trigger_type = mrioc->driver_pg2->trigger[i].event.type; + switch (trigger_type) { + case MPI3_DRIVER2_TRIGGER_TYPE_REPLY: + mrioc->reply_trigger_present = true; + break; + case MPI3_DRIVER2_TRIGGER_TYPE_EVENT: + mrioc->event_trigger_present = true; + break; + case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE: + mrioc->scsisense_trigger_present = true; + break; + default: + break; + } + } + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); +out: + return retval; +} + +/** + * mpi3mr_release_diag_bufs - Release diag buffers + * @mrioc: Adapter instance reference + * @skip_rel_action: Skip release action and set buffer state + * + * This function calls a helper function to release both trace + * and firmware buffers from the controller. 
+ * + * Return: None + */ +void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action) +{ + u8 i; + struct diag_buffer_desc *diag_buffer; + + for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { + diag_buffer = &mrioc->diag_buffers[i]; + if (!(diag_buffer->addr)) + continue; + if (diag_buffer->status == MPI3MR_HDB_BUFSTATUS_RELEASED) + continue; + if (!skip_rel_action) + mpi3mr_issue_diag_buf_release(mrioc, diag_buffer); + diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED; + atomic64_inc(&event_counter); + } +} + +/** + * mpi3mr_set_trigger_data_in_hdb - Updates HDB trigger type and + * trigger data + * + * @hdb: HDB pointer + * @type: Trigger type + * @force: Trigger overwrite flag + * @trigger_data: Pointer to trigger data information + * + * Updates trigger type and trigger data based on the parameters + * passed to this function + * + * Return: Nothing + */ +void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb, + u8 type, union mpi3mr_trigger_data *trigger_data, bool force) +{ + if ((!force) && (hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN)) + return; + hdb->trigger_type = type; + if (!trigger_data) + memset(&hdb->trigger_data, 0, sizeof(*trigger_data)); + else + memcpy(&hdb->trigger_data, trigger_data, sizeof(*trigger_data)); +} + +/** + * mpi3mr_set_trigger_data_in_all_hdb - Updates HDB trigger type + * and trigger data for all HDB + * + * @mrioc: Adapter instance reference + * @type: Trigger type + * @force: Trigger overwrite flag + * @trigger_data: Pointer to trigger data information + * + * Updates trigger type and trigger data based on the parameters + * passed to this function + * + * Return: Nothing + */ +void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc, + u8 type, union mpi3mr_trigger_data *trigger_data, bool force) +{ + struct diag_buffer_desc *hdb = NULL; + + hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE); + if (hdb) + mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force); + hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW); + if (hdb) + mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force); +} + +/** + * mpi3mr_hdbstatuschg_evt_th - HDB status change evt tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Modifies the status of the applicable diag buffer descriptors + * + * Return: Nothing + */ +void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_diag_buffer_status_change *evtdata; + struct diag_buffer_desc *diag_buffer; + + evtdata = (struct mpi3_event_data_diag_buffer_status_change *) + event_reply->event_data; + + diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, evtdata->type); + if (!diag_buffer) + return; + if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) && + (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) + return; + switch (evtdata->reason_code) { + case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED: + { + diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED; + mpi3mr_set_trigger_data_in_hdb(diag_buffer, + MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); + atomic64_inc(&event_counter); + break; + } + case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED: + { + diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED; + break; + } + case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED: + { + diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED; + break; + } + default: 
dprint_event_th(mrioc, "%s: unknown reason_code(%d)\n", + __func__, evtdata->reason_code); + break; + } +} + +/** + * mpi3mr_diag_buffer_for_type - returns buffer desc for type + * @mrioc: Adapter instance reference + * @buf_type: Diagnostic buffer type + * + * Identifies matching diag descriptor from mrioc for given diag + * buffer type. + * + * Return: diag buffer descriptor on success, NULL on failures. + */ + +struct diag_buffer_desc * +mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, u8 buf_type) +{ + u8 i; + + for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { + if (mrioc->diag_buffers[i].type == buf_type) + return &mrioc->diag_buffers[i]; + } + return NULL; +} + +/** * mpi3mr_bsg_pel_abort - sends PEL abort request * @mrioc: Adapter instance reference * @@ -31,7 +937,7 @@ static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc) dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); return -1; } - if (mrioc->stop_bsgs) { + if (mrioc->stop_bsgs || mrioc->block_on_pci_err) { dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); return -1; } @@ -126,6 +1032,259 @@ static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number) } /** + * mpi3mr_bsg_refresh_hdb_triggers - Refresh HDB trigger data + * @mrioc: Adapter instance reference + * @job: BSG Job pointer + * + * This function reads the controller trigger config page as + * defined by the input page type and refreshes the driver's + * local trigger information structures with the controller's + * config page data. + * + * Return: 0 on success and proper error codes on failure + */ +static long +mpi3mr_bsg_refresh_hdb_triggers(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + struct mpi3mr_bsg_out_refresh_hdb_triggers refresh_triggers; + uint32_t data_out_sz; + u8 page_action; + long rval = -EINVAL; + + data_out_sz = job->request_payload.payload_len; + + if (data_out_sz != sizeof(refresh_triggers)) { + dprint_bsg_err(mrioc, "%s: invalid size argument\n", + __func__); + return rval; + } + + if (mrioc->unrecoverable) { + dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", + __func__); + return -EFAULT; + } + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + return -EAGAIN; + } + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &refresh_triggers, sizeof(refresh_triggers)); + + switch (refresh_triggers.page_type) { + case MPI3MR_HDB_REFRESH_TYPE_CURRENT: + page_action = MPI3_CONFIG_ACTION_READ_CURRENT; + break; + case MPI3MR_HDB_REFRESH_TYPE_DEFAULT: + page_action = MPI3_CONFIG_ACTION_READ_DEFAULT; + break; + case MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT: + page_action = MPI3_CONFIG_ACTION_READ_PERSISTENT; + break; + default: + dprint_bsg_err(mrioc, + "%s: unsupported refresh trigger, page_type %d\n", + __func__, refresh_triggers.page_type); + return rval; + } + rval = mpi3mr_refresh_trigger(mrioc, page_action); + + return rval; +} + +/** + * mpi3mr_bsg_upload_hdb - Upload a specific HDB to user space + * @mrioc: Adapter instance reference + * @job: BSG Job pointer + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_bsg_upload_hdb(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + struct mpi3mr_bsg_out_upload_hdb upload_hdb; + struct diag_buffer_desc *diag_buffer; + uint32_t data_out_size; + uint32_t data_in_size; + + data_out_size = job->request_payload.payload_len; + data_in_size = job->reply_payload.payload_len; + + if (data_out_size != sizeof(upload_hdb)) { + dprint_bsg_err(mrioc, "%s: invalid size 
argument\n", + __func__); + return -EINVAL; + } + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &upload_hdb, sizeof(upload_hdb)); + + if ((!upload_hdb.length) || (data_in_size != upload_hdb.length)) { + dprint_bsg_err(mrioc, "%s: invalid length argument\n", + __func__); + return -EINVAL; + } + diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, upload_hdb.buf_type); + if ((!diag_buffer) || (!diag_buffer->addr)) { + dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n", + __func__, upload_hdb.buf_type); + return -EINVAL; + } + + if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) && + (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) { + dprint_bsg_err(mrioc, + "%s: invalid buffer status %d for type %d\n", + __func__, diag_buffer->status, upload_hdb.buf_type); + return -EINVAL; + } + + if ((upload_hdb.start_offset + upload_hdb.length) > diag_buffer->size) { + dprint_bsg_err(mrioc, + "%s: invalid start offset %d, length %d for type %d\n", + __func__, upload_hdb.start_offset, upload_hdb.length, + upload_hdb.buf_type); + return -EINVAL; + } + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + (diag_buffer->addr + upload_hdb.start_offset), + data_in_size); + return 0; +} + +/** + * mpi3mr_bsg_repost_hdb - Re-post HDB + * @mrioc: Adapter instance reference + * @job: BSG job pointer + * + * This function retrieves the HDB descriptor corresponding to a + * given buffer type and if the HDB is in released status then + * posts the HDB with the firmware. + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_bsg_repost_hdb(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + struct mpi3mr_bsg_out_repost_hdb repost_hdb; + struct diag_buffer_desc *diag_buffer; + uint32_t data_out_sz; + + data_out_sz = job->request_payload.payload_len; + + if (data_out_sz != sizeof(repost_hdb)) { + dprint_bsg_err(mrioc, "%s: invalid size argument\n", + __func__); + return -EINVAL; + } + if (mrioc->unrecoverable) { + dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", + __func__); + return -EFAULT; + } + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + return -EAGAIN; + } + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &repost_hdb, sizeof(repost_hdb)); + + diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, repost_hdb.buf_type); + if ((!diag_buffer) || (!diag_buffer->addr)) { + dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n", + __func__, repost_hdb.buf_type); + return -EINVAL; + } + + if (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) { + dprint_bsg_err(mrioc, + "%s: invalid buffer status %d for type %d\n", + __func__, diag_buffer->status, repost_hdb.buf_type); + return -EINVAL; + } + + if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) { + dprint_bsg_err(mrioc, "%s: post failed for type %d\n", + __func__, repost_hdb.buf_type); + return -EFAULT; + } + mpi3mr_set_trigger_data_in_hdb(diag_buffer, + MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); + + return 0; +} + +/** + * mpi3mr_bsg_query_hdb - Handler for query HDB command + * @mrioc: Adapter instance reference + * @job: BSG job pointer + * + * This function prepares and copies the host diagnostic buffer + * entries to the user buffer. 
+ * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_bsg_query_hdb(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + long rval = 0; + struct mpi3mr_bsg_in_hdb_status *hbd_status; + struct mpi3mr_hdb_entry *hbd_status_entry; + u32 length, min_length; + u8 i; + struct diag_buffer_desc *diag_buffer; + uint32_t data_in_sz = 0; + + data_in_sz = job->request_payload.payload_len; + + length = (sizeof(*hbd_status) + ((MPI3MR_MAX_NUM_HDB - 1) * + sizeof(*hbd_status_entry))); + hbd_status = kmalloc(length, GFP_KERNEL); + if (!hbd_status) + return -ENOMEM; + hbd_status_entry = &hbd_status->entry[0]; + + hbd_status->num_hdb_types = MPI3MR_MAX_NUM_HDB; + for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { + diag_buffer = &mrioc->diag_buffers[i]; + hbd_status_entry->buf_type = diag_buffer->type; + hbd_status_entry->status = diag_buffer->status; + hbd_status_entry->trigger_type = diag_buffer->trigger_type; + memcpy(&hbd_status_entry->trigger_data, + &diag_buffer->trigger_data, + sizeof(hbd_status_entry->trigger_data)); + hbd_status_entry->size = (diag_buffer->size / 1024); + hbd_status_entry++; + } + hbd_status->element_trigger_format = + MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA; + + if (data_in_sz < 4) { + dprint_bsg_err(mrioc, "%s: invalid size passed\n", __func__); + rval = -EINVAL; + goto out; + } + min_length = min(data_in_sz, length); + if (job->request_payload.payload_len >= min_length) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + hbd_status, min_length); + rval = 0; + } +out: + kfree(hbd_status); + return rval; +} + + +/** * mpi3mr_enable_logdata - Handler for log data enable * @mrioc: Adapter instance reference * @job: BSG job reference @@ -424,6 +1583,9 @@ static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc, goto out; } + if (mrioc->unrecoverable || mrioc->block_on_pci_err) + return -EINVAL; + sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, &adpreset, sizeof(adpreset)); @@ -553,6 +1715,18 @@ static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job) case MPI3MR_DRVBSG_OPCODE_PELENABLE: rval = mpi3mr_bsg_pel_enable(mrioc, job); break; + case MPI3MR_DRVBSG_OPCODE_QUERY_HDB: + rval = mpi3mr_bsg_query_hdb(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_REPOST_HDB: + rval = mpi3mr_bsg_repost_hdb(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB: + rval = mpi3mr_bsg_upload_hdb(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS: + rval = mpi3mr_bsg_refresh_hdb_triggers(mrioc, job); + break; case MPI3MR_DRVBSG_OPCODE_UNKNOWN: default: pr_err("%s: unsupported driver command opcode %d\n", @@ -1241,7 +2415,17 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) if (!mrioc) return -ENODEV; + if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) + return -ERESTARTSYS; + + if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) { + dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); + return -EAGAIN; + } + if (!mrioc->ioctl_sges_allocated) { + mutex_unlock(&mrioc->bsg_cmds.mutex); dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n", __func__); return -ENOMEM; @@ -1251,13 +2435,16 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT; mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL); - if (!mpi_req) + if (!mpi_req) { + mutex_unlock(&mrioc->bsg_cmds.mutex); return -ENOMEM; + } mpi_header = (struct mpi3_request_header *)mpi_req; bufcnt = 
karg->buf_entry_list.num_of_entries; drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL); if (!drv_bufs) { + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -ENOMEM; goto out; } @@ -1265,6 +2452,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dout_buf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); if (!dout_buf) { + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -ENOMEM; goto out; } @@ -1272,6 +2460,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) din_buf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL); if (!din_buf) { + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -ENOMEM; goto out; } @@ -1347,6 +2536,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) { dprint_bsg_err(mrioc, "%s: invalid MPI message size\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -1359,6 +2549,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) if (invalid_be) { dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -1366,12 +2557,14 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) { dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -1384,6 +2577,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n", __func__, __LINE__, mpi_header->function, din_size, dout_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -1392,6 +2586,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n", __func__, __LINE__, mpi_header->function, din_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -1399,6 +2594,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n", __func__, __LINE__, mpi_header->function, dout_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -1409,6 +2605,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n", __func__, __LINE__, din_cnt, dout_cnt, din_size, dout_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -1456,6 +2653,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) continue; if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) { rval = -ENOMEM; + mutex_unlock(&mrioc->bsg_cmds.mutex); dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n", __func__, __LINE__); goto out; @@ -1468,20 +2666,11 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) sense_buff_k = kzalloc(erbsz, GFP_KERNEL); if (!sense_buff_k) { rval = -ENOMEM; + mutex_unlock(&mrioc->bsg_cmds.mutex); goto out; } } - if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) { - rval = -ERESTARTSYS; - goto out; - } - if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) 
{ - rval = -EAGAIN; - dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); - mutex_unlock(&mrioc->bsg_cmds.mutex); - goto out; - } if (mrioc->unrecoverable) { dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", __func__); @@ -1495,7 +2684,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) mutex_unlock(&mrioc->bsg_cmds.mutex); goto out; } - if (mrioc->stop_bsgs) { + if (mrioc->stop_bsgs || mrioc->block_on_pci_err) { dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); rval = -EAGAIN; mutex_unlock(&mrioc->bsg_cmds.mutex); @@ -1598,26 +2787,33 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) rval = -EAGAIN; if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET) goto out_unlock; - dprint_bsg_err(mrioc, - "%s: bsg request timedout after %d seconds\n", __func__, - karg->timeout); - if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) { - dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, + if (((mpi_header->function != MPI3_FUNCTION_SCSI_IO) && + (mpi_header->function != MPI3_FUNCTION_NVME_ENCAPSULATED)) + || (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR)) { + ioc_info(mrioc, "%s: bsg request timedout after %d seconds\n", + __func__, karg->timeout); + if (!(mrioc->logging_level & MPI3_DEBUG_BSG_INFO)) { + dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, "bsg_mpi3_req"); if (mpi_header->function == - MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { + MPI3_FUNCTION_MGMT_PASSTHROUGH) { drv_buf_iter = &drv_bufs[0]; dprint_dump(drv_buf_iter->kern_buf, rmc_size, "mpi3_mgmt_req"); + } } } if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) || - (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) + (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) { + dprint_bsg_err(mrioc, "%s: bsg request timedout after %d seconds,\n" + "issuing target reset to (0x%04x)\n", __func__, + karg->timeout, mpi_header->function_dependent); mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, mpi_header->function_dependent, 0, MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL); + } if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) && !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)) mpi3mr_soft_reset_handler(mrioc, @@ -1838,6 +3034,11 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) { struct device *bsg_dev = &mrioc->bsg_dev; struct device *parent = &mrioc->shost->shost_gendev; + struct queue_limits lim = { + .max_hw_sectors = MPI3MR_MAX_APP_XFER_SECTORS, + .max_segments = MPI3MR_MAX_APP_XFER_SEGMENTS, + }; + struct request_queue *q; device_initialize(bsg_dev); @@ -1853,9 +3054,9 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) return; } - mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), + q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim, mpi3mr_bsg_request, NULL, 0); - if (IS_ERR(mrioc->bsg_queue)) { + if (IS_ERR(q)) { ioc_err(mrioc, "%s: bsg registration failed\n", dev_name(bsg_dev)); device_del(bsg_dev); @@ -1863,10 +3064,7 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) return; } - blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS); - blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS); - - return; + mrioc->bsg_queue = q; } /** @@ -1950,6 +3148,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(reply_queue_count); /** + * reply_qfull_count_show - Show reply qfull count + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Retrieves the current value of the reply_qfull_count from the mrioc structure and 
+ * formats it as a string for display. + * + * Return: sysfs_emit() return + */ +static ssize_t +reply_qfull_count_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count)); +} + +static DEVICE_ATTR_RO(reply_qfull_count); + +/** * logging_level_show - Show controller debug level * @dev: class device * @attr: Device attributes @@ -2023,7 +3244,8 @@ adp_state_show(struct device *dev, struct device_attribute *attr, ioc_state = mpi3mr_get_iocstate(mrioc); if (ioc_state == MRIOC_STATE_UNRECOVERABLE) adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; - else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) + else if (mrioc->reset_in_progress || mrioc->stop_bsgs || + mrioc->block_on_pci_err) adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; else if (ioc_state == MRIOC_STATE_FAULT) adp_state = MPI3MR_BSG_ADPSTATE_FAULT; @@ -2040,6 +3262,7 @@ static struct attribute *mpi3mr_host_attrs[] = { &dev_attr_fw_queue_depth.attr, &dev_attr_op_req_q_count.attr, &dev_attr_reply_queue_count.attr, + &dev_attr_reply_qfull_count.attr, &dev_attr_logging_level.attr, &dev_attr_adp_state.attr, NULL, @@ -2158,10 +3381,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(persistent_id); +/** + * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_supported attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read-only' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_supported_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); +} +static DEVICE_ATTR_RO(sas_ncq_prio_supported); + +/** + * sas_ncq_prio_enable_show - send prioritized io commands to device + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_enable attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read/write' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + + if (!sdev_priv_data) + return 0; + + return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable); +} + +static ssize_t +sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + bool ncq_prio_enable = 0; + + if (kstrtobool(buf, &ncq_prio_enable)) + return -EINVAL; + + if (!sas_ata_ncq_prio_supported(sdev)) + return -EINVAL; + + sdev_priv_data->ncq_prio_enable = ncq_prio_enable; + + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + static struct attribute *mpi3mr_dev_attrs[] = { &dev_attr_sas_address.attr, &dev_attr_device_handle.attr, &dev_attr_persistent_id.attr, + &dev_attr_sas_ncq_prio_supported.attr, + &dev_attr_sas_ncq_prio_enable.attr, NULL, }; diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 528f19f782f2..1d7901a8f0e4 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -11,13 +11,13 @@ #include 
<linux/io-64-nonatomic-lo-hi.h> static int -mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason); +mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason); static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc); static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, struct mpi3_ioc_facts_data *facts_data); static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc, struct mpi3mr_drv_cmd *drv_cmd); - +static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc); static int poll_queues; module_param(poll_queues, int, 0444); MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)"); @@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, char *desc = NULL; u16 event; + if (!(mrioc->logging_level & MPI3_DEBUG_EVENT)) + return; + event = event_reply->event; switch (event) { @@ -274,6 +277,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, case MPI3_EVENT_PREPARE_FOR_RESET: desc = "Prepare For Reset"; break; + case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: + desc = "Diagnostic Buffer Status Change"; + break; } if (!desc) @@ -342,13 +348,15 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, { u16 reply_desc_type, host_tag = 0; u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; - u32 ioc_loginfo = 0; + u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS; + u32 ioc_loginfo = 0, sense_count = 0; struct mpi3_status_reply_descriptor *status_desc; struct mpi3_address_reply_descriptor *addr_desc; struct mpi3_success_reply_descriptor *success_desc; struct mpi3_default_reply *def_reply = NULL; struct mpi3mr_drv_cmd *cmdptr = NULL; struct mpi3_scsi_io_reply *scsi_reply; + struct scsi_sense_hdr sshdr; u8 *sense_buf = NULL; *reply_dma = 0; @@ -362,7 +370,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, if (ioc_status & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); - ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK; + mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; @@ -375,12 +384,20 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, if (ioc_status & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info); - ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK; if (def_reply->function == MPI3_FUNCTION_SCSI_IO) { scsi_reply = (struct mpi3_scsi_io_reply *)def_reply; sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, le64_to_cpu(scsi_reply->sense_data_buffer_address)); + sense_count = le32_to_cpu(scsi_reply->sense_count); + if (sense_buf) { + scsi_normalize_sense(sense_buf, sense_count, + &sshdr); + mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, + sshdr.asc, sshdr.ascq); + } } + mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; @@ -395,7 +412,10 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, if (cmdptr->state & MPI3MR_CMD_PENDING) { cmdptr->state |= MPI3MR_CMD_COMPLETE; cmdptr->ioc_loginfo = ioc_loginfo; - cmdptr->ioc_status = ioc_status; + if (host_tag == 
MPI3MR_HOSTTAG_BSG_CMDS) + cmdptr->ioc_status = ioc_status; + else + cmdptr->ioc_status = masked_ioc_status; cmdptr->state &= ~MPI3MR_CMD_PENDING; if (def_reply) { cmdptr->state |= MPI3MR_CMD_REPLY_VALID; @@ -426,11 +446,15 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) u32 admin_reply_ci = mrioc->admin_reply_ci; u32 num_admin_replies = 0; u64 reply_dma = 0; + u16 threshold_comps = 0; struct mpi3_default_reply_descriptor *reply_desc; - if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) + if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) { + atomic_inc(&mrioc->admin_pend_isr); return 0; + } + atomic_set(&mrioc->admin_pend_isr, 0); reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + admin_reply_ci; @@ -441,7 +465,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) } do { - if (mrioc->unrecoverable) + if (mrioc->unrecoverable || mrioc->io_admin_reset_sync) break; mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci); @@ -449,6 +473,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) if (reply_dma) mpi3mr_repost_reply_buf(mrioc, reply_dma); num_admin_replies++; + threshold_comps++; if (++admin_reply_ci == mrioc->num_admin_replies) { admin_reply_ci = 0; exp_phase ^= 1; @@ -459,6 +484,11 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) if ((le16_to_cpu(reply_desc->reply_flags) & MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) break; + if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) { + writel(admin_reply_ci, + &mrioc->sysif_regs->admin_reply_queue_ci); + threshold_comps = 0; + } } while (1); writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); @@ -512,7 +542,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, u32 num_op_reply = 0; u64 reply_dma = 0; struct mpi3_default_reply_descriptor *reply_desc; - u16 req_q_idx = 0, reply_qidx; + u16 req_q_idx = 0, reply_qidx, threshold_comps = 0; reply_qidx = op_reply_q->qid - 1; @@ -530,7 +560,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, } do { - if (mrioc->unrecoverable) + if (mrioc->unrecoverable || mrioc->io_admin_reset_sync) break; req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1; @@ -539,10 +569,11 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci)); mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma, reply_qidx); - atomic_dec(&op_reply_q->pend_ios); + if (reply_dma) mpi3mr_repost_reply_buf(mrioc, reply_dma); num_op_reply++; + threshold_comps++; if (++reply_ci == op_reply_q->num_replies) { reply_ci = 0; @@ -564,13 +595,19 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, break; } #endif + if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) { + writel(reply_ci, + &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); + atomic_sub(threshold_comps, &op_reply_q->pend_ios); + threshold_comps = 0; + } } while (1); writel(reply_ci, &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); op_reply_q->ci = reply_ci; op_reply_q->ephase = exp_phase; - + atomic_sub(threshold_comps, &op_reply_q->pend_ios); atomic_dec(&op_reply_q->in_use); return num_op_reply; } @@ -595,7 +632,7 @@ int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) mrioc = (struct mpi3mr_ioc *)shost->hostdata; if ((mrioc->reset_in_progress || mrioc->prepare_for_reset || - mrioc->unrecoverable)) + mrioc->unrecoverable || mrioc->pci_err_recovery)) return 0; num_entries = mpi3mr_process_op_reply_q(mrioc, 
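[Editor's note, illustrative sketch, not part of the patch: both reply loops above now batch consumer-index write-backs. Instead of touching the register per reply, the CI is posted once per MPI3MR_THRESHOLD_REPLY_COUNT completions and once more when the loop exits. The stand-alone C below models that pattern; the queue depth, threshold value, and post_ci() helper are made-up stand-ins for the driver's writel() to the MMIO CI register.]

/* ci_batch_demo.c - amortize consumer-index write-backs over N completions */
#include <stdio.h>

#define QDEPTH    16
#define THRESHOLD 4	/* stand-in for MPI3MR_THRESHOLD_REPLY_COUNT */

static unsigned int hw_ci;	/* stand-in for the MMIO consumer-index register */

static void post_ci(unsigned int ci)
{
	hw_ci = ci;		/* a real driver would writel() here */
	printf("CI write-back: %u\n", ci);
}

int main(void)
{
	unsigned int ci = 0, comps = 0, replies = 10;

	while (replies--) {
		ci = (ci + 1) % QDEPTH;		/* consume one reply descriptor */
		if (++comps == THRESHOLD) {	/* batch the expensive MMIO write */
			post_ci(ci);
			comps = 0;
		}
	}
	post_ci(ci);	/* final write-back after the loop, as in the patch */
	return 0;
}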
@@ -697,7 +734,7 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) mpi3mr_process_op_reply_q(mrioc, intr_info->op_reply_q); - usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP); + usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1); } while (atomic_read(&intr_info->op_reply_q->pend_ios) && (num_op_reply < mrioc->max_host_ios)); @@ -938,6 +975,14 @@ static const struct { }, { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, + { + MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT, + "diagnostic buffer post timeout" + }, + { + MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT, + "diagnostic buffer release timeout" + }, { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" }, @@ -997,6 +1042,36 @@ static const char *mpi3mr_reset_type_name(u16 reset_type) } /** + * mpi3mr_is_fault_recoverable - Read fault code and decide + * whether the controller can be recoverable + * @mrioc: Adapter instance reference + * Return: true if fault is recoverable, false otherwise. + */ +static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc) +{ + u32 fault; + + fault = (readl(&mrioc->sysif_regs->fault) & + MPI3_SYSIF_FAULT_CODE_MASK); + + switch (fault) { + case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: + case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: + ioc_warn(mrioc, + "controller requires system power cycle, marking controller as unrecoverable\n"); + return false; + case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER: + ioc_warn(mrioc, + "controller faulted due to insufficient power,\n" + " try by connecting it to a different slot\n"); + return false; + default: + break; + } + return true; +} + +/** * mpi3mr_print_fault_info - Display fault information * @mrioc: Adapter instance reference * @@ -1195,7 +1270,7 @@ static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc) static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, u32 reset_reason) { - u32 ioc_config, timeout, ioc_status; + u32 ioc_config, timeout, ioc_status, scratch_pad0; int retval = -1; ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n"); @@ -1204,7 +1279,11 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, return retval; } mpi3mr_clear_reset_history(mrioc); - writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); + scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX << + MPI3MR_RESET_REASON_OSTYPE_SHIFT) | + (mrioc->facts.ioc_num << + MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason); + writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]); ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); @@ -1229,7 +1308,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) retval = 0; - ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n", + ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n", (!retval) ? 
"successful" : "failed", ioc_status, ioc_config); return retval; } @@ -1276,12 +1355,25 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length); if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities & - MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) + MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) ioc_err(mrioc, "critical error: multipath capability is enabled at the\n" "\tcontroller while sas transport support is enabled at the\n" "\tdriver, please reboot the system or reload the driver\n"); + if (mrioc->seg_tb_support) { + if (!(mrioc->facts.ioc_capabilities & + MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) { + ioc_err(mrioc, + "critical error: previously enabled segmented trace\n" + " buffer capability is disabled after reset. Please\n" + " update the firmware or reboot the system or\n" + " reload the driver to enable trace diag buffer\n"); + mrioc->diag_buffers[0].disabled_after_reset = true; + } else + mrioc->diag_buffers[0].disabled_after_reset = false; + } + if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) { removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle, GFP_KERNEL); @@ -1319,6 +1411,10 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) int retval = 0; enum mpi3mr_iocstate ioc_state; u64 base_info; + u8 retry = 0; + u64 start_time, elapsed_time_sec; + +retry_bring_ioc_ready: ioc_status = readl(&mrioc->sysif_regs->ioc_status); ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); @@ -1326,6 +1422,11 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", ioc_status, ioc_config, base_info); + if (!mpi3mr_is_fault_recoverable(mrioc)) { + mrioc->unrecoverable = 1; + goto out_device_not_present; + } + /*The timeout value is in 2sec unit, changing it to seconds*/ mrioc->ready_timeout = ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> @@ -1337,26 +1438,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) ioc_info(mrioc, "controller is in %s state during detection\n", mpi3mr_iocstate_name(ioc_state)); - if (ioc_state == MRIOC_STATE_BECOMING_READY || - ioc_state == MRIOC_STATE_RESET_REQUESTED) { - timeout = mrioc->ready_timeout * 10; - do { - msleep(100); - } while (--timeout); + timeout = mrioc->ready_timeout * 10; + + do { + ioc_state = mpi3mr_get_iocstate(mrioc); + + if (ioc_state != MRIOC_STATE_BECOMING_READY && + ioc_state != MRIOC_STATE_RESET_REQUESTED) + break; if (!pci_device_is_present(mrioc->pdev)) { mrioc->unrecoverable = 1; - ioc_err(mrioc, - "controller is not present while waiting to reset\n"); - retval = -1; + ioc_err(mrioc, "controller is not present while waiting to reset\n"); goto out_device_not_present; } - ioc_state = mpi3mr_get_iocstate(mrioc); - ioc_info(mrioc, - "controller is in %s state after waiting to reset\n", - mpi3mr_iocstate_name(ioc_state)); - } + msleep(100); + } while (--timeout); if (ioc_state == MRIOC_STATE_READY) { ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); @@ -1417,6 +1515,9 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); + if (retry == 0) + start_time = jiffies; + timeout = mrioc->ready_timeout * 10; do { ioc_state = mpi3mr_get_iocstate(mrioc); @@ -1426,6 +1527,12 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) 
mpi3mr_iocstate_name(ioc_state)); return 0; } + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + mpi3mr_print_fault_info(mrioc); + goto out_failed; + } if (!pci_device_is_present(mrioc->pdev)) { mrioc->unrecoverable = 1; ioc_err(mrioc, @@ -1434,9 +1541,19 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) goto out_device_not_present; } msleep(100); - } while (--timeout); + elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; + } while (elapsed_time_sec < mrioc->ready_timeout); out_failed: + elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; + if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) { + retry++; + + ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n" + " elapsed time =%llu\n", retry, elapsed_time_sec); + + goto retry_bring_ioc_ready; + } ioc_state = mpi3mr_get_iocstate(mrioc); ioc_err(mrioc, "failed to bring to ready state, current state: %s\n", @@ -1520,11 +1637,11 @@ static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) * Return: 0 on success, non-zero on failure. */ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, - u32 reset_reason) + u16 reset_reason) { int retval = -1; u8 unlock_retry_count = 0; - u32 host_diagnostic, ioc_status, ioc_config; + u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0; u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && @@ -1576,6 +1693,9 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, unlock_retry_count, host_diagnostic); } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); + scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX << + MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num << + MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason); writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); writel(host_diagnostic | reset_type, &mrioc->sysif_regs->host_diagnostic); @@ -1616,7 +1736,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); ioc_status = readl(&mrioc->sysif_regs->ioc_status); ioc_info(mrioc, - "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", + "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n", (!retval)?"successful":"failed", ioc_status, ioc_config); if (retval) @@ -1665,6 +1785,12 @@ int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, retval = -EAGAIN; goto out; } + if (mrioc->pci_err_recovery) { + ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n"); + retval = -EAGAIN; + goto out; + } + areq_entry = (u8 *)mrioc->admin_req_base + (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); @@ -1997,15 +2123,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) } reply_qid = qidx + 1; - op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; - if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && - !mrioc->pdev->revision) - op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; + + if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) { + if (mrioc->pdev->revision) + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; + else + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; + } else + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K; + op_reply_q->ci = 0; op_reply_q->ephase = 1; atomic_set(&op_reply_q->pend_ios, 
0); atomic_set(&op_reply_q->in_use, 0); op_reply_q->enable_irq_poll = false; + op_reply_q->qfull_watermark = + op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2); if (!op_reply_q->q_segments) { retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); @@ -2309,8 +2442,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, void *segment_base_addr; u16 req_sz = mrioc->facts.op_req_sz; struct segments *segments = op_req_q->q_segments; + struct op_reply_qinfo *op_reply_q = NULL; reply_qidx = op_req_q->reply_qid - 1; + op_reply_q = mrioc->op_reply_qinfo + reply_qidx; if (mrioc->unrecoverable) return -EFAULT; @@ -2335,6 +2470,20 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, retval = -EAGAIN; goto out; } + if (mrioc->pci_err_recovery) { + ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n"); + retval = -EAGAIN; + goto out; + } + + /* Reply queue is nearing to get full, push back IOs to SML */ + if ((mrioc->prevent_reply_qfull == true) && + (atomic_read(&op_reply_q->pend_ios) > + (op_reply_q->qfull_watermark))) { + atomic_inc(&mrioc->reply_qfull_count); + retval = -EAGAIN; + goto out; + } segment_base_addr = segments[pi / op_req_q->segment_qd].segment; req_entry = (u8 *)segment_base_addr + @@ -2380,6 +2529,7 @@ out: void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) { u32 ioc_status, host_diagnostic, timeout; + union mpi3mr_trigger_data trigger_data; if (mrioc->unrecoverable) { ioc_err(mrioc, "controller is unrecoverable\n"); @@ -2391,16 +2541,30 @@ void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) ioc_err(mrioc, "controller is not present\n"); return; } - + memset(&trigger_data, 0, sizeof(trigger_data)); ioc_status = readl(&mrioc->sysif_regs->ioc_status); - if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || - (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + + if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { + mpi3mr_set_trigger_data_in_all_hdb(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); + return; + } else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { + trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & + MPI3_SYSIF_FAULT_CODE_MASK); + + mpi3mr_set_trigger_data_in_all_hdb(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); mpi3mr_print_fault_info(mrioc); return; } + mpi3mr_set_diagsave(mrioc); mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reason_code); + trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & + MPI3_SYSIF_FAULT_CODE_MASK); + mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT, + &trigger_data, 0); timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; do { host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); @@ -2580,10 +2744,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work) container_of(work, struct mpi3mr_ioc, watchdog_work.work); unsigned long flags; enum mpi3mr_iocstate ioc_state; - u32 fault, host_diagnostic, ioc_status; - u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; + u32 host_diagnostic, ioc_status; + union mpi3mr_trigger_data trigger_data; + u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; - if (mrioc->reset_in_progress) + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) return; if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) { @@ -2598,7 +2763,16 @@ static void mpi3mr_watchdog_work(struct work_struct *work) return; } - if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { + if (atomic_read(&mrioc->admin_pend_isr)) { + 
ioc_err(mrioc, "Unprocessed admin ISR instance found\n" + "flush admin replies\n"); + mpi3mr_process_admin_reply_q(mrioc); + } + + if (!(mrioc->facts.ioc_capabilities & + MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) && + (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) { + mrioc->ts_update_counter = 0; mpi3mr_sync_timestamp(mrioc); } @@ -2611,8 +2785,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work) return; } + memset(&trigger_data, 0, sizeof(trigger_data)); ioc_status = readl(&mrioc->sysif_regs->ioc_status); if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { + mpi3mr_set_trigger_data_in_all_hdb(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); return; } @@ -2622,7 +2799,9 @@ static void mpi3mr_watchdog_work(struct work_struct *work) if (ioc_state != MRIOC_STATE_FAULT) goto schedule_work; - fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; + trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; + mpi3mr_set_trigger_data_in_all_hdb(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { if (!mrioc->diagsave_timeout) { @@ -2636,7 +2815,12 @@ static void mpi3mr_watchdog_work(struct work_struct *work) mpi3mr_print_fault_info(mrioc); mrioc->diagsave_timeout = 0; - switch (fault) { + if (!mpi3mr_is_fault_recoverable(mrioc)) { + mrioc->unrecoverable = 1; + goto schedule_work; + } + + switch (trigger_data.fault) { case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: ioc_warn(mrioc, @@ -2682,8 +2866,8 @@ void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) snprintf(mrioc->watchdog_work_q_name, sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, mrioc->id); - mrioc->watchdog_work_q = - create_singlethread_workqueue(mrioc->watchdog_work_q_name); + mrioc->watchdog_work_q = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name); if (!mrioc->watchdog_work_q) { ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); return; @@ -2745,6 +2929,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) mrioc->admin_reply_ci = 0; mrioc->admin_reply_ephase = 1; atomic_set(&mrioc->admin_reply_q_in_use, 0); + atomic_set(&mrioc->admin_pend_isr, 0); if (!mrioc->admin_req_base) { mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev, @@ -2953,6 +3138,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, mrioc->facts.dma_mask = (facts_flags & MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; + mrioc->facts.dma_mask = (facts_flags & + MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> + MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; mrioc->facts.protocol_flags = facts_data->protocol_flags; mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word); mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests); @@ -2996,7 +3184,11 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift; mrioc->facts.shutdown_timeout = le16_to_cpu(facts_data->shutdown_timeout); - + mrioc->facts.diag_trace_sz = + le32_to_cpu(facts_data->diag_trace_size); + mrioc->facts.diag_fw_sz = + le32_to_cpu(facts_data->diag_fw_size); + mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size); 
mrioc->facts.max_dev_per_tg = facts_data->max_devices_per_throttle_group; mrioc->facts.io_throttle_data_length = @@ -3302,6 +3494,8 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) iocinit_req.msg_flags |= MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED; + iocinit_req.msg_flags |= + MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED; init_completion(&mrioc->init_cmds.done); retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, @@ -3668,15 +3862,126 @@ static const struct { u32 capability; char *name; } mpi3mr_capabilities[] = { - { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" }, - { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" }, + { MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" }, + { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" }, }; /** + * mpi3mr_repost_diag_bufs - repost host diag buffers + * @mrioc: Adapter instance reference + * + * repost firmware and trace diag buffers based on global + * trigger flag from driver page 2 + * + * Return: 0 on success, non-zero on failures. + */ +static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc) +{ + u64 global_trigger; + union mpi3mr_trigger_data prev_trigger_data; + struct diag_buffer_desc *trace_hdb = NULL; + struct diag_buffer_desc *fw_hdb = NULL; + int retval = 0; + bool trace_repost_needed = false; + bool fw_repost_needed = false; + u8 prev_trigger_type; + + retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT); + if (retval) + return -1; + + trace_hdb = mpi3mr_diag_buffer_for_type(mrioc, + MPI3_DIAG_BUFFER_TYPE_TRACE); + + if (trace_hdb && + trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED && + trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL && + trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) + trace_repost_needed = true; + + fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW); + + if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED && + fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL && + fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) + fw_repost_needed = true; + + if (trace_repost_needed || fw_repost_needed) { + global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger); + if (global_trigger & + MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED) + trace_repost_needed = false; + if (global_trigger & + MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED) + fw_repost_needed = false; + } + + if (trace_repost_needed) { + prev_trigger_type = trace_hdb->trigger_type; + memcpy(&prev_trigger_data, &trace_hdb->trigger_data, + sizeof(trace_hdb->trigger_data)); + retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb); + if (!retval) { + dprint_init(mrioc, "trace diag buffer reposted"); + mpi3mr_set_trigger_data_in_hdb(trace_hdb, + MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); + } else { + trace_hdb->trigger_type = prev_trigger_type; + memcpy(&trace_hdb->trigger_data, &prev_trigger_data, + sizeof(prev_trigger_data)); + ioc_err(mrioc, "trace diag buffer repost failed"); + return -1; + } + } + + if (fw_repost_needed) { + prev_trigger_type = fw_hdb->trigger_type; + memcpy(&prev_trigger_data, &fw_hdb->trigger_data, + sizeof(fw_hdb->trigger_data)); + retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb); + if (!retval) { + dprint_init(mrioc, "firmware diag buffer reposted"); + mpi3mr_set_trigger_data_in_hdb(fw_hdb, + MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); + } else { + fw_hdb->trigger_type = prev_trigger_type; + memcpy(&fw_hdb->trigger_data, &prev_trigger_data, + sizeof(prev_trigger_data)); + ioc_err(mrioc, 
"firmware diag buffer repost failed"); + return -1; + } + } + return retval; +} + +/** + * mpi3mr_read_tsu_interval - Update time stamp interval + * @mrioc: Adapter instance reference + * + * Update time stamp interval if its defined in driver page 1, + * otherwise use default value. + * + * Return: Nothing + */ +static void +mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_driver_page1 driver_pg1; + u16 pg_sz = sizeof(driver_pg1); + int retval = 0; + + mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL; + + retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz); + if (!retval && driver_pg1.time_stamp_update) + mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60); +} + +/** * mpi3mr_print_ioc_info - Display controller information * @mrioc: Adapter instance reference * - * Display controller personalit, capability, supported + * Display controller personality, capability, supported * protocols etc. * * Return: Nothing @@ -3685,20 +3990,20 @@ static void mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) { int i = 0, bytes_written = 0; - char personality[16]; + const char *personality; char protocol[50] = {0}; char capabilities[100] = {0}; struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; switch (mrioc->facts.personality) { case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: - strncpy(personality, "Enhanced HBA", sizeof(personality)); + personality = "Enhanced HBA"; break; case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: - strncpy(personality, "RAID", sizeof(personality)); + personality = "RAID"; break; default: - strncpy(personality, "Unknown", sizeof(personality)); + personality = "Unknown"; break; } @@ -3889,6 +4194,7 @@ static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc) mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET); mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); + mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE); retval = mpi3mr_issue_event_notification(mrioc); if (retval) @@ -3951,13 +4257,20 @@ retry_init: MPI3MR_HOST_IOS_KDUMP); if (!(mrioc->facts.ioc_capabilities & - MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) { + MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) { mrioc->sas_transport_enabled = 1; mrioc->scsi_device_channel = 1; mrioc->shost->max_channel = 1; mrioc->shost->transportt = mpi3mr_transport_template; } + if (mrioc->facts.max_req_limit) + mrioc->prevent_reply_qfull = true; + + if (mrioc->facts.ioc_capabilities & + MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED) + mrioc->seg_tb_support = true; + mrioc->reply_sz = mrioc->facts.reply_sz; retval = mpi3mr_check_reset_dma_mask(mrioc); @@ -3967,22 +4280,21 @@ retry_init: goto out_failed_noretry; } + mpi3mr_read_tsu_interval(mrioc); mpi3mr_print_ioc_info(mrioc); - if (!mrioc->cfg_page) { - dprint_init(mrioc, "allocating config page buffers\n"); - mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; - mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, - mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL); - if (!mrioc->cfg_page) { - retval = -1; - goto out_failed_noretry; - } - } + dprint_init(mrioc, "allocating host diag buffers\n"); + mpi3mr_alloc_diag_bufs(mrioc); dprint_init(mrioc, "allocating ioctl dma buffers\n"); mpi3mr_alloc_ioctl_dma_memory(mrioc); + dprint_init(mrioc, "posting host diag buffers\n"); + retval = mpi3mr_post_diag_bufs(mrioc); + + if (retval) + ioc_warn(mrioc, "failed to post host diag buffers\n"); + if (!mrioc->init_cmds.reply) { retval = 
mpi3mr_alloc_reply_sense_bufs(mrioc); if (retval) { @@ -4058,6 +4370,12 @@ retry_init: goto out_failed; } + retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT); + if (retval) { + ioc_err(mrioc, "failed to refresh triggers\n"); + goto out_failed; + } + ioc_info(mrioc, "controller initialization completed successfully\n"); return retval; out_failed: @@ -4109,7 +4427,8 @@ retry_init: goto out_failed_noretry; } - if (is_resume) { + mrioc->io_admin_reset_sync = 0; + if (is_resume || mrioc->block_on_pci_err) { dprint_reset(mrioc, "setting up single ISR\n"); retval = mpi3mr_setup_isr(mrioc, 1); if (retval) { @@ -4133,8 +4452,20 @@ retry_init: goto out_failed_noretry; } + mpi3mr_read_tsu_interval(mrioc); mpi3mr_print_ioc_info(mrioc); + if (is_resume) { + dprint_reset(mrioc, "posting host diag buffers\n"); + retval = mpi3mr_post_diag_bufs(mrioc); + if (retval) + ioc_warn(mrioc, "failed to post host diag buffers\n"); + } else { + retval = mpi3mr_repost_diag_bufs(mrioc); + if (retval) + ioc_warn(mrioc, "failed to re post host diag buffers\n"); + } + dprint_reset(mrioc, "sending ioc_init\n"); retval = mpi3mr_issue_iocinit(mrioc); if (retval) { @@ -4149,7 +4480,7 @@ retry_init: goto out_failed; } - if (is_resume) { + if (is_resume || mrioc->block_on_pci_err) { dprint_reset(mrioc, "setting up multiple ISR\n"); retval = mpi3mr_setup_isr(mrioc, 0); if (retval) { @@ -4327,6 +4658,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) if (mrioc->admin_reply_base) memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); atomic_set(&mrioc->admin_reply_q_in_use, 0); + atomic_set(&mrioc->admin_pend_isr, 0); if (mrioc->init_cmds.reply) { memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); @@ -4398,8 +4730,9 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) */ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) { - u16 i; + u16 i, j; struct mpi3mr_intr_info *intr_info; + struct diag_buffer_desc *diag_buffer; mpi3mr_free_enclosure_list(mrioc); mpi3mr_free_ioctl_dma_memory(mrioc); @@ -4523,17 +4856,46 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) mrioc->admin_req_base, mrioc->admin_req_dma); mrioc->admin_req_base = NULL; } - if (mrioc->cfg_page) { - dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz, - mrioc->cfg_page, mrioc->cfg_page_dma); - mrioc->cfg_page = NULL; - } + if (mrioc->pel_seqnum_virt) { dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz, mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma); mrioc->pel_seqnum_virt = NULL; } + for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { + diag_buffer = &mrioc->diag_buffers[i]; + if ((i == 0) && mrioc->seg_tb_support) { + if (mrioc->trace_buf_pool) { + for (j = 0; j < mrioc->num_tb_segs; j++) { + if (mrioc->trace_buf[j].segment) { + dma_pool_free(mrioc->trace_buf_pool, + mrioc->trace_buf[j].segment, + mrioc->trace_buf[j].segment_dma); + mrioc->trace_buf[j].segment = NULL; + } + + mrioc->trace_buf[j].segment = NULL; + } + dma_pool_destroy(mrioc->trace_buf_pool); + mrioc->trace_buf_pool = NULL; + } + + kfree(mrioc->trace_buf); + mrioc->trace_buf = NULL; + diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs; + } + if (diag_buffer->addr) { + dma_free_coherent(&mrioc->pdev->dev, + diag_buffer->size, diag_buffer->addr, + diag_buffer->dma_addr); + diag_buffer->addr = NULL; + diag_buffer->size = 0; + diag_buffer->type = 0; + diag_buffer->status = 0; + } + } + kfree(mrioc->throttle_groups); mrioc->throttle_groups = NULL; @@ -4600,7 +4962,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) } ioc_info(mrioc, - 
"Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n", + "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n", (!retval) ? "successful" : "failed", ioc_status, ioc_config); } @@ -4623,7 +4985,8 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc) ioc_state = mpi3mr_get_iocstate(mrioc); - if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) && + if (!mrioc->unrecoverable && !mrioc->reset_in_progress && + !mrioc->pci_err_recovery && (ioc_state == MRIOC_STATE_READY)) { if (mpi3mr_issue_and_process_mur(mrioc, MPI3MR_RESET_FROM_CTLR_CLEANUP)) @@ -4945,6 +5308,55 @@ cleanup_drv_cmd: } /** + * mpi3mr_check_op_admin_proc - + * @mrioc: Adapter instance reference + * + * Check if any of the operation reply queues + * or the admin reply queue are currently in use. + * If any queue is in use, this function waits for + * a maximum of 10 seconds for them to become available. + * + * Return: 0 on success, non-zero on failure. + */ +static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc) +{ + + u16 timeout = 10 * 10; + u16 elapsed_time = 0; + bool op_admin_in_use = false; + + do { + op_admin_in_use = false; + + /* Check admin_reply queue first to exit early */ + if (atomic_read(&mrioc->admin_reply_q_in_use) == 1) + op_admin_in_use = true; + else { + /* Check op_reply queues */ + int i; + + for (i = 0; i < mrioc->num_queues; i++) { + if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) { + op_admin_in_use = true; + break; + } + } + } + + if (!op_admin_in_use) + break; + + msleep(100); + + } while (++elapsed_time < timeout); + + if (op_admin_in_use) + return 1; + + return 0; +} + +/** * mpi3mr_soft_reset_handler - Reset the controller * @mrioc: Adapter instance reference * @reset_reason: Reset reason code @@ -4966,11 +5378,12 @@ cleanup_drv_cmd: * Return: 0 on success, non-zero on failure. 
*/ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, - u32 reset_reason, u8 snapdump) + u16 reset_reason, u8 snapdump) { int retval = 0, i; unsigned long flags; u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; + union mpi3mr_trigger_data trigger_data; /* Block the reset handler until diag save in progress*/ dprint_reset(mrioc, @@ -5003,10 +5416,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, mrioc->reset_in_progress = 1; mrioc->stop_bsgs = 1; mrioc->prev_reset_result = -1; + memset(&trigger_data, 0, sizeof(trigger_data)); if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) && (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) && (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) { + mpi3mr_set_trigger_data_in_all_hdb(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0); + dprint_reset(mrioc, + "soft_reset_handler: releasing host diagnostic buffers\n"); + mpi3mr_release_diag_bufs(mrioc, 0); for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) mrioc->event_masks[i] = -1; @@ -5017,12 +5436,15 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT); mpi3mr_ioc_disable_intr(mrioc); + mrioc->io_admin_reset_sync = 1; if (snapdump) { mpi3mr_set_diagsave(mrioc); retval = mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); if (!retval) { + trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & + MPI3_SYSIF_FAULT_CODE_MASK); do { host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); @@ -5031,6 +5453,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, break; msleep(100); } while (--timeout); + mpi3mr_set_trigger_data_in_all_hdb(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); } } @@ -5040,6 +5464,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, ioc_err(mrioc, "Failed to issue soft reset to the ioc\n"); goto out; } + + retval = mpi3mr_check_op_admin_proc(mrioc); + if (retval) { + ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n" + "thread still processing replies even after a 10 second\n" + "timeout. Marking the controller as unrecoverable!\n"); + + goto out; + } + if (mrioc->num_io_throttle_group != mrioc->facts.max_io_throttle_group) { ioc_err(mrioc, @@ -5066,6 +5500,15 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, mrioc->prepare_for_reset_timeout_counter = 0; } mpi3mr_memset_buffers(mrioc); + mpi3mr_release_diag_bufs(mrioc, 1); + mrioc->fw_release_trigger_active = false; + mrioc->trace_release_trigger_active = false; + mrioc->snapdump_trigger_active = false; + mpi3mr_set_trigger_data_in_all_hdb(mrioc, + MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0); + + dprint_reset(mrioc, + "soft_reset_handler: reinitializing the controller\n"); retval = mpi3mr_reinit_ioc(mrioc, 0); if (retval) { pr_err(IOCNAME "reinit after soft reset failed: reason %d\n", @@ -5102,6 +5545,7 @@ out: mrioc->device_refresh_on = 0; mrioc->unrecoverable = 1; mrioc->reset_in_progress = 0; + mrioc->stop_bsgs = 0; retval = -1; mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); } @@ -5112,55 +5556,6 @@ out: return retval; } - -/** - * mpi3mr_free_config_dma_memory - free memory for config page - * @mrioc: Adapter instance reference - * @mem_desc: memory descriptor structure - * - * Check whether the size of the buffer specified by the memory - * descriptor is greater than the default page size if so then - * free the memory pointed by the descriptor. - * - * Return: Nothing. 
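[Editor's note, illustrative sketch, not part of the patch: condensing the ordering mpi3mr_soft_reset_handler() enforces: block BSG traffic, quiesce, reset, reinitialize, and on failure mark the controller unrecoverable while unblocking BSG so queued requests fail fast rather than hang (note the new `mrioc->stop_bsgs = 0` on the error path). The helpers below are stubs, not driver APIs.]

/* reset_flow_demo.c - schematic ordering of a soft reset */
#include <stdbool.h>
#include <stdio.h>

struct ctrl {
	bool reset_in_progress, stop_bsgs, unrecoverable;
};

static bool quiesce(struct ctrl *c)  { return !c->unrecoverable; } /* drain I/O + pollers */
static bool hw_reset(struct ctrl *c) { return !c->unrecoverable; } /* issue the soft reset */
static bool reinit(struct ctrl *c)   { return !c->unrecoverable; } /* bring the IOC ready */

static int soft_reset(struct ctrl *c)
{
	c->reset_in_progress = true;
	c->stop_bsgs = true;	/* hold off app/BSG traffic for the duration */

	if (!quiesce(c) || !hw_reset(c) || !reinit(c)) {
		/* a failed reset leaves the controller unusable; unblock
		 * BSG so pending requests error out instead of hanging */
		c->unrecoverable = true;
		c->stop_bsgs = false;
		c->reset_in_progress = false;
		return -1;
	}

	c->reset_in_progress = false;
	c->stop_bsgs = false;
	return 0;
}

int main(void)
{
	struct ctrl c = { 0 };
	printf("soft_reset -> %d\n", soft_reset(&c));
	return 0;
}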
- */ -static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc, - struct dma_memory_desc *mem_desc) -{ - if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) { - dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, - mem_desc->addr, mem_desc->dma_addr); - mem_desc->addr = NULL; - } -} - -/** - * mpi3mr_alloc_config_dma_memory - Alloc memory for config page - * @mrioc: Adapter instance reference - * @mem_desc: Memory descriptor to hold dma memory info - * - * This function allocates new dmaable memory or provides the - * default config page dmaable memory based on the memory size - * described by the descriptor. - * - * Return: 0 on success, non-zero on failure. - */ -static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc, - struct dma_memory_desc *mem_desc) -{ - if (mem_desc->size > mrioc->cfg_page_sz) { - mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, - mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL); - if (!mem_desc->addr) - return -ENOMEM; - } else { - mem_desc->addr = mrioc->cfg_page; - mem_desc->dma_addr = mrioc->cfg_page_dma; - memset(mem_desc->addr, 0, mrioc->cfg_page_sz); - } - return 0; -} - /** * mpi3mr_post_cfg_req - Issue config requests and wait * @mrioc: Adapter instance reference @@ -5316,8 +5711,12 @@ static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc, cfg_req->page_length = cfg_hdr->page_length; cfg_req->page_version = cfg_hdr->page_version; } - if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc)) - goto out; + + mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev, + mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL); + + if (!mem_desc.addr) + return retval; mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size, mem_desc.dma_addr); @@ -5346,7 +5745,12 @@ static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc, } out: - mpi3mr_free_config_dma_memory(mrioc, &mem_desc); + if (mem_desc.addr) { + dma_free_coherent(&mrioc->pdev->dev, mem_desc.size, + mem_desc.addr, mem_desc.dma_addr); + mem_desc.addr = NULL; + } + return retval; } @@ -5944,3 +6348,64 @@ int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, out_failed: return -1; } + +/** + * mpi3mr_cfg_get_driver_pg2 - Read current driver page2 + * @mrioc: Adapter instance reference + * @driver_pg2: Pointer to return driver page 2 + * @pg_sz: Size of the memory allocated to the page pointer + * @page_action: Page action + * + * This is handler for config page read for the driver page2. + * This routine checks ioc_status to decide whether the page + * read is success or not. + * + * Return: 0 on success, non-zero on failure. 
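[Editor's note, illustrative sketch, not part of the patch: with mpi3mr_alloc_config_dma_memory()/mpi3mr_free_config_dma_memory() removed, mpi3mr_process_cfg_req() now allocates a fresh DMA buffer per request and frees it on the way out instead of reusing a cached cfg page. The lifetime pattern, modeled with calloc()/free() in stand-alone C (dma_alloc_coherent() is the real call; demo names are mine):]

/* per_request_buf_demo.c - allocate per request, free on every exit path */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int process_cfg_req(size_t pg_sz, void *out)
{
	int rval = -1;
	void *buf = calloc(1, pg_sz);	/* stands in for dma_alloc_coherent() */

	if (!buf)
		return rval;

	/* ... issue the request; firmware DMAs the page into buf ... */
	memset(buf, 0xab, pg_sz);	/* pretend the IOC filled it in */
	memcpy(out, buf, pg_sz);
	rval = 0;

	free(buf);			/* a single exit path owns the buffer */
	return rval;
}

int main(void)
{
	unsigned char page[256];
	printf("cfg req -> %d\n", process_cfg_req(sizeof(page), page));
	return 0;
}

[Trading a cached buffer for a per-request allocation simplifies the ownership rules: no sizing check against a default page, and no shared state to reset between callers.]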
+ */ +int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc, + struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u16 ioc_status = 0; + + memset(driver_pg2, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; + cfg_req.page_number = 2; + cfg_req.page_address = 0; + cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "driver page2 header read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "driver page2 header read failed with\n" + "ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + cfg_req.action = page_action; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) { + ioc_err(mrioc, "driver page2 read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "driver page2 read failed with\n" + "ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 73c831a97d27..ce444efd859e 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -242,6 +242,40 @@ static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc, } /** + * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to + * the list + * @mrioc: Adapter instance reference + * @event_data: Event data + * + * Add the given hdb trigger data event to the firmware event + * list. + * + * Return: Nothing. 
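+ *
+ * The queued event is consumed by the firmware event bottom half, which
+ * dispatches it to the trigger handler; in sketch:
+ *
+ *	case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER:
+ *		mpi3mr_process_trigger_data_event_bh(mrioc,
+ *		    (struct trigger_event_data *)fwevt->event_data);
+ *		break;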
+ */ +void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc, + struct trigger_event_data *event_data) +{ + struct mpi3mr_fwevt *fwevt; + u16 sz = sizeof(*event_data); + + fwevt = mpi3mr_alloc_fwevt(sz); + if (!fwevt) { + ioc_warn(mrioc, "failed to queue hdb trigger data event\n"); + return; + } + + fwevt->mrioc = mrioc; + fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER; + fwevt->send_ack = 0; + fwevt->process_evt = 1; + fwevt->evt_ctx = 0; + fwevt->event_data_size = sz; + memcpy(fwevt->event_data, event_data, sz); + + mpi3mr_fwevt_add_to_list(mrioc, fwevt); +} + +/** * mpi3mr_fwevt_del_from_list - Delete firmware event from list * @mrioc: Adapter instance reference * @fwevt: Firmware event reference @@ -898,6 +932,8 @@ void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, } } else mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); + mpi3mr_global_trigger(mrioc, + MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED); ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); @@ -920,7 +956,7 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, int retval = 0; struct mpi3mr_tgt_dev *tgtdev; - if (mrioc->reset_in_progress) + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) return -1; tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); @@ -949,6 +985,10 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, goto out; } } + dprint_event_bh(mrioc, + "exposed target device with handle(0x%04x), perst_id(%d)\n", + tgtdev->dev_handle, perst_id); + goto out; } else mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev); out: @@ -986,6 +1026,25 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev, return retval; } +static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev, + struct queue_limits *lim) +{ + u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? 
: MPI3MR_DEFAULT_PGSZEXP; + + lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512; + lim->virt_boundary_mask = (1 << pgsz) - 1; +} + +static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev, + struct queue_limits *lim) +{ + if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE && + (tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) + mpi3mr_configure_nvme_dev(tgt_dev, lim); +} + /** * mpi3mr_update_sdev - Update SCSI device information * @sdev: SCSI device reference @@ -1001,35 +1060,21 @@ static void mpi3mr_update_sdev(struct scsi_device *sdev, void *data) { struct mpi3mr_tgt_dev *tgtdev; + struct queue_limits lim; tgtdev = (struct mpi3mr_tgt_dev *)data; if (!tgtdev) return; mpi3mr_change_queue_depth(sdev, tgtdev->q_depth); - switch (tgtdev->dev_type) { - case MPI3_DEVICE_DEVFORM_PCIE: - /*The block layer hw sector size = 512*/ - if ((tgtdev->dev_spec.pcie_inf.dev_info & - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { - blk_queue_max_hw_sectors(sdev->request_queue, - tgtdev->dev_spec.pcie_inf.mdts / 512); - if (tgtdev->dev_spec.pcie_inf.pgsz == 0) - blk_queue_virt_boundary(sdev->request_queue, - ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); - else - blk_queue_virt_boundary(sdev->request_queue, - ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1)); - } - break; - default: - break; - } + + lim = queue_limits_start_update(sdev->request_queue); + mpi3mr_configure_tgt_dev(tgtdev, &lim); + WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim)); } /** - * mpi3mr_rfresh_tgtdevs - Refresh target device exposure + * mpi3mr_refresh_tgtdevs - Refresh target device exposure * @mrioc: Adapter instance reference * * This is executed post controller reset to identify any @@ -1038,8 +1083,7 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data) * * Return: Nothing. 
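 *
 * The mpi3mr_update_sdev() conversion above follows the block core
 * queue_limits pattern: snapshot the limits, mutate them, then commit the
 * whole set atomically instead of calling individual blk_queue_*()
 * setters. A minimal sketch with hypothetical mdts/pgsz values:
 *
 *	struct queue_limits lim;
 *
 *	lim = queue_limits_start_update(sdev->request_queue);
 *	lim.max_hw_sectors = mdts / 512;
 *	lim.virt_boundary_mask = (1 << pgsz) - 1;
 *	if (queue_limits_commit_update(sdev->request_queue, &lim))
 *		return -EINVAL;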
 */
-
-void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
 {
 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
 struct mpi3mr_stgt_priv_data *tgt_priv;
@@ -1047,8 +1091,8 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
 dprint_reset(mrioc, "refresh target devices: check for removals\n");
 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, list) {
- if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
- tgtdev->is_hidden &&
+ if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
+ tgtdev->is_hidden) &&
 tgtdev->host_exposed && tgtdev->starget &&
 tgtdev->starget->hostdata) {
 tgt_priv = tgtdev->starget->hostdata;
@@ -1304,9 +1348,9 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
 (struct mpi3_event_data_device_status_change *)fwevt->event_data;
 dev_handle = le16_to_cpu(evtdata->dev_handle);
- ioc_info(mrioc,
- "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
- __func__, dev_handle, evtdata->reason_code);
+ dprint_event_bh(mrioc,
+ "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n",
+ dev_handle, evtdata->reason_code);
 switch (evtdata->reason_code) {
 case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
 delete = 1;
@@ -1325,8 +1369,13 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
 }
 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_bh(mrioc,
+ "processing device status change event bottom half,\n"
+ "cannot identify target device for handle(0x%04x), rc(0x%02x)\n",
+ dev_handle, evtdata->reason_code);
 goto out;
+ }
 if (uhide) {
 tgtdev->is_hidden = 0;
 if (!tgtdev->host_exposed)
@@ -1366,12 +1415,17 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
 perst_id = le16_to_cpu(dev_pg0->persistent_id);
 dev_handle = le16_to_cpu(dev_pg0->dev_handle);
- ioc_info(mrioc,
- "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
- __func__, dev_handle, perst_id);
+ dprint_event_bh(mrioc,
+ "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n",
+ dev_handle, perst_id);
 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_bh(mrioc,
+ "cannot identify target device for device info\n"
+ "change event handle(0x%04x), perst_id(%d)\n",
+ dev_handle, perst_id);
 goto out;
+ }
 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
 if (!tgtdev->is_hidden && !tgtdev->host_exposed)
 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
@@ -1430,6 +1484,62 @@ out:
 }
 
 /**
+ * mpi3mr_process_trigger_data_event_bh - Process trigger event
+ * data
+ * @mrioc: Adapter instance reference
+ * @event_data: Event data
+ *
+ * This function releases diag buffers or issues diag fault
+ * based on trigger conditions.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
+ struct trigger_event_data *event_data)
+{
+ struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
+ struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
+ unsigned long flags;
+ int retval = 0;
+ u8 trigger_type = event_data->trigger_type;
+ union mpi3mr_trigger_data *trigger_data =
+ &event_data->trigger_specific_data;
+
+ if (event_data->snapdump) {
+ if (trace_hdb)
+ mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
+ trigger_data, 1);
+ if (fw_hdb)
+ mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
+ trigger_data, 1);
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TRIGGER, 1);
+ return;
+ }
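+
+ /*
+ * Illustrative sketch, not driver logic: the snapdump path handled
+ * above pairs with the capture done in mpi3mr_soft_reset_handler(),
+ * which latches the fault code before stamping every host diag
+ * buffer:
+ *
+ *	union mpi3mr_trigger_data trigger_data = {};
+ *
+ *	trigger_data.fault = readl(&mrioc->sysif_regs->fault) &
+ *	    MPI3_SYSIF_FAULT_CODE_MASK;
+ *	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
+ *	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
+ */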
+ + if (trace_hdb) { + retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb); + if (!retval) { + mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type, + trigger_data, 1); + } + spin_lock_irqsave(&mrioc->trigger_lock, flags); + mrioc->trace_release_trigger_active = false; + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); + } + if (fw_hdb) { + retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb); + if (!retval) { + mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type, + trigger_data, 1); + } + spin_lock_irqsave(&mrioc->trigger_lock, flags); + mrioc->fw_release_trigger_active = false; + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); + } +} + +/** * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event * @mrioc: Adapter instance reference * @encl_pg0: Enclosure page 0. @@ -1911,12 +2021,16 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, struct mpi3_device_page0 *dev_pg0 = NULL; u16 perst_id, handle, dev_info; struct mpi3_device0_sas_sata_format *sasinf = NULL; + unsigned int timeout; mpi3mr_fwevt_del_from_list(mrioc, fwevt); mrioc->current_event = fwevt; - if (mrioc->stop_drv_processing) + if (mrioc->stop_drv_processing) { + dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n" + "due to stop_drv_processing\n", fwevt->event_id); goto out; + } if (mrioc->unrecoverable) { dprint_event_bh(mrioc, @@ -1928,6 +2042,9 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, if (!fwevt->process_evt) goto evt_ack; + dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n", + fwevt->event_id); + switch (fwevt->event_id) { case MPI3_EVENT_DEVICE_ADDED: { @@ -2001,8 +2118,18 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, } case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: { - while (mrioc->device_refresh_on) + timeout = MPI3MR_RESET_TIMEOUT * 2; + while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) && + !mrioc->unrecoverable && !mrioc->pci_err_recovery) { msleep(500); + if (!timeout--) { + mrioc->unrecoverable = 1; + break; + } + } + + if (mrioc->unrecoverable || mrioc->pci_err_recovery) + break; dprint_event_bh(mrioc, "scan for non responding and newly added devices after soft reset started\n"); @@ -2010,11 +2137,17 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, mpi3mr_refresh_sas_ports(mrioc); mpi3mr_refresh_expanders(mrioc); } - mpi3mr_rfresh_tgtdevs(mrioc); + mpi3mr_refresh_tgtdevs(mrioc); ioc_info(mrioc, "scan for non responding and newly added devices after soft reset completed\n"); break; } + case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER: + { + mpi3mr_process_trigger_data_event_bh(mrioc, + (struct trigger_event_data *)fwevt->event_data); + break; + } default: break; } @@ -2650,6 +2783,9 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, goto out; dev_handle = le16_to_cpu(evtdata->dev_handle); + dprint_event_th(mrioc, + "device status change event top half with rc(0x%02x) for handle(0x%04x)\n", + evtdata->reason_code, dev_handle); switch (evtdata->reason_code) { case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: @@ -2673,8 +2809,12 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, } tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); - if (!tgtdev) + if (!tgtdev) { + dprint_event_th(mrioc, + "processing device status change event could not identify device for handle(0x%04x)\n", + dev_handle); goto out; + } if (hide) tgtdev->is_hidden = hide; if (tgtdev->starget && tgtdev->starget->hostdata) { @@ -2750,13 +2890,13 @@ static void mpi3mr_energypackchg_evt_th(struct 
mpi3mr_ioc *mrioc, u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); if (shutdown_timeout <= 0) { - ioc_warn(mrioc, + dprint_event_th(mrioc, "%s :Invalid Shutdown Timeout received = %d\n", __func__, shutdown_timeout); return; } - ioc_info(mrioc, + dprint_event_th(mrioc, "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); mrioc->facts.shutdown_timeout = shutdown_timeout; @@ -2832,9 +2972,9 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) * @mrioc: Adapter instance reference * @event_reply: event data * - * Identify whteher the event has to handled and acknowledged - * and either process the event in the tophalf and/or schedule a - * bottom half through mpi3mr_fwevt_worker. + * Identifies whether the event has to be handled and acknowledged, + * and either processes the event in the top-half and/or schedule a + * bottom-half through mpi3mr_fwevt_worker(). * * Return: Nothing */ @@ -2853,6 +2993,7 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, ack_req = 1; evt_type = event_reply->event; + mpi3mr_event_trigger(mrioc, event_reply->event); switch (evt_type) { case MPI3_EVENT_DEVICE_ADDED: @@ -2860,9 +3001,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, struct mpi3_device_page0 *dev_pg0 = (struct mpi3_device_page0 *)event_reply->event_data; if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) - ioc_err(mrioc, - "%s :Failed to add device in the device add event\n", - __func__); + dprint_event_th(mrioc, + "failed to process device added event for handle(0x%04x),\n" + "perst_id(%d) in the event top half handler\n", + le16_to_cpu(dev_pg0->dev_handle), + le16_to_cpu(dev_pg0->persistent_id)); else process_evt_bh = 1; break; @@ -2891,6 +3034,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, ack_req = 0; break; } + case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: + { + mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply); + break; + } case MPI3_EVENT_DEVICE_INFO_CHANGED: case MPI3_EVENT_LOG_DATA: case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: @@ -2920,11 +3068,15 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, break; } if (process_evt_bh || ack_req) { + dprint_event_th(mrioc, + "scheduling bottom half handler for event(0x%02x),ack_required=%d\n", + evt_type, ack_req); sz = event_reply->event_data_length * 4; fwevt = mpi3mr_alloc_fwevt(sz); if (!fwevt) { - ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", - __func__, __FILE__, __LINE__, __func__); + dprint_event_th(mrioc, + "failed to schedule bottom half handler for\n" + "event(0x%02x), ack_required=%d\n", evt_type, ack_req); return; } @@ -3154,6 +3306,7 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; @@ -3182,6 +3335,12 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) panic("%s: Ran out of sense buffers\n", mrioc->name); + if (sense_buf) { + scsi_normalize_sense(sense_buf, sense_count, &sshdr); + mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, + sshdr.asc, sshdr.ascq); + } + mpi3mr_reply_trigger(mrioc, ioc_status, 
ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; @@ -3449,6 +3608,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, scmd->sc_data_direction); priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ } else { + /* + * Some firmware versions byte-swap the REPORT ZONES command + * reply from ATA-ZAC devices by directly accessing in the host + * buffer. This does not respect the default command DMA + * direction and causes IOMMU page faults on some architectures + * with an IOMMU enforcing write mappings (e.g. AMD hosts). + * Avoid such issue by making the REPORT ZONES buffer mapping + * bi-directional. + */ + if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) + scmd->sc_data_direction = DMA_BIDIRECTIONAL; sg_scmd = scsi_sglist(scmd); sges_left = scsi_dma_map(scmd); } @@ -3681,6 +3851,13 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, mutex_unlock(&drv_cmd->mutex); goto out; } + if (mrioc->block_on_pci_err) { + retval = -1; + dprint_tm(mrioc, "sending task management failed due to\n" + "pci error recovery in progress\n"); + mutex_unlock(&drv_cmd->mutex); + goto out; + } drv_cmd->state = MPI3MR_CMD_PENDING; drv_cmd->is_waiting = 1; @@ -3695,6 +3872,18 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); if (scmd) { + if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + cmd_priv = scsi_cmd_priv(scmd); + if (!cmd_priv) + goto out_unlock; + + struct op_req_qinfo *op_req_q; + + op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx]; + tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag); + tm_req.task_request_queue_id = + cpu_to_le16(op_req_q->qid); + } sdev = scmd->device; sdev_priv_data = sdev->hostdata; scsi_tgt_priv_data = ((sdev_priv_data) ? @@ -3807,6 +3996,8 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, default: break; } + mpi3mr_global_trigger(mrioc, + MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED); out_unlock: drv_cmd->state = MPI3MR_CMD_NOTUSED; @@ -3896,7 +4087,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost) */ map->queue_offset = qoff; if (i != HCTX_TYPE_POLL) - blk_mq_pci_map_queues(map, mrioc->pdev, offset); + blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset); else blk_mq_map_queues(map); @@ -4064,6 +4255,7 @@ static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd) struct mpi3mr_sdev_priv_data *sdev_priv_data; u8 dev_type = MPI3_DEVICE_DEVFORM_VD; int retval = FAILED; + unsigned int timeout = MPI3MR_RESET_TIMEOUT; sdev_priv_data = scmd->device->hostdata; if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { @@ -4074,12 +4266,24 @@ static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd) if (dev_type == MPI3_DEVICE_DEVFORM_VD) { mpi3mr_wait_for_host_io(mrioc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT); - if (!mpi3mr_get_fw_pending_ios(mrioc)) + if (!mpi3mr_get_fw_pending_ios(mrioc)) { + while (mrioc->reset_in_progress || + mrioc->prepare_for_reset || + mrioc->block_on_pci_err) { + ssleep(1); + if (!timeout--) { + retval = FAILED; + goto out; + } + } retval = SUCCESS; + goto out; + } } if (retval == FAILED) mpi3mr_print_pending_host_io(mrioc); +out: sdev_printk(KERN_INFO, scmd->device, "Bus reset is %s for scmd(%p)\n", ((retval == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); @@ -4229,6 +4433,92 @@ out: } /** + * mpi3mr_eh_abort - Callback function for abort error handling + * @scmd: SCSI command reference + * + * Issues Abort Task Management if the command is in LLD scope + * and verifies if it is aborted successfully, and return status + * accordingly. + * + * Return: SUCCESS if the abort was successful, otherwise FAILED + */ +static int mpi3mr_eh_abort(struct scsi_cmnd *scmd) +{ + struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); + struct mpi3mr_stgt_priv_data *stgt_priv_data; + struct mpi3mr_sdev_priv_data *sdev_priv_data; + struct scmd_priv *cmd_priv; + u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT; + u8 resp_code = 0; + int retval = FAILED, ret = 0; + struct request *rq = scsi_cmd_to_rq(scmd); + unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc); + unsigned long scmd_age_sec = scmd_age_ms / HZ; + + sdev_printk(KERN_INFO, scmd->device, + "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd); + + sdev_printk(KERN_INFO, scmd->device, + "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n", + mrioc->name, scmd, scmd_age_sec, scmd_age_ms % HZ, rq->timeout / HZ, + scmd->retries, scmd->allowed); + + scsi_print_command(scmd); + + sdev_priv_data = scmd->device->hostdata; + if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { + sdev_printk(KERN_INFO, scmd->device, + "%s: Device not available, Skip issuing abort task\n", + mrioc->name); + retval = SUCCESS; + goto out; + } + + stgt_priv_data = sdev_priv_data->tgt_priv_data; + dev_handle = stgt_priv_data->dev_handle; + + cmd_priv = scsi_cmd_priv(scmd); + if (!cmd_priv->in_lld_scope || + cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) { + sdev_printk(KERN_INFO, scmd->device, + "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n", + mrioc->name, scmd); + retval = SUCCESS; + goto out; + } + + if (stgt_priv_data->dev_removed) { + sdev_printk(KERN_INFO, scmd->device, + "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n", + mrioc->name, dev_handle); + retval = FAILED; + goto out; + } + + ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, + timeout, &mrioc->host_tm_cmds, &resp_code, scmd); + + if (ret) + goto out; + + if (cmd_priv->in_lld_scope) { + sdev_printk(KERN_INFO, scmd->device, + "%s: Abort task failed. scmd (0x%p) was not terminated\n", + mrioc->name, scmd); + goto out; + } + + retval = SUCCESS; +out: + sdev_printk(KERN_INFO, scmd->device, + "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name, + ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd); + + return retval; +} + +/** * mpi3mr_scan_start - Scan start callback handler * @shost: SCSI host reference * @@ -4306,14 +4596,14 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost, } /** - * mpi3mr_slave_destroy - Slave destroy callback handler + * mpi3mr_sdev_destroy - Slave destroy callback handler * @sdev: SCSI device reference * * Cleanup and free per device(lun) private data. * * Return: Nothing. 
*/ -static void mpi3mr_slave_destroy(struct scsi_device *sdev) +static void mpi3mr_sdev_destroy(struct scsi_device *sdev) { struct Scsi_Host *shost; struct mpi3mr_ioc *mrioc; @@ -4393,15 +4683,17 @@ static void mpi3mr_target_destroy(struct scsi_target *starget) } /** - * mpi3mr_slave_configure - Slave configure callback handler + * mpi3mr_sdev_configure - Slave configure callback handler * @sdev: SCSI device reference + * @lim: queue limits * * Configure queue depth, max hardware sectors and virt boundary * as required * * Return: 0 always. */ -static int mpi3mr_slave_configure(struct scsi_device *sdev) +static int mpi3mr_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct scsi_target *starget; struct Scsi_Host *shost; @@ -4432,40 +4724,20 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev) sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); - switch (tgt_dev->dev_type) { - case MPI3_DEVICE_DEVFORM_PCIE: - /*The block layer hw sector size = 512*/ - if ((tgt_dev->dev_spec.pcie_inf.dev_info & - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { - blk_queue_max_hw_sectors(sdev->request_queue, - tgt_dev->dev_spec.pcie_inf.mdts / 512); - if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) - blk_queue_virt_boundary(sdev->request_queue, - ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); - else - blk_queue_virt_boundary(sdev->request_queue, - ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); - } - break; - default: - break; - } - + mpi3mr_configure_tgt_dev(tgt_dev, lim); mpi3mr_tgtdev_put(tgt_dev); - return retval; } /** - * mpi3mr_slave_alloc -Slave alloc callback handler + * mpi3mr_sdev_init -Slave alloc callback handler * @sdev: SCSI device reference * * Allocate per device(lun) private data and initialize it. * * Return: 0 on success -ENOMEM on memory allocation failure. 
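 *
 * Note: .slave_alloc/.slave_configure/.slave_destroy are now
 * .sdev_init/.sdev_configure/.sdev_destroy, and the configure hook gets
 * the queue_limits to fill in rather than touching the request queue
 * directly. A minimal sketch (the example_ name is hypothetical):
 *
 *	static int example_sdev_configure(struct scsi_device *sdev,
 *			struct queue_limits *lim)
 *	{
 *		lim->max_hw_sectors = 1024;
 *		return 0;
 *	}
 *
 * The SCSI midlayer commits the returned limits itself.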
*/ -static int mpi3mr_slave_alloc(struct scsi_device *sdev) +static int mpi3mr_sdev_init(struct scsi_device *sdev) { struct Scsi_Host *shost; struct mpi3mr_ioc *mrioc; @@ -4780,7 +5052,8 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, goto out; } - if (mrioc->reset_in_progress) { + if (mrioc->reset_in_progress || mrioc->prepare_for_reset + || mrioc->block_on_pci_err) { retval = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -4895,7 +5168,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; } - scsiio_req->flags = cpu_to_le32(scsiio_flags); + scsiio_req->flags |= cpu_to_le32(scsiio_flags); if (mpi3mr_op_request_post(mrioc, op_req_q, scmd_priv_data->mpi3mr_scsiio_req)) { @@ -4920,13 +5193,14 @@ static const struct scsi_host_template mpi3mr_driver_template = { .proc_name = MPI3MR_DRIVER_NAME, .queuecommand = mpi3mr_qcmd, .target_alloc = mpi3mr_target_alloc, - .slave_alloc = mpi3mr_slave_alloc, - .slave_configure = mpi3mr_slave_configure, + .sdev_init = mpi3mr_sdev_init, + .sdev_configure = mpi3mr_sdev_configure, .target_destroy = mpi3mr_target_destroy, - .slave_destroy = mpi3mr_slave_destroy, + .sdev_destroy = mpi3mr_sdev_destroy, .scan_finished = mpi3mr_scan_finished, .scan_start = mpi3mr_scan_start, .change_queue_depth = mpi3mr_change_queue_depth, + .eh_abort_handler = mpi3mr_eh_abort, .eh_device_reset_handler = mpi3mr_eh_dev_reset, .eh_target_reset_handler = mpi3mr_eh_target_reset, .eh_bus_reset_handler = mpi3mr_eh_bus_reset, @@ -5073,7 +5347,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) } mrioc = shost_priv(shost); - retval = ida_alloc_range(&mrioc_ida, 1, U8_MAX, GFP_KERNEL); + retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL); if (retval < 0) goto id_alloc_failed; mrioc->id = (u8)retval; @@ -5092,6 +5366,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&mrioc->watchdog_lock); spin_lock_init(&mrioc->chain_buf_lock); spin_lock_init(&mrioc->sas_node_lock); + spin_lock_init(&mrioc->trigger_lock); INIT_LIST_HEAD(&mrioc->fwevt_list); INIT_LIST_HEAD(&mrioc->tgtdev_list); @@ -5174,10 +5449,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) else scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); - snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), - "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); mrioc->fwevt_worker_thread = alloc_ordered_workqueue( - mrioc->fwevt_worker_name, 0); + "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id); if (!mrioc->fwevt_worker_thread) { ioc_err(mrioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); @@ -5263,7 +5536,14 @@ static void mpi3mr_remove(struct pci_dev *pdev) while (mrioc->reset_in_progress || mrioc->is_driver_loading) ssleep(1); - if (!pci_device_is_present(mrioc->pdev)) { + if (mrioc->block_on_pci_err) { + mrioc->block_on_pci_err = false; + scsi_unblock_requests(shost); + mrioc->unrecoverable = 1; + } + + if (!pci_device_is_present(mrioc->pdev) || + mrioc->pci_err_recovery) { mrioc->unrecoverable = 1; mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); } @@ -5447,6 +5727,197 @@ mpi3mr_resume(struct device *dev) return 0; } +/** + * mpi3mr_pcierr_error_detected - PCI error detected callback + * @pdev: PCI device instance + * @state: channel state + * + * This function is called by the PCI error recovery driver and + * based on the state passed the driver decides what actions to + * be recommended back to PCI driver. 
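+ *
+ * In sketch form, the mapping implemented below is (quiesce() and
+ * mark_dead() stand in for the block/stop/flush steps):
+ *
+ *	switch (state) {
+ *	case pci_channel_io_normal:
+ *		return PCI_ERS_RESULT_CAN_RECOVER;
+ *	case pci_channel_io_frozen:
+ *		quiesce();
+ *		return PCI_ERS_RESULT_NEED_RESET;
+ *	case pci_channel_io_perm_failure:
+ *		mark_dead();
+ *		return PCI_ERS_RESULT_DISCONNECT;
+ *	default:
+ *		return PCI_ERS_RESULT_DISCONNECT;
+ *	}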
+ * + * For all of the states if there is no valid mrioc or scsi host + * references in the PCI device then this function will return + * the result as disconnect. + * + * For normal state, this function will return the result as can + * recover. + * + * For frozen state, this function will block for any pending + * controller initialization or re-initialization to complete, + * stop any new interactions with the controller and return + * status as reset required. + * + * For permanent failure state, this function will mark the + * controller as unrecoverable and return status as disconnect. + * + * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or + * DISCONNECT based on the controller state. + */ +static pci_ers_result_t +mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + unsigned int timeout = MPI3MR_RESET_TIMEOUT; + + dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__, + state); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + mrioc->pci_err_recovery = true; + mrioc->block_on_pci_err = true; + do { + if (mrioc->reset_in_progress || mrioc->is_driver_loading) + ssleep(1); + else + break; + } while (--timeout); + + if (!timeout) { + mrioc->pci_err_recovery = true; + mrioc->block_on_pci_err = true; + mrioc->unrecoverable = 1; + mpi3mr_stop_watchdog(mrioc); + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + return PCI_ERS_RESULT_DISCONNECT; + } + + scsi_block_requests(mrioc->shost); + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_resources(mrioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + mrioc->pci_err_recovery = true; + mrioc->block_on_pci_err = true; + mrioc->unrecoverable = 1; + mpi3mr_stop_watchdog(mrioc); + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + return PCI_ERS_RESULT_DISCONNECT; + default: + return PCI_ERS_RESULT_DISCONNECT; + } +} + +/** + * mpi3mr_pcierr_slot_reset - Post slot reset callback + * @pdev: PCI device instance + * + * This function is called by the PCI error recovery driver + * after a slot or link reset issued by it for the recovery, the + * driver is expected to bring back the controller and + * initialize it. + * + * This function restores PCI state and reinitializes controller + * resources and the controller, this blocks for any pending + * reset to complete. 
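+ *
+ * The wait is bounded rather than indefinite; the shape shared by these
+ * callbacks is, in sketch:
+ *
+ *	unsigned int timeout = MPI3MR_RESET_TIMEOUT;
+ *
+ *	do {
+ *		if (!mrioc->reset_in_progress)
+ *			break;
+ *		ssleep(1);
+ *	} while (--timeout);
+ *	if (!timeout)
+ *		goto out_failed;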
+ * + * Returns: PCI_ERS_RESULT_DISCONNECT on failure or + * PCI_ERS_RESULT_RECOVERED + */ +static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + unsigned int timeout = MPI3MR_RESET_TIMEOUT; + + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + do { + if (mrioc->reset_in_progress) + ssleep(1); + else + break; + } while (--timeout); + + if (!timeout) + goto out_failed; + + pci_restore_state(pdev); + + if (mpi3mr_setup_resources(mrioc)) { + ioc_err(mrioc, "setup resources failed\n"); + goto out_failed; + } + mrioc->unrecoverable = 0; + mrioc->pci_err_recovery = false; + + if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0)) + goto out_failed; + + return PCI_ERS_RESULT_RECOVERED; + +out_failed: + mrioc->unrecoverable = 1; + mrioc->block_on_pci_err = false; + scsi_unblock_requests(shost); + mpi3mr_start_watchdog(mrioc); + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * mpi3mr_pcierr_resume - PCI error recovery resume + * callback + * @pdev: PCI device instance + * + * This function enables all I/O and IOCTLs post reset issued as + * part of the PCI error recovery + * + * Return: Nothing. + */ +static void mpi3mr_pcierr_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + if (mrioc->block_on_pci_err) { + mrioc->block_on_pci_err = false; + scsi_unblock_requests(shost); + mpi3mr_start_watchdog(mrioc); + } +} + +/** + * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback + * @pdev: PCI device instance + * + * This is called only if mpi3mr_pcierr_error_detected returns + * PCI_ERS_RESULT_CAN_RECOVER. 
+ * + * Return: PCI_ERS_RESULT_DISCONNECT when the controller is + * unrecoverable or when the shost/mrioc reference cannot be + * found, else return PCI_ERS_RESULT_RECOVERED + */ +static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + if (mrioc->unrecoverable) + return PCI_ERS_RESULT_DISCONNECT; + + return PCI_ERS_RESULT_RECOVERED; +} + static const struct pci_device_id mpi3mr_pci_id_table[] = { { PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, @@ -5464,6 +5935,13 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = { }; MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); +static const struct pci_error_handlers mpi3mr_err_handler = { + .error_detected = mpi3mr_pcierr_error_detected, + .mmio_enabled = mpi3mr_pcierr_mmio_enabled, + .slot_reset = mpi3mr_pcierr_slot_reset, + .resume = mpi3mr_pcierr_resume, +}; + static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); static struct pci_driver mpi3mr_pci_driver = { @@ -5472,6 +5950,7 @@ static struct pci_driver mpi3mr_pci_driver = { .probe = mpi3mr_probe, .remove = mpi3mr_remove, .shutdown = mpi3mr_shutdown, + .err_handler = &mpi3mr_err_handler, .driver.pm = &mpi3mr_pm_ops, }; diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index d32ad46318cb..c8d6ced5640e 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -7,6 +7,8 @@ * */ +#include <linux/vmalloc.h> + #include "mpi3mr.h" /** @@ -103,10 +105,10 @@ struct rep_manu_reply { u8 reserved0[2]; u8 sas_format; u8 reserved2[3]; - u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; - u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; - u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; - u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring; u16 component_id; u8 component_revision_id; u8 reserved3; @@ -149,6 +151,11 @@ static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc, return -EFAULT; } + if (mrioc->pci_err_recovery) { + ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__); + return -EFAULT; + } + data_out_sz = sizeof(struct rep_manu_request); data_in_sz = sizeof(struct rep_manu_reply); data_out = dma_alloc_coherent(&mrioc->pdev->dev, @@ -209,17 +216,13 @@ static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc, goto out; } - strscpy(edev->vendor_id, manufacture_reply->vendor_id, - SAS_EXPANDER_VENDOR_ID_LEN); - strscpy(edev->product_id, manufacture_reply->product_id, - SAS_EXPANDER_PRODUCT_ID_LEN); - strscpy(edev->product_rev, manufacture_reply->product_rev, - SAS_EXPANDER_PRODUCT_REV_LEN); + memtostr(edev->vendor_id, manufacture_reply->vendor_id); + memtostr(edev->product_id, manufacture_reply->product_id); + memtostr(edev->product_rev, manufacture_reply->product_rev); edev->level = manufacture_reply->sas_format & 1; if (edev->level) { - strscpy(edev->component_vendor_id, - manufacture_reply->component_vendor_id, - SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + memtostr(edev->component_vendor_id, + manufacture_reply->component_vendor_id); tmp = (u8 *)&manufacture_reply->component_id; edev->component_id = tmp[0] << 8 | tmp[1]; 
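 /*
 * Aside (sketch, not part of the patch): memtostr() bounds the copy by
 * both array sizes and always NUL-terminates, which is why the
 * __nonstring reply fields above no longer go through strscpy(). The
 * open-coded big-endian load of component_id is equivalent to:
 *
 *	edev->component_id =
 *	    get_unaligned_be16(&manufacture_reply->component_id);
 */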
edev->component_revision_id = @@ -587,12 +590,13 @@ static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate) * @mrioc: Adapter instance reference * @mr_sas_port: Internal Port object * @mr_sas_phy: Internal Phy object + * @host_node: Flag to indicate this is a host_node * * Return: None. */ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_port *mr_sas_port, - struct mpi3mr_sas_phy *mr_sas_phy) + struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node) { u64 sas_address = mr_sas_port->remote_identify.sas_address; @@ -602,9 +606,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, list_del(&mr_sas_phy->port_siblings); mr_sas_port->num_phys--; - mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id); - if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id) - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + + if (host_node) { + mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id); + + if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + } sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy); mr_sas_phy->phy_belongs_to_port = 0; } @@ -614,12 +622,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, * @mrioc: Adapter instance reference * @mr_sas_port: Internal Port object * @mr_sas_phy: Internal Phy object + * @host_node: Flag to indicate this is a host_node * * Return: None. */ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_port *mr_sas_port, - struct mpi3mr_sas_phy *mr_sas_phy) + struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node) { u64 sas_address = mr_sas_port->remote_identify.sas_address; @@ -629,9 +638,12 @@ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc, list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list); mr_sas_port->num_phys++; - mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id); - if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy) - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + if (host_node) { + mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id); + + if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + } sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy); mr_sas_phy->phy_belongs_to_port = 1; } @@ -672,7 +684,7 @@ static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc, if (srch_phy == mr_sas_phy) return; } - mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy); + mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy, mr_sas_node->host_node); return; } } @@ -733,7 +745,7 @@ static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc, mpi3mr_delete_sas_port(mrioc, mr_sas_port); else mpi3mr_delete_sas_phy(mrioc, mr_sas_port, - mr_sas_phy); + mr_sas_phy, mr_sas_node->host_node); return; } } @@ -792,6 +804,12 @@ static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle, return -EFAULT; } + if (mrioc->pci_err_recovery) { + ioc_err(mrioc, "%s: pci error recovery in progress!\n", + __func__); + return -EFAULT; + } + if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0, sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) { ioc_err(mrioc, "%s: device page0 read failed\n", __func__); @@ -1009,6 +1027,9 @@ mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id) hba_port->port_id = port_id; ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n", hba_port, hba_port->port_id); + if (mrioc->reset_in_progress || + mrioc->pci_err_recovery) + hba_port->flags = MPI3MR_HBA_PORT_FLAG_NEW; 
list_add_tail(&hba_port->list, &mrioc->hba_port_table_list); return hba_port; } @@ -1016,7 +1037,7 @@ mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id) /** * mpi3mr_get_hba_port_by_id - find hba port by id * @mrioc: Adapter instance reference - * @port_id - Port ID to search + * @port_id: Port ID to search * * Return: mpi3mr_hba_port reference for the matched port */ @@ -1057,7 +1078,7 @@ void mpi3mr_update_links(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_node *mr_sas_node; struct mpi3mr_sas_phy *mr_sas_phy; - if (mrioc->reset_in_progress) + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) return; spin_lock_irqsave(&mrioc->sas_node_lock, flags); @@ -1355,15 +1376,27 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node, mr_sas_port->remote_identify.sas_address, hba_port); + if (mr_sas_node->host_node && mr_sas_node->num_phys >= + sizeof(mr_sas_port->phy_mask) * 8) + ioc_info(mrioc, "max port count %u could be too high\n", + mr_sas_node->num_phys); + for (i = 0; i < mr_sas_node->num_phys; i++) { if ((mr_sas_node->phy[i].remote_identify.sas_address != mr_sas_port->remote_identify.sas_address) || (mr_sas_node->phy[i].hba_port != hba_port)) continue; + + if (mr_sas_node->host_node && (i >= sizeof(mr_sas_port->phy_mask) * 8)) { + ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n", + i, sizeof(mr_sas_port->phy_mask) * 8); + goto out_fail; + } list_add_tail(&mr_sas_node->phy[i].port_siblings, &mr_sas_port->phy_list); mr_sas_port->num_phys++; - mr_sas_port->phy_mask |= (1 << i); + if (mr_sas_node->host_node) + mr_sas_port->phy_mask |= (1 << i); } if (!mr_sas_port->num_phys) { @@ -1372,7 +1405,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, goto out_fail; } - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + if (mr_sas_node->host_node) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) { tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc, @@ -1970,7 +2004,7 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle) if (!handle) return -1; - if (mrioc->reset_in_progress) + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) return -1; if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0, @@ -2176,7 +2210,7 @@ void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, /* remove sibling ports attached to this expander */ list_for_each_entry_safe(mr_sas_port, next, &sas_expander->sas_port_list, port_list) { - if (mrioc->reset_in_progress) + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) return; if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) @@ -2226,7 +2260,7 @@ void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address, struct mpi3mr_sas_node *sas_expander; unsigned long flags; - if (mrioc->reset_in_progress) + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) return; if (!hba_port) @@ -2537,6 +2571,11 @@ static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc, return -EFAULT; } + if (mrioc->pci_err_recovery) { + ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__); + return -EFAULT; + } + data_out_sz = sizeof(struct phy_error_log_request); data_in_sz = sizeof(struct phy_error_log_reply); sz = data_out_sz + data_in_sz; @@ -2796,6 +2835,12 @@ mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc, return -EFAULT; } + if (mrioc->pci_err_recovery) { + ioc_err(mrioc, "%s: pci error recovery in 
progress!\n", + __func__); + return -EFAULT; + } + data_out_sz = sizeof(struct phy_control_request); data_in_sz = sizeof(struct phy_control_reply); sz = data_out_sz + data_in_sz; @@ -3219,6 +3264,12 @@ mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, goto out; } + if (mrioc->pci_err_recovery) { + ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__); + rc = -EFAULT; + goto out; + } + rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload, &dma_addr_out, &dma_len_out, &addr_out); if (rc) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index 6de35b32223c..b181b113fc80 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -125,6 +125,12 @@ * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT + * 07-20-20 02.00.58 Bumped MPI2_HEADER_VERSION_UNIT + * 03-30-21 02.00.59 Bumped MPI2_HEADER_VERSION_UNIT + * 06-03-22 02.00.60 Bumped MPI2_HEADER_VERSION_UNIT + * 09-20-23 02.00.61 Bumped MPI2_HEADER_VERSION_UNIT + * 09-13-24 02.00.62 Bumped MPI2_HEADER_VERSION_UNIT + * Added MPI2_FUNCTION_MCTP_PASSTHROUGH * -------------------------------------------------------------------------- */ @@ -165,7 +171,7 @@ /* Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x39) +#define MPI2_HEADER_VERSION_UNIT (0x3E) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) @@ -669,6 +675,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) #define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) #define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33) +#define MPI2_FUNCTION_MCTP_PASSTHROUGH (0x34) #define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) #define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 587f7d248219..02bf26ca976e 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -251,6 +251,7 @@ * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7. 
* 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT + * 09-13-24 02.00.50 Added PCIe 32 GT/s link rate */ #ifndef MPI2_CNFG_H @@ -606,7 +607,7 @@ typedef struct _MPI2_CONFIG_REPLY { typedef struct _MPI2_CONFIG_PAGE_MAN_0 { MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ - U8 ChipName[16]; /*0x04 */ + U8 ChipName[16] __nonstring; /*0x04 */ U8 ChipRevision[8]; /*0x14 */ U8 BoardName[16]; /*0x1C */ U8 BoardAssembly[16]; /*0x2C */ @@ -1121,6 +1122,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { #define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) #define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) #define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_32_0_GBPS (0x04) /*defines for IO Unit Page 7 ProcessorState field */ #define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) @@ -2301,6 +2303,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { #define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) /*values for SAS IO Unit Page 1 AdditionalControlFlags */ +#define MPI2_SASIOUNIT1_ACONTROL_PROD_SPECIFIC_1 (0x8000) #define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100) #define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) #define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) @@ -3591,6 +3594,7 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { #define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03) #define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04) #define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05) +#define MPI26_PCIE_NEG_LINK_RATE_32_0 (0x06) /**************************************************************************** @@ -3700,6 +3704,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 { #define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30) #define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40) #define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) +#define MPI26_PCIEIOUNIT1_MAX_RATE_32_0 (0x60) /*values for PCIe IO Unit Page 1 DMDReportPCIe */ #define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index d92852591134..1a279c6e1a9f 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -179,6 +179,7 @@ * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP + * 9-13-24 02.00.39 Added MPI26_MCTP_PASSTHROUGH messages * -------------------------------------------------------------------------- */ @@ -382,6 +383,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY { /*ProductID field uses MPI2_FW_HEADER_PID_ */ /*IOCCapabilities */ +#define MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU (0x00800000) #define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000) #define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) #define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) @@ -1798,5 +1800,57 @@ typedef struct _MPI26_IOUNIT_CONTROL_REPLY { Mpi26IoUnitControlReply_t, *pMpi26IoUnitControlReply_t; +/**************************************************************************** + * MCTP Passthrough messages (MPI v2.6 and later only.) 
+ ****************************************************************************/ + +/* MCTP Passthrough Request Message */ +typedef struct _MPI26_MCTP_PASSTHROUGH_REQUEST { + U8 MsgContext; /* 0x00 */ + U8 Reserved1[2]; /* 0x01 */ + U8 Function; /* 0x03 */ + U8 Reserved2[3]; /* 0x04 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U32 Reserved4; /* 0x0C */ + U8 Flags; /* 0x10 */ + U8 Reserved5[3]; /* 0x11 */ + U32 Reserved6; /* 0x14 */ + U32 H2DLength; /* 0x18 */ + U32 D2HLength; /* 0x1C */ + MPI25_SGE_IO_UNION H2DSGL; /* 0x20 */ + MPI25_SGE_IO_UNION D2HSGL; /* 0x30 */ +} MPI26_MCTP_PASSTHROUGH_REQUEST, + *PTR_MPI26_MCTP_PASSTHROUGH_REQUEST, + Mpi26MctpPassthroughRequest_t, + *pMpi26MctpPassthroughRequest_t; + +/* values for the MsgContext field */ +#define MPI26_MCTP_MSG_CONEXT_UNUSED (0x00) + +/* values for the Flags field */ +#define MPI26_MCTP_FLAGS_MSG_FORMAT_MPT (0x01) + +/* MCTP Passthrough Reply Message */ +typedef struct _MPI26_MCTP_PASSTHROUGH_REPLY { + U8 MsgContext; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U8 Reserved2[3]; /* 0x04 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 ResponseDataLength; /* 0x14 */ +} MPI26_MCTP_PASSTHROUGH_REPLY, + *PTR_MPI26_MCTP_PASSTHROUGH_REPLY, + Mpi26MctpPassthroughReply_t, + *pMpi26MctpPassthroughReply_t; #endif diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1b492e9a3e55..bd3efa5b46c7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -846,8 +846,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) snprintf(ioc->fault_reset_work_q_name, sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", ioc->driver_name, ioc->id); - ioc->fault_reset_work_q = - create_singlethread_workqueue(ioc->fault_reset_work_q_name); + ioc->fault_reset_work_q = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name); if (!ioc->fault_reset_work_q) { ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__); return; @@ -1202,6 +1202,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, ioc->sge_size; func_str = "nvme_encapsulated"; break; + case MPI2_FUNCTION_MCTP_PASSTHROUGH: + frame_sz = sizeof(Mpi26MctpPassthroughRequest_t) + + ioc->sge_size; + func_str = "mctp_passthru"; + break; default: frame_sz = 32; func_str = "unknown"; @@ -2671,6 +2676,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); } +static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd) +{ + /* + * Some firmware versions byte-swap the REPORT ZONES command reply from + * ATA-ZAC devices by directly accessing in the host buffer. This does + * not respect the default command DMA direction and causes IOMMU page + * faults on some architectures with an IOMMU enforcing write mappings + * (e.g. AMD hosts). Avoid such issue by making the report zones buffer + * mapping bi-directional. + */ + if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES) + cmd->sc_data_direction = DMA_BIDIRECTIONAL; + + return scsi_dma_map(cmd); +} + /** * _base_build_sg_scmd - main sg creation routine * pcie_device is unused here! 
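 *
 * Note: both SG build paths map the command through _base_scsi_dma_map()
 * above; its only twist is widening REPORT ZONES data buffers to
 * bi-directional before handing off to the DMA layer, i.e.:
 *
 *	if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES)
 *		cmd->sc_data_direction = DMA_BIDIRECTIONAL;
 *	sges_left = scsi_dma_map(cmd);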
@@ -2717,7 +2738,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; sg_scmd = scsi_sglist(scmd); - sges_left = scsi_dma_map(scmd); + sges_left = _base_scsi_dma_map(scmd); if (sges_left < 0) return -ENOMEM; @@ -2861,7 +2882,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, } sg_scmd = scsi_sglist(scmd); - sges_left = scsi_dma_map(scmd); + sges_left = _base_scsi_dma_map(scmd); if (sges_left < 0) return -ENOMEM; @@ -3515,7 +3536,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "High IOPs queues : disabled\n"); ioc->reply_queue_count = 1; ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; - r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); + r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_INTX); if (r < 0) { dfailprintk(ioc, ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", @@ -4774,7 +4795,7 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) char desc[17] = {0}; u32 iounit_pg1_flags; - strncpy(desc, ioc->manu_pg0.ChipName, 16); + memtostr(desc, ioc->manu_pg0.ChipName); ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n", desc, (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, @@ -4858,6 +4879,12 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) i++; } + if (ioc->facts.IOCCapabilities & + MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) { + pr_cont("%sMCTP Passthru", i ? "," : ""); + i++; + } + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { pr_cont("%sNCQ", i ? "," : ""); @@ -5611,10 +5638,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) if (rc) return rc; if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { - pr_err("%s: overriding NVDATA EEDPTagMode setting\n", + pr_err("%s: overriding NVDATA EEDPTagMode setting from 0 to 1\n", ioc->name); - ioc->manu_pg11.EEDPTagMode &= ~0x3; - ioc->manu_pg11.EEDPTagMode |= 0x1; + ioc->manu_pg11.EEDPTagMode = 0x1; mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); } @@ -7025,11 +7051,12 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, int i; u8 failed; __le32 *mfp; + int ret_val; /* make sure doorbell is not in use */ if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); - return -EFAULT; + goto doorbell_diag_reset; } /* clear pending doorbell interrupts from previous state changes */ @@ -7119,6 +7146,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, le32_to_cpu(mfp[i])); } return 0; + +doorbell_diag_reset: + ret_val = _base_diag_reset(ioc); + return ret_val; } /** @@ -7998,7 +8029,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) mutex_lock(&ioc->hostdiag_unlock_mutex); if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) - goto out; + goto unlock; hcb_size = ioc->base_readl(&ioc->chip->HCBSize); drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")); @@ -8018,7 +8049,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "Invalid host diagnostic register value\n"); _base_dump_reg_set(ioc); - goto out; + goto unlock; } if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; @@ -8054,17 +8085,19 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", __func__, ioc_state); _base_dump_reg_set(ioc); - goto out; + goto fail; } pci_cfg_access_unlock(ioc->pdev); 
ioc_info(ioc, "diag reset: SUCCESS\n"); return 0; - out: +unlock: + mutex_unlock(&ioc->hostdiag_unlock_mutex); + +fail: pci_cfg_access_unlock(ioc->pdev); ioc_err(ioc, "diag reset: FAILED\n"); - mutex_unlock(&ioc->hostdiag_unlock_mutex); return -EFAULT; } @@ -8512,6 +8545,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pd_handles_sz++; + /* + * pd_handles_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may occur. + */ + ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long)); + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); if (!ioc->pd_handles) { @@ -8529,6 +8568,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pend_os_device_add_sz++; + + /* + * pend_os_device_add_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory may occur. + */ + ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz, + sizeof(unsigned long)); ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, GFP_KERNEL); if (!ioc->pend_os_device_add) { @@ -8820,6 +8866,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) if (ioc->facts.MaxDevHandle % 8) pd_handles_sz++; + /* + * pd_handles should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may + * occur. + */ + pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long)); pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, GFP_KERNEL); if (!pd_handles) { @@ -8863,9 +8915,8 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); if (!device_remove_in_progress) { ioc_info(ioc, - "Unable to allocate the memory for " - "device_remove_in_progress of sz: %d\n " - , pd_handles_sz); + "Unable to allocate the memory for device_remove_in_progress of sz: %d\n", + pd_handles_sz); return -ENOMEM; } memset(device_remove_in_progress + diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index bf100a4ebfc3..939141cde3ca 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -77,11 +77,11 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "48.100.00.00" -#define MPT3SAS_MAJOR_VERSION 48 +#define MPT3SAS_DRIVER_VERSION "52.100.00.00" +#define MPT3SAS_MAJOR_VERSION 52 #define MPT3SAS_MINOR_VERSION 100 -#define MPT3SAS_BUILD_VERSION 0 -#define MPT3SAS_RELEASE_VERSION 00 +#define MPT3SAS_BUILD_VERSION 00 +#define MPT3SAS_RELEASE_VERSION 00 #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" @@ -1162,8 +1162,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @fault_reset_work_q_name: fw fault work queue * @fault_reset_work_q: "" * @fault_reset_work: "" - * @firmware_event_name: fw event work queue - * @firmware_event_thread: "" + * @firmware_event_thread: fw event work queue * @fw_event_lock: * @fw_event_list: list of fw events * @current_evet: current processing firmware event @@ -1351,7 +1350,6 @@ struct MPT3SAS_ADAPTER { struct delayed_work fault_reset_work; /* fw event handler */ - char firmware_event_name[20]; struct 
workqueue_struct *firmware_event_thread; spinlock_t fw_event_lock; struct list_head fw_event_list; @@ -1860,9 +1858,6 @@ int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page); -int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, - u16 sz); int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, struct Mpi2ManufacturingPage10_t *config_page); @@ -1889,9 +1884,6 @@ int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u32 handle); -int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, - u32 form, u32 handle); int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page, u32 form, u32 handle); @@ -2048,9 +2040,6 @@ void mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request); -/* NCQ Prio Handling Check */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev); - void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_init_debugfs(void); diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 2e88f456fc34..45ac853e1289 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -577,44 +577,6 @@ mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7 - * @ioc: per adapter object - * @mpi_reply: reply mf payload returned from firmware - * @config_page: contents of the config page - * @sz: size of buffer passed in config_page - * Context: sleep. - * - * Return: 0 for success, non-zero for failure. 
- */ -int -mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, - u16 sz) -{ - Mpi2ConfigRequest_t mpi_request; - int r; - - memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); - mpi_request.Function = MPI2_FUNCTION_CONFIG; - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; - mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; - mpi_request.Header.PageNumber = 7; - mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION; - ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); - if (r) - goto out; - - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, - sz); - out: - return r; -} - -/** * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware @@ -1214,47 +1176,6 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1 - * @ioc: per adapter object - * @mpi_reply: reply mf payload returned from firmware - * @config_page: contents of the config page - * @form: GET_NEXT_HANDLE or HANDLE - * @handle: device handle - * Context: sleep. - * - * Return: 0 for success, non-zero for failure. - */ -int -mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, - u32 form, u32 handle) -{ - Mpi2ConfigRequest_t mpi_request; - int r; - - memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); - mpi_request.Function = MPI2_FUNCTION_CONFIG; - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; - mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; - mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; - mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION; - mpi_request.Header.PageNumber = 1; - ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); - if (r) - goto out; - - mpi_request.PageAddress = cpu_to_le32(form | handle); - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, - sizeof(*config_page)); - out: - return r; -} - -/** * mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 1c9fd26195b8..02fc204b9bf7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -186,6 +186,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, case MPI2_FUNCTION_NVME_ENCAPSULATED: desc = "nvme_encapsulated"; break; + case MPI2_FUNCTION_MCTP_PASSTHROUGH: + desc = "mctp_passthrough"; + break; } if (!desc) @@ -653,6 +656,40 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, } /** + * _ctl_send_mctp_passthru_req - Send an MCTP passthru request + * @ioc: per adapter object + * @mctp_passthru_req: MPI mctp passthru request from caller + * @psge: pointer to the H2DSGL + * @data_out_dma: DMA buffer for H2D SGL + * @data_out_sz: H2D length + * @data_in_dma: DMA buffer for
D2H SGL + * @data_in_sz: D2H length + * @smid: SMID to submit the request + * + */ +static void +_ctl_send_mctp_passthru_req( + struct MPT3SAS_ADAPTER *ioc, + Mpi26MctpPassthroughRequest_t *mctp_passthru_req, void *psge, + dma_addr_t data_out_dma, int data_out_sz, + dma_addr_t data_in_dma, int data_in_sz, + u16 smid) +{ + mctp_passthru_req->H2DLength = data_out_sz; + mctp_passthru_req->D2HLength = data_in_sz; + + /* Build the H2D SGL from the data out buffer */ + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 0, 0); + + psge += ioc->sge_size_ieee; + + /* Build the D2H SGL for the data in buffer */ + ioc->build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz); + + ioc->put_smid_default(ioc, smid); +} + +/** * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode * @ioc: per adapter object * @karg: (struct mpt3_ioctl_command) @@ -679,6 +716,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, size_t data_in_sz = 0; long ret; u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; + int tm_ret; issue_reset = 0; @@ -792,6 +830,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, init_completion(&ioc->ctl_cmds.done); switch (mpi_request->Function) { + case MPI2_FUNCTION_MCTP_PASSTHROUGH: + { + Mpi26MctpPassthroughRequest_t *mctp_passthru_req = + (Mpi26MctpPassthroughRequest_t *)request; + + if (!(ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)) { + ioc_err(ioc, "%s: MCTP Passthrough request not supported\n", + __func__); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + + _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma, + data_out_sz, data_in_dma, data_in_sz, smid); + break; + } case MPI2_FUNCTION_NVME_ENCAPSULATED: { nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; @@ -1120,18 +1175,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, if (pcie_device && (!ioc->tm_custom_handling) && (!(mpt3sas_scsih_is_pcie_scsi_device( pcie_device->device_info)))) - mpt3sas_scsih_issue_locked_tm(ioc, + tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, pcie_device->reset_timeout, MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); else - mpt3sas_scsih_issue_locked_tm(ioc, + tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + + if (tm_ret != SUCCESS) { + ioc_info(ioc, + "target reset failed, issue hard reset: handle (0x%04x)\n", + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + } } else mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); } @@ -1200,6 +1262,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) } karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + karg.driver_capability |= MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU; + if (copy_to_user(arg, &karg, sizeof(karg))) { pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); @@ -2787,6 +2851,218 @@ out_unlock_pciaccess: } /** + * _ctl_get_mpt_mctp_passthru_adapter - Traverse the IOC list and return the IOC at + * dev_index position that supports MCTP passthru + * @dev_index: position in the mpt3sas_ioc_list to search for + * Return: pointer to the IOC on success, + * NULL if no matching IOC is found + */ +static struct MPT3SAS_ADAPTER *
+_ctl_get_mpt_mctp_passthru_adapter(int dev_index) +{ + struct MPT3SAS_ADAPTER *ioc = NULL; + int count = 0; + + spin_lock(&gioc_lock); + /* Return the IOC at position dev_index among those supporting MCTP passthru */ + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) { + if (count == dev_index) { + spin_unlock(&gioc_lock); + return ioc; + } + count++; + } + } + spin_unlock(&gioc_lock); + + return NULL; +} + +/** + * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough + * capable devices managed by the driver. + * + * Returns number of devices that support MCTP passthrough. + */ +int +mpt3sas_get_device_count(void) +{ + int count = 0; + struct MPT3SAS_ADAPTER *ioc = NULL; + + spin_lock(&gioc_lock); + /* Traverse ioc list and return number of IOC that support MCTP passthru */ + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) + if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) + count++; + + spin_unlock(&gioc_lock); + + return count; +} +EXPORT_SYMBOL(mpt3sas_get_device_count); + +/** + * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to + * firmware + * @command: The MPI MCTP passthrough command to send to firmware + * + * Returns 0 on success, anything else is an error. + */ +int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command) +{ + struct MPT3SAS_ADAPTER *ioc; + MPI2RequestHeader_t *mpi_request = NULL, *request; + MPI2DefaultReply_t *mpi_reply; + Mpi26MctpPassthroughRequest_t *mctp_passthru_req; + u16 smid; + unsigned long timeout; + u8 issue_reset = 0; + u32 sz; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma = 0; + size_t data_out_sz = 0; + void *data_in = NULL; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + long ret; + + /* Retrieve ioc from dev_index */ + ioc = _ctl_get_mpt_mctp_passthru_adapter(command->dev_index); + if (!ioc) + return -ENODEV; + + mutex_lock(&ioc->pci_access_mutex); + if (ioc->shost_recovery || + ioc->pci_error_recovery || ioc->is_driver_loading || + ioc->remove_host) { + ret = -EAGAIN; + goto unlock_pci_access; + } + + /* Lock the ctl_cmds mutex to ensure a single ctl cmd is pending */ + if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { + ret = -ERESTARTSYS; + goto unlock_pci_access; + } + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + ret = -EAGAIN; + goto unlock_ctl_cmds; + } + + ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (ret) + goto unlock_ctl_cmds; + + mpi_request = (MPI2RequestHeader_t *)command->mpi_request; + if (mpi_request->Function != MPI2_FUNCTION_MCTP_PASSTHROUGH) { + ioc_err(ioc, "%s: Invalid request received, Function 0x%x\n", + __func__, mpi_request->Function); + ret = -EINVAL; + goto unlock_ctl_cmds; + } + + /* Use first reserved smid for passthrough commands */ + smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; + ret = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + memcpy(request, command->mpi_request, sizeof(Mpi26MctpPassthroughRequest_t)); + ioc->ctl_cmds.smid = smid; + data_out_sz = command->data_out_size; + data_in_sz = command->data_in_size; + + /* obtain dma-able memory for data transfer */ + if (data_out_sz) /* WRITE */ { + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz, + &data_out_dma, GFP_ATOMIC); + if (!data_out) { + ret
= -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + memcpy(data_out, command->data_out_buf_ptr, data_out_sz); + + } + + if (data_in_sz) /* READ */ { + data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz, + &data_in_dma, GFP_ATOMIC); + if (!data_in) { + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + psge = &((Mpi26MctpPassthroughRequest_t *)request)->H2DSGL; + + init_completion(&ioc->ctl_cmds.done); + + mctp_passthru_req = (Mpi26MctpPassthroughRequest_t *)request; + + _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma, + data_out_sz, data_in_dma, data_in_sz, smid); + + timeout = command->timeout; + if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) + timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; + + wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ); + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi26MctpPassthroughRequest_t) / 4, issue_reset); + goto issue_host_reset; + } + + mpi_reply = ioc->ctl_cmds.reply; + + /* copy out xdata to user */ + if (data_in_sz) + memcpy(command->data_in_buf_ptr, data_in, data_in_sz); + + /* copy out reply message frame to user */ + if (command->max_reply_bytes) { + sz = min_t(u32, command->max_reply_bytes, ioc->reply_sz); + memcpy(command->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz); + } + +issue_host_reset: + if (issue_reset) { + ret = -ENODATA; + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + } + +out: + /* free memory associated with sg buffers */ + if (data_in) + dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, + data_in_dma); + + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, + data_out_dma); + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + +unlock_ctl_cmds: + mutex_unlock(&ioc->ctl_cmds.mutex); + +unlock_pci_access: + mutex_unlock(&ioc->pci_access_mutex); + return ret; + +} +EXPORT_SYMBOL(mpt3sas_send_mctp_passthru_req); + +/** * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) * @file: (struct file) * @cmd: ioctl opcode @@ -4088,7 +4364,7 @@ sas_ncq_prio_supported_show(struct device *dev, { struct scsi_device *sdev = to_scsi_device(dev); - return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev)); + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); } static DEVICE_ATTR_RO(sas_ncq_prio_supported); @@ -4123,7 +4399,7 @@ sas_ncq_prio_enable_store(struct device *dev, if (kstrtobool(buf, &ncq_prio_enable)) return -EINVAL; - if (!scsih_ncq_prio_supp(sdev)) + if (!sas_ata_ncq_prio_supported(sdev)) return -EINVAL; sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index 171709e91006..483e0549c02f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -160,6 +160,9 @@ struct mpt3_ioctl_pci_info { #define MPT3_IOCTL_INTERFACE_SAS35 (0x07) #define MPT2_IOCTL_VERSION_LENGTH (32) +/* Bits set for mpt3_ioctl_iocinfo.driver_cap */ +#define MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU 0x1 + /** * struct mpt3_ioctl_iocinfo - generic controller info * @hdr - generic header @@ -175,6 +178,7 @@ struct mpt3_ioctl_pci_info { * @driver_version - driver version - 32 ASCII characters * @rsvd1 - reserved * @scsi_id - scsi id of adapter 0 + * @driver_capability - driver capabilities * @rsvd2 - reserved * @pci_information - pci info (2nd revision) */ @@ -192,7 +196,8 @@ struct mpt3_ioctl_iocinfo { uint8_t 
driver_version[MPT2_IOCTL_VERSION_LENGTH]; uint8_t rsvd1; uint8_t scsi_id; - uint16_t rsvd2; + uint8_t driver_capability; + uint8_t rsvd2; struct mpt3_ioctl_pci_info pci_information; }; @@ -458,4 +463,46 @@ struct mpt3_enable_diag_sbr_reload { struct mpt3_ioctl_header hdr; }; +/** + * struct mpt3_passthru_command - generic mpt firmware passthru command + * @dev_index - device index + * @timeout - command timeout in seconds. (if zero then use driver default + * value). + * @reply_frame_buf_ptr - MPI reply location + * @data_in_buf_ptr - destination for read + * @data_out_buf_ptr - data source for write + * @max_reply_bytes - maximum number of reply bytes to be sent to app. + * @data_in_size - number of bytes for data transfer in (read) + * @data_out_size - number of bytes for data transfer out (write) + * @mpi_request - request frame + */ +struct mpt3_passthru_command { + u8 dev_index; + uint32_t timeout; + void *reply_frame_buf_ptr; + void *data_in_buf_ptr; + void *data_out_buf_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + Mpi26MctpPassthroughRequest_t *mpi_request; +}; + +/* + * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough + * capable devices managed by the driver. + * + * Returns number of devices that support MCTP passthrough. + */ +int mpt3sas_get_device_count(void); + +/* + * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to + * firmware + * @command: The MPI MCTP passthrough command to send to firmware + * + * Returns 0 on success, anything else is an error. + */ +int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command); + #endif /* MPT3SAS_CTL_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index ef8ee93005ea..508861e88d9f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -53,8 +53,7 @@ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/raid_class.h> -#include <linux/blk-mq-pci.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "mpt3sas_base.h" @@ -302,8 +301,8 @@ struct _scsi_io_transfer { /** * _scsih_set_debug_level - global setting of ioc->logging_level. - * @val: ? - * @kp: ? + * @val: value of the parameter to be set + * @kp: pointer to kernel_param structure * * Note: The logging levels are defined in mpt3sas_debug.h. */ @@ -2026,14 +2025,14 @@ scsih_target_destroy(struct scsi_target *starget) } /** - * scsih_slave_alloc - device add routine + * scsih_sdev_init - device add routine * @sdev: scsi device struct * * Return: 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_slave_alloc(struct scsi_device *sdev) +scsih_sdev_init(struct scsi_device *sdev) { struct Scsi_Host *shost; struct MPT3SAS_ADAPTER *ioc; @@ -2108,11 +2107,11 @@ scsih_slave_alloc(struct scsi_device *sdev) } /** - * scsih_slave_destroy - device destroy routine + * scsih_sdev_destroy - device destroy routine * @sdev: scsi device struct */ static void -scsih_slave_destroy(struct scsi_device *sdev) +scsih_sdev_destroy(struct scsi_device *sdev) { struct MPT3SAS_TARGET *sas_target_priv_data; struct scsi_target *starget; @@ -2497,14 +2496,15 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) } /** - * scsih_slave_configure - device configure routine. + * scsih_sdev_configure - device configure routine. * @sdev: scsi device struct + * @lim: queue limits * * Return: 0 if ok.
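For in-kernel users of the two exports declared in mpt3sas_ctl.h above, the intended flow is: pick a dev_index below mpt3sas_get_device_count(), point mpi_request at a frame whose Function is MPI2_FUNCTION_MCTP_PASSTHROUGH, and hand the filled-in mpt3_passthru_command to mpt3sas_send_mctp_passthru_req(). A hedged sketch of such a caller; the wrapper name and buffer handling are illustrative, not part of this patch:

    static int send_mctp(Mpi26MctpPassthroughRequest_t *req,
                         void *out_buf, u32 out_len, void *in_buf, u32 in_len)
    {
        struct mpt3_passthru_command cmd = {
            .dev_index = 0,        /* first MCTP-capable IOC */
            .timeout = 0,          /* 0 selects the driver default timeout */
            .mpi_request = req,    /* Function must be MPI2_FUNCTION_MCTP_PASSTHROUGH */
            .data_out_buf_ptr = out_buf,    /* H2D payload */
            .data_out_size = out_len,
            .data_in_buf_ptr = in_buf,      /* D2H payload */
            .data_in_size = in_len,
            /* reply_frame_buf_ptr/max_reply_bytes left zero: skip reply copy-out */
        };

        if (!mpt3sas_get_device_count())
            return -ENODEV;

        return mpt3sas_send_mctp_passthru_req(&cmd);    /* 0 on success */
    }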
Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_slave_configure(struct scsi_device *sdev) +scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { struct Scsi_Host *shost = sdev->host; struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); @@ -2609,8 +2609,7 @@ scsih_slave_configure(struct scsi_device *sdev) raid_device->num_pds, ds); if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { - blk_queue_max_hw_sectors(sdev->request_queue, - MPT3SAS_RAID_MAX_SECTORS); + lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS; sdev_printk(KERN_INFO, sdev, "Set queue's max_sector to: %u\n", MPT3SAS_RAID_MAX_SECTORS); @@ -2675,20 +2674,12 @@ scsih_slave_configure(struct scsi_device *sdev) pcie_device->connector_name); if (pcie_device->nvme_mdts) - blk_queue_max_hw_sectors(sdev->request_queue, - pcie_device->nvme_mdts/512); + lim->max_hw_sectors = pcie_device->nvme_mdts / 512; pcie_device_put(pcie_device); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); mpt3sas_scsih_change_queue_depth(sdev, qdepth); - /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be - ** merged and can eliminate holes created during merging - ** operation. - **/ - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, - sdev->request_queue); - blk_queue_virt_boundary(sdev->request_queue, - ioc->page_size - 1); + lim->virt_boundary_mask = ioc->page_size - 1; return 0; } @@ -2712,7 +2703,7 @@ scsih_slave_configure(struct scsi_device *sdev) ssp_target = 1; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SEP) { - sdev_printk(KERN_WARNING, sdev, + sdev_printk(KERN_INFO, sdev, "set ignore_delay_remove for handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); sas_device_priv_data->ignore_delay_remove = 1; @@ -11898,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost) */ map->queue_offset = qoff; if (i != HCTX_TYPE_POLL) - blk_mq_pci_map_queues(map, ioc->pdev, offset); + blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset); else blk_mq_map_queues(map); @@ -11913,10 +11904,10 @@ static const struct scsi_host_template mpt2sas_driver_template = { .proc_name = MPT2SAS_DRIVER_NAME, .queuecommand = scsih_qcmd, .target_alloc = scsih_target_alloc, - .slave_alloc = scsih_slave_alloc, - .slave_configure = scsih_slave_configure, + .sdev_init = scsih_sdev_init, + .sdev_configure = scsih_sdev_configure, .target_destroy = scsih_target_destroy, - .slave_destroy = scsih_slave_destroy, + .sdev_destroy = scsih_sdev_destroy, .scan_finished = scsih_scan_finished, .scan_start = scsih_scan_start, .change_queue_depth = scsih_change_queue_depth, @@ -11951,10 +11942,10 @@ static const struct scsi_host_template mpt3sas_driver_template = { .proc_name = MPT3SAS_DRIVER_NAME, .queuecommand = scsih_qcmd, .target_alloc = scsih_target_alloc, - .slave_alloc = scsih_slave_alloc, - .slave_configure = scsih_slave_configure, + .sdev_init = scsih_sdev_init, + .sdev_configure = scsih_sdev_configure, .target_destroy = scsih_target_destroy, - .slave_destroy = scsih_slave_destroy, + .sdev_destroy = scsih_sdev_destroy, .scan_finished = scsih_scan_finished, .scan_start = scsih_scan_start, .change_queue_depth = scsih_change_queue_depth, @@ -12309,10 +12300,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); /* event thread */ - snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), - "fw_event_%s%d", ioc->driver_name, ioc->id); ioc->firmware_event_thread = alloc_ordered_workqueue( - ioc->firmware_event_name, 0); + "fw_event_%s%d", 
0, ioc->driver_name, ioc->id); if (!ioc->firmware_event_thread) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); @@ -12573,29 +12562,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -/** - * scsih_ncq_prio_supp - Check for NCQ command priority support - * @sdev: scsi device struct - * - * This is called when a user indicates they would like to enable - * ncq command priorities. This works only on SATA devices. - */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev) -{ - struct scsi_vpd *vpd; - bool ncq_prio_supp = false; - - rcu_read_lock(); - vpd = rcu_dereference(sdev->vpd_pg89); - if (!vpd || vpd->len < 214) - goto out; - - ncq_prio_supp = (vpd->data[213] >> 4) & 1; -out: - rcu_read_unlock(); - - return ncq_prio_supp; -} /* * The pci device ids are defined in mpi/mpi2_cnfg.h. */ @@ -12744,7 +12710,7 @@ static const struct pci_device_id mpt3sas_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); -static struct pci_error_handlers _mpt3sas_err_handler = { +static const struct pci_error_handlers _mpt3sas_err_handler = { .error_detected = scsih_pci_error_detected, .mmio_enabled = scsih_pci_mmio_enabled, .slot_reset = scsih_pci_slot_reset, diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 421ea511b664..dc74ebc6405a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -328,10 +328,10 @@ struct rep_manu_reply { u8 reserved0[2]; u8 sas_format; u8 reserved2[3]; - u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; - u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; - u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; - u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring; u16 component_id; u8 component_revision_id; u8 reserved3; @@ -458,17 +458,13 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, goto out; manufacture_reply = data_out + sizeof(struct rep_manu_request); - strncpy(edev->vendor_id, manufacture_reply->vendor_id, - SAS_EXPANDER_VENDOR_ID_LEN); - strncpy(edev->product_id, manufacture_reply->product_id, - SAS_EXPANDER_PRODUCT_ID_LEN); - strncpy(edev->product_rev, manufacture_reply->product_rev, - SAS_EXPANDER_PRODUCT_REV_LEN); + memtostr(edev->vendor_id, manufacture_reply->vendor_id); + memtostr(edev->product_id, manufacture_reply->product_id); + memtostr(edev->product_rev, manufacture_reply->product_rev); edev->level = manufacture_reply->sas_format & 1; if (edev->level) { - strncpy(edev->component_vendor_id, - manufacture_reply->component_vendor_id, - SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + memtostr(edev->component_vendor_id, + manufacture_reply->component_vendor_id); tmp = (u8 *)&manufacture_reply->component_id; edev->component_id = tmp[0] << 8 | tmp[1]; edev->component_revision_id = diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c index 1d64e5056a8a..2b04f0852dec 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c +++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c @@ -42,7 +42,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "mpt3sas_base.h" diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c 
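The firmware-event workqueue change above (and the matching myrb/myrs hunks further down) leans on alloc_ordered_workqueue() taking a printf-style format string plus arguments, which is what lets the separately snprintf'd name buffer, and the struct field that stored it, be deleted. A sketch of the before/after with hypothetical names:

    /* before: name formatted into a long-lived buffer, then passed in */
    snprintf(hba->wq_name, sizeof(hba->wq_name), "foo_wq_%d", hba->id);
    hba->wq = create_singlethread_workqueue(hba->wq_name);

    /* after: the workqueue core formats and owns the name itself */
    hba->wq = alloc_ordered_workqueue("foo_wq_%d", WQ_MEM_RECLAIM, hba->id);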
index e08a38e2a442..9b19d5205e50 100644 --- a/drivers/scsi/mvme16x_scsi.c +++ b/drivers/scsi/mvme16x_scsi.c @@ -127,7 +127,7 @@ static struct platform_driver mvme16x_scsi_driver = { .name = "mvme16x-scsi", }, .probe = mvme16x_probe, - .remove_new = mvme16x_device_remove, + .remove = mvme16x_device_remove, }; static int __init mvme16x_scsi_init(void) diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h index c25a5dfe7889..749f616b21af 100644 --- a/drivers/scsi/mvsas/mv_64xx.h +++ b/drivers/scsi/mvsas/mv_64xx.h @@ -101,8 +101,8 @@ enum sas_sata_vsp_regs { VSR_PHY_MODE9 = 0x09, /* Test */ VSR_PHY_MODE10 = 0x0A, /* Power */ VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ - VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ + VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */ + VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */ }; enum chip_register_bits { diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 43ebb331e216..2c72da6b8cf0 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -26,33 +26,18 @@ static const struct mvs_chip_info mvs_chips[] = { }; static const struct attribute_group *mvst_host_groups[]; +static const struct attribute_group *mvst_sdev_groups[]; #define SOC_SAS_NUM 2 static const struct scsi_host_template mvs_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .queuecommand = sas_queuecommand, - .dma_need_drain = ata_scsi_dma_need_drain, - .target_alloc = sas_target_alloc, - .slave_configure = sas_slave_configure, + LIBSAS_SHT_BASE .scan_finished = mvs_scan_finished, .scan_start = mvs_scan_start, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, .can_queue = 1, - .this_id = -1, .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sas_ioctl, -#endif .shost_groups = mvst_host_groups, + .sdev_groups = mvst_sdev_groups, .track_queue_depth = 1, }; @@ -624,7 +609,7 @@ static void mvs_pci_remove(struct pci_dev *pdev) return; } -static struct pci_device_id mvs_pci_table[] = { +static const struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, { @@ -691,13 +676,7 @@ static struct pci_driver mvs_pci_driver = { .remove = mvs_pci_remove, }; -static ssize_t driver_version_show(struct device *cdev, - struct device_attribute *attr, char *buffer) -{ - return sysfs_emit(buffer, "%s\n", DRV_VERSION); -} - -static DEVICE_ATTR_RO(driver_version); +static DEVICE_STRING_ATTR_RO(driver_version, 0444, DRV_VERSION); static ssize_t interrupt_coalescing_store(struct device *cdev, struct device_attribute *attr, @@ -772,13 +751,18 @@ static void __exit mvs_exit(void) } static struct attribute *mvst_host_attrs[] = { - &dev_attr_driver_version.attr, + &dev_attr_driver_version.attr.attr, &dev_attr_interrupt_coalescing.attr, NULL, }; ATTRIBUTE_GROUPS(mvst_host); +static const struct attribute_group *mvst_sdev_groups[] = { + &sas_ata_sdev_attr_group, + NULL +}; + module_init(mvs_init); module_exit(mvs_exit); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 1444b1f1c4c8..52ac10226cb0 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -151,16 +151,6 @@ static inline u8 mvs_assign_reg_set(struct 
mvs_info *mvi, return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); } -void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) -{ - u32 no; - for_each_phy(phy_mask, phy_mask, no) { - if (!(phy_mask & 1)) - continue; - MVS_CHIP_DISP->phy_reset(mvi, no, hard); - } -} - int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { @@ -986,7 +976,7 @@ static u32 mvs_is_sig_fis_received(u32 irq_status) static void mvs_sig_remove_timer(struct mvs_phy *phy) { if (phy->timer.function) - del_timer(&phy->timer); + timer_delete(&phy->timer); phy->timer.function = NULL; } diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 68df771e2975..09ce3f2241f2 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -23,7 +23,7 @@ #include <linux/irq.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/libsas.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> @@ -425,7 +425,6 @@ struct mvs_task_exec_info { void mvs_get_sas_addr(void *buf, u32 buflen); void mvs_iounmap(void __iomem *regs); int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); -void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata); void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo, diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index d9d366ec17dc..96549e7f5705 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2000,7 +2000,8 @@ static struct mvumi_instance_template mvumi_instance_9580 = { .reset_host = mvumi_reset_host_9580, }; -static int mvumi_slave_configure(struct scsi_device *sdev) +static int mvumi_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct mvumi_hba *mhba; unsigned char bitcount = sizeof(unsigned char) * 8; @@ -2172,7 +2173,7 @@ static const struct scsi_host_template mvumi_template = { .module = THIS_MODULE, .name = "Marvell Storage Controller", - .slave_configure = mvumi_slave_configure, + .sdev_configure = mvumi_sdev_configure, .queuecommand = mvumi_queue_command, .eh_timed_out = mvumi_timed_out, .eh_host_reset_handler = mvumi_host_reset, diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index f684eb5e0489..486db5b2f05d 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -16,7 +16,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/raid_class.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> @@ -112,9 +112,8 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb) return false; } - snprintf(cb->work_q_name, sizeof(cb->work_q_name), - "myrb_wq_%d", cb->host->host_no); - cb->work_q = create_singlethread_workqueue(cb->work_q_name); + cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM, + cb->host->host_no); if (!cb->work_q) { dma_pool_destroy(cb->dcdb_pool); cb->dcdb_pool = NULL; @@ -892,7 +891,7 @@ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn) status = mmio_init_fn(pdev, base, &mbox); if (status != MYRB_STATUS_SUCCESS) { dev_err(&pdev->dev, - "Failed to enable mailbox, statux %02X\n", + "Failed to enable mailbox, status %02X\n", status); return false; } @@ -1620,7 +1619,7 @@ static int myrb_queuecommand(struct Scsi_Host *shost, return myrb_pthru_queuecommand(shost, scmd); } -static int 
myrb_ldev_slave_alloc(struct scsi_device *sdev) +static int myrb_ldev_sdev_init(struct scsi_device *sdev) { struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_ldev_info *ldev_info; @@ -1628,8 +1627,6 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev) enum raid_level level; ldev_info = cb->ldev_info_buf + ldev_num; - if (!ldev_info) - return -ENXIO; sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL); if (!sdev->hostdata) @@ -1666,7 +1663,7 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev) return 0; } -static int myrb_pdev_slave_alloc(struct scsi_device *sdev) +static int myrb_pdev_sdev_init(struct scsi_device *sdev) { struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_pdev_state *pdev_info; @@ -1702,7 +1699,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev) return 0; } -static int myrb_slave_alloc(struct scsi_device *sdev) +static int myrb_sdev_init(struct scsi_device *sdev) { if (sdev->channel > myrb_logical_channel(sdev->host)) return -ENXIO; @@ -1711,12 +1708,13 @@ static int myrb_slave_alloc(struct scsi_device *sdev) return -ENXIO; if (sdev->channel == myrb_logical_channel(sdev->host)) - return myrb_ldev_slave_alloc(sdev); + return myrb_ldev_sdev_init(sdev); - return myrb_pdev_slave_alloc(sdev); + return myrb_pdev_sdev_init(sdev); } -static int myrb_slave_configure(struct scsi_device *sdev) +static int myrb_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct myrb_ldev_info *ldev_info; @@ -1742,7 +1740,7 @@ static int myrb_slave_configure(struct scsi_device *sdev) return 0; } -static void myrb_slave_destroy(struct scsi_device *sdev) +static void myrb_sdev_destroy(struct scsi_device *sdev) { kfree(sdev->hostdata); } @@ -2209,9 +2207,9 @@ static const struct scsi_host_template myrb_template = { .proc_name = "myrb", .queuecommand = myrb_queuecommand, .eh_host_reset_handler = myrb_host_reset, - .slave_alloc = myrb_slave_alloc, - .slave_configure = myrb_slave_configure, - .slave_destroy = myrb_slave_destroy, + .sdev_init = myrb_sdev_init, + .sdev_configure = myrb_sdev_configure, + .sdev_destroy = myrb_sdev_destroy, .bios_param = myrb_biosparam, .cmd_size = sizeof(struct myrb_cmdblk), .shost_groups = myrb_shost_groups, diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h index fb8eacfceee8..78dc4136fb10 100644 --- a/drivers/scsi/myrb.h +++ b/drivers/scsi/myrb.h @@ -712,7 +712,6 @@ struct myrb_hba { struct Scsi_Host *host; struct workqueue_struct *work_q; - char work_q_name[20]; struct delayed_work monitor_work; unsigned long primary_monitor_time; unsigned long secondary_monitor_time; diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index e824be9d9bbb..95af3bb03834 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -17,7 +17,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/raid_class.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> @@ -1786,7 +1786,7 @@ static unsigned short myrs_translate_ldev(struct myrs_hba *cs, return ldev_num; } -static int myrs_slave_alloc(struct scsi_device *sdev) +static int myrs_sdev_init(struct scsi_device *sdev) { struct myrs_hba *cs = shost_priv(sdev->host); unsigned char status; @@ -1882,7 +1882,8 @@ static int myrs_slave_alloc(struct scsi_device *sdev) return 0; } -static int myrs_slave_configure(struct scsi_device *sdev) +static int myrs_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct myrs_hba *cs = 
shost_priv(sdev->host); struct myrs_ldev_info *ldev_info; @@ -1910,7 +1911,7 @@ static int myrs_slave_configure(struct scsi_device *sdev) return 0; } -static void myrs_slave_destroy(struct scsi_device *sdev) +static void myrs_sdev_destroy(struct scsi_device *sdev) { kfree(sdev->hostdata); } @@ -1921,9 +1922,9 @@ static const struct scsi_host_template myrs_template = { .proc_name = "myrs", .queuecommand = myrs_queuecommand, .eh_host_reset_handler = myrs_host_reset, - .slave_alloc = myrs_slave_alloc, - .slave_configure = myrs_slave_configure, - .slave_destroy = myrs_slave_destroy, + .sdev_init = myrs_sdev_init, + .sdev_configure = myrs_sdev_configure, + .sdev_destroy = myrs_sdev_destroy, .cmd_size = sizeof(struct myrs_cmdblk), .shost_groups = myrs_shost_groups, .sdev_groups = myrs_sdev_groups, @@ -2206,9 +2207,8 @@ static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs) return false; } - snprintf(cs->work_q_name, sizeof(cs->work_q_name), - "myrs_wq_%d", shost->host_no); - cs->work_q = create_singlethread_workqueue(cs->work_q_name); + cs->work_q = alloc_ordered_workqueue("myrs_wq_%d", WQ_MEM_RECLAIM, + shost->host_no); if (!cs->work_q) { dma_pool_destroy(cs->dcdb_pool); cs->dcdb_pool = NULL; diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h index 9f6696d0ddd5..e1d6b123de7b 100644 --- a/drivers/scsi/myrs.h +++ b/drivers/scsi/myrs.h @@ -904,7 +904,6 @@ struct myrs_hba { bool disable_enc_msg; struct workqueue_struct *work_q; - char work_q_name[20]; struct delayed_work monitor_work; unsigned long primary_monitor_time; unsigned long secondary_monitor_time; diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 35869b4f9329..14ac81ec0aa0 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -7786,7 +7786,7 @@ static void __init ncr_getclock (struct ncb *np, int mult) /*===================== LINUX ENTRY POINTS SECTION ==========================*/ -static int ncr53c8xx_slave_alloc(struct scsi_device *device) +static int ncr53c8xx_sdev_init(struct scsi_device *device) { struct Scsi_Host *host = device->host; struct ncb *np = ((struct host_data *) host->hostdata)->ncb; @@ -7796,7 +7796,8 @@ static int ncr53c8xx_slave_alloc(struct scsi_device *device) return 0; } -static int ncr53c8xx_slave_configure(struct scsi_device *device) +static int ncr53c8xx_sdev_configure(struct scsi_device *device, + struct queue_limits *lim) { struct Scsi_Host *host = device->host; struct ncb *np = ((struct host_data *) host->hostdata)->ncb; @@ -8093,8 +8094,8 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt, tpnt->shost_groups = ncr53c8xx_host_groups; tpnt->queuecommand = ncr53c8xx_queue_command; - tpnt->slave_configure = ncr53c8xx_slave_configure; - tpnt->slave_alloc = ncr53c8xx_slave_alloc; + tpnt->sdev_configure = ncr53c8xx_sdev_configure; + tpnt->sdev_init = ncr53c8xx_sdev_init; tpnt->eh_bus_reset_handler = ncr53c8xx_bus_reset; tpnt->can_queue = SCSI_NCR_CAN_QUEUE; tpnt->this_id = 7; diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index b7987019686e..abc4ce9eae74 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -66,7 +66,7 @@ static const char *nsp32_release_version = "1.2"; /**************************************************************************** * Supported hardware */ -static struct pci_device_id nsp32_pci_table[] = { +static const struct pci_device_id nsp32_pci_table[] = { { .vendor = PCI_VENDOR_ID_IODATA, .device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II, diff --git a/drivers/scsi/pcmcia/aha152x_stub.c 
b/drivers/scsi/pcmcia/aha152x_stub.c index 6a6621728c69..1b54ba51a485 100644 --- a/drivers/scsi/pcmcia/aha152x_stub.c +++ b/drivers/scsi/pcmcia/aha152x_stub.c @@ -75,6 +75,7 @@ module_param(synchronous, int, 0); module_param(reset_delay, int, 0); module_param(ext_trans, int, 0); +MODULE_DESCRIPTION("Adaptec AHA152X-compatible PCMCIA SCSI card driver"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 7b27618fd7b2..7618f9cc9986 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -644,7 +644,7 @@ static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); #define FLASH_CMD_SET_NVMD 0x02 struct flash_command { - u8 command[8]; + u8 command[8] __nonstring; int code; }; @@ -1039,3 +1039,8 @@ const struct attribute_group *pm8001_host_groups[] = { &pm8001_host_attr_group, NULL }; + +const struct attribute_group *pm8001_sdev_groups[] = { + &sas_ata_sdev_attr_group, + NULL +}; diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index 501b574239e8..4e19d61dffbb 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -90,10 +90,13 @@ enum port_type { #define PM8001_MAX_PORTS 16 /* max. possible ports */ #define PM8001_MAX_DEVICES 2048 /* max supported device */ #define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ -#define PM8001_RESERVE_SLOT 8 +#define PM8001_RESERVE_SLOT 128 -#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528 -#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG +#define PM8001_SECTOR_SIZE 512 +#define PM8001_PAGE_SIZE_4K 4096 +#define PM8001_MAX_IO_SIZE (4 * 1024 * 1024) +#define PM8001_MAX_DMA_SG (PM8001_MAX_IO_SIZE / PM8001_PAGE_SIZE_4K) +#define PM8001_MAX_SECTORS (PM8001_MAX_IO_SIZE / PM8001_SECTOR_SIZE) enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index dec1e2d380f1..42a4eeac24c9 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -3472,12 +3472,13 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) status, tag, scp); switch (status) { case IO_SUCCESS: - pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n"); + pm8001_dbg(pm8001_ha, FAIL, "ABORT IO_SUCCESS for tag %#x\n", + tag); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_SAM_STAT_GOOD; break; case IO_NOT_VALID: - pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n"); + pm8001_dbg(pm8001_ha, FAIL, "IO_NOT_VALID for tag %#x\n", tag); ts->resp = TMF_RESP_FUNC_FAILED; break; } diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index ed6b7d954dda..599410bcdfea 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -68,6 +68,10 @@ static bool pm8001_read_wwn = true; module_param_named(read_wwn, pm8001_read_wwn, bool, 0444); MODULE_PARM_DESC(zoned, "Get WWN from the controller. 
Default: true"); +uint pcs_event_log_severity = 0x03; +module_param(pcs_event_log_severity, int, 0644); +MODULE_PARM_DESC(pcs_event_log_severity, "PCS event log severity level"); + static struct scsi_transport_template *pm8001_stt; static int pm8001_init_ccb_tag(struct pm8001_hba_info *); @@ -100,40 +104,26 @@ static void pm8001_map_queues(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; - if (pm8001_ha->number_of_intr > 1) - blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1); + if (pm8001_ha->number_of_intr > 1) { + blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1); + return; + } - return blk_mq_map_queues(qmap); + blk_mq_map_queues(qmap); } /* * The main structure which LLDD must register for scsi core. */ static const struct scsi_host_template pm8001_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .proc_name = DRV_NAME, - .queuecommand = sas_queuecommand, - .dma_need_drain = ata_scsi_dma_need_drain, - .target_alloc = sas_target_alloc, - .slave_configure = sas_slave_configure, + LIBSAS_SHT_BASE .scan_finished = pm8001_scan_finished, .scan_start = pm8001_scan_start, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, .can_queue = 1, - .this_id = -1, .sg_tablesize = PM8001_MAX_DMA_SG, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sas_ioctl, -#endif + .max_sectors = PM8001_MAX_SECTORS, .shost_groups = pm8001_host_groups, + .sdev_groups = pm8001_sdev_groups, .track_queue_depth = 1, .cmd_per_lun = 32, .map_queues = pm8001_map_queues, @@ -462,9 +452,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, } for (i = 0; i < PM8001_MAX_DEVICES; i++) { pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; - pm8001_ha->devices[i].id = i; - pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; - atomic_set(&pm8001_ha->devices[i].running_req, 0); } pm8001_ha->flags = PM8001F_INIT_TIME; return 0; @@ -749,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) return -EIO; } time_remaining = wait_for_completion_timeout(&completion, - msecs_to_jiffies(60*1000)); // 1 min + secs_to_jiffies(60)); // 1 min if (!time_remaining) { kfree(payload.func_specific); pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n"); @@ -1448,7 +1435,7 @@ err_out_disable: /* update of pci device, vendor id and driver data with * unique value for each of the controller */ -static struct pci_device_id pm8001_pci_table[] = { +static const struct pci_device_id pm8001_pci_table[] = { { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, { PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 }, { PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 }, diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index a5a31dfa4512..f7067878b34f 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -101,6 +101,63 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) return 0; } +static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op, + int *ata_tag, bool *task_aborted) +{ + unsigned long flags; + struct ata_queued_cmd *qc = NULL; + + *ata_op = 0; + *ata_tag = -1; + *task_aborted = false; + + if (!task) + return; + + spin_lock_irqsave(&task->task_state_lock, flags); + if 
(unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) + *task_aborted = true; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (task->task_proto == SAS_PROTOCOL_STP) { + // sas_ata_qc_issue path uses SAS_PROTOCOL_STP. + // This only works for scsi + libsas + libata users. + qc = task->uldd_task; + if (qc) { + *ata_op = qc->tf.command; + *ata_tag = qc->tag; + } + } +} + +void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *target_pm8001_dev) +{ + int i = 0, ata_op = 0, ata_tag = -1; + struct pm8001_ccb_info *ccb = NULL; + struct sas_task *task = NULL; + struct pm8001_device *pm8001_dev = NULL; + bool task_aborted; + + for (i = 0; i < pm8001_ha->ccb_count; i++) { + ccb = &pm8001_ha->ccb_info[i]; + if (ccb->ccb_tag == PM8001_INVALID_TAG) + continue; + pm8001_dev = ccb->device; + if (target_pm8001_dev && pm8001_dev && + target_pm8001_dev != pm8001_dev) + continue; + task = ccb->task; + pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted); + pm8001_dbg(pm8001_ha, FAIL, + "tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n", + ccb->ccb_tag, + (pm8001_dev ? pm8001_dev->device_id : 0), + task, task_aborted, + ata_op, ata_tag); + } +} + /** * pm8001_mem_alloc - allocate memory for pm8001. * @pdev: pci device. @@ -166,7 +223,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, unsigned long flags; pm8001_ha = sas_phy->ha->lldd_ha; phy = &pm8001_ha->phy[phy_id]; - pm8001_ha->phy[phy_id].enable_completion = &completion; if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { /* @@ -190,6 +246,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, rates->maximum_linkrate; } if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + pm8001_ha->phy[phy_id].enable_completion = &completion; PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } @@ -198,6 +255,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_HARD_RESET: if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + pm8001_ha->phy[phy_id].enable_completion = &completion; PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } @@ -206,6 +264,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_LINK_RESET: if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + pm8001_ha->phy[phy_id].enable_completion = &completion; PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } @@ -372,23 +431,6 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); } - /* Find the local port id that's attached to this device */ -static int sas_find_local_port_id(struct domain_device *dev) -{ - struct domain_device *pdev = dev->parent; - - /* Directly attached device */ - if (!pdev) - return dev->port->id; - while (pdev) { - struct domain_device *pdev_p = pdev->parent; - if (!pdev_p) - return pdev->port->id; - pdev = pdev->parent; - } - return 0; -} - #define DEV_IS_GONE(pm8001_dev) \ ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) @@ -461,10 +503,10 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags) spin_lock_irqsave(&pm8001_ha->lock, flags); pm8001_dev = dev->lldd_dev; - port = &pm8001_ha->port[sas_find_local_port_id(dev)]; + port = pm8001_ha->phy[pm8001_dev->attached_phy].port; if (!internal_abort && - (DEV_IS_GONE(pm8001_dev) || 
!port->port_attached)) { + (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) { ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; if (sas_protocol_ata(task_proto)) { @@ -570,6 +612,13 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, pm8001_ccb_free(pm8001_ha, ccb); } +static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id) +{ + pm8001_dev->id = id; + pm8001_dev->device_id = PM8001_MAX_DEVICES; + atomic_set(&pm8001_dev->running_req, 0); +} + /** * pm8001_alloc_dev - find a empty pm8001_device * @pm8001_ha: our hba card information @@ -578,9 +627,11 @@ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { - pm8001_ha->devices[dev].id = dev; - return &pm8001_ha->devices[dev]; + struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev]; + + if (pm8001_dev->dev_type == SAS_PHY_UNUSED) { + pm8001_init_dev(pm8001_dev, dev); + return pm8001_dev; } } if (dev == PM8001_MAX_DEVICES) { @@ -611,9 +662,7 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, void pm8001_free_dev(struct pm8001_device *pm8001_dev) { - u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); - pm8001_dev->id = id; pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; @@ -717,6 +766,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) spin_lock_irqsave(&pm8001_ha->lock, flags); } PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; pm8001_free_dev(pm8001_dev); } else { pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 3ccb7371902f..315f6a7523f0 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -56,7 +56,6 @@ #include <scsi/sas_ata.h> #include <linux/atomic.h> #include <linux/blk-mq.h> -#include <linux/blk-mq-pci.h> #include "pm8001_defs.h" #define DRV_NAME "pm80xx" @@ -96,6 +95,8 @@ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; extern const struct pm8001_dispatch pm8001_80xx_dispatch; +extern uint pcs_event_log_severity; + struct pm8001_hba_info; struct pm8001_ccb_info; struct pm8001_device; @@ -717,6 +718,7 @@ int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha); void pm8001_free_dev(struct pm8001_device *pm8001_dev); /* ctl shared API */ extern const struct attribute_group *pm8001_host_groups[]; +extern const struct attribute_group *pm8001_sdev_groups[]; #define PM8001_INVALID_TAG ((u32)-1) @@ -784,6 +786,8 @@ static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha, } void pm8001_setds_completion(struct domain_device *dev); void pm8001_tmf_aborted(struct sas_task *task); +void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *dev); #endif diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index a52ae6841939..5b373c53c036 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -568,13 +568,13 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version = pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION); - pm8001_dbg(pm8001_ha, DEV, + pm8001_dbg(pm8001_ha, INIT, "Main cfg table: sign:%x interface rev:%x 
fw_rev:%x\n", pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature, pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev, pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev); - pm8001_dbg(pm8001_ha, DEV, + pm8001_dbg(pm8001_ha, INIT, "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n", pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset, pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset, @@ -582,7 +582,7 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset, pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset); - pm8001_dbg(pm8001_ha, DEV, + pm8001_dbg(pm8001_ha, INIT, "Main cfg table; ila rev:%x Inactive fw rev:%x\n", pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version, pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version); @@ -763,7 +763,8 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].phys_addr_lo; pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = + pcs_event_log_severity; pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; /* Enable higher IQs and OQs, 32 to 63, bit 16 */ @@ -2037,7 +2038,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) atomic_dec(&pm8001_dev->running_req); break; } - pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ", + pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n", psspPayload->ssp_resp_iu.status); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; @@ -2245,7 +2246,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, u32 param; u32 status; u32 tag; - int i, j; + int i, j, ata_tag = -1; u8 sata_addr_low[4]; u32 temp_sata_addr_low, temp_sata_addr_hi; u8 sata_addr_hi[4]; @@ -2255,6 +2256,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, u32 *sata_resp; struct pm8001_device *pm8001_dev; unsigned long flags; + struct ata_queued_cmd *qc; psataPayload = (struct sata_completion_resp *)(piomb + 4); status = le32_to_cpu(psataPayload->status); @@ -2266,8 +2268,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, pm8001_dev = ccb->device; if (t) { - if (t->dev && (t->dev->lldd_dev)) + if (t->dev && (t->dev->lldd_dev)) { pm8001_dev = t->dev->lldd_dev; + qc = t->uldd_task; + ata_tag = qc ? 
qc->tag : -1; + } } else { pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", ccb->ccb_tag); @@ -2275,16 +2280,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, return; } - if (pm8001_dev && unlikely(!t->lldd_task || !t->dev)) return; ts = &t->task_status; - if (status != IO_SUCCESS) { pm8001_dbg(pm8001_ha, FAIL, - "IO failed device_id %u status 0x%x tag %d\n", - pm8001_dev->device_id, status, tag); + "IO failed status %#x pm80xx tag %#x ata tag %d\n", + status, tag, ata_tag); } /* Print sas address of IO failed device */ @@ -2666,13 +2669,19 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, /* Check if this is NCQ error */ if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* tag value is invalid with this event */ + pm8001_dbg(pm8001_ha, FAIL, "NCQ ERROR for device %#x tag %#x\n", + dev_id, tag); + /* find device using device id */ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); /* send read log extension by aborting the link - libata does what we want */ - if (pm8001_dev) + if (pm8001_dev) { + pm80xx_show_pending_commands(pm8001_ha, pm8001_dev); pm8001_handle_event(pm8001_ha, pm8001_dev, IO_XFER_ERROR_ABORTED_NCQ_MODE); + } return; } @@ -3335,10 +3344,11 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 phy_id = le32_to_cpu(pPayload->phyid) & 0xFF; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + u32 tag = le32_to_cpu(pPayload->tag); pm8001_dbg(pm8001_ha, INIT, - "phy start resp status:0x%x, phyid:0x%x\n", - status, phy_id); + "phy start resp status:0x%x, phyid:0x%x, tag 0x%x\n", + status, phy_id, tag); if (status == 0) phy->phy_state = PHY_LINK_DOWN; @@ -3347,6 +3357,8 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) complete(phy->enable_completion); phy->enable_completion = NULL; } + + pm8001_tag_free(pm8001_ha, tag); return 0; } @@ -3627,8 +3639,10 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 phyid = le32_to_cpu(pPayload->phyid) & 0xFF; struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; - pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n", - phyid, status); + u32 tag = le32_to_cpu(pPayload->tag); + + pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x tag 0x%x\n", phyid, + status, tag); if (status == PHY_STOP_SUCCESS || status == PHY_STOP_ERR_DEVICE_ATTACHED) { phy->phy_state = PHY_LINK_DISABLE; @@ -3636,6 +3650,7 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) phy->sas_phy.linkrate = SAS_PHY_DISABLED; } + pm8001_tag_free(pm8001_ha, tag); return 0; } @@ -3654,10 +3669,9 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, u32 tag = le32_to_cpu(pPayload->tag); pm8001_dbg(pm8001_ha, MSG, - "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", - status, err_qlfr_pgcd); + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x tag 0x%x\n", + status, err_qlfr_pgcd, tag); pm8001_tag_free(pm8001_ha, tag); - return 0; } @@ -4631,9 +4645,16 @@ static int pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_start_req payload; - u32 tag = 0x01; + int ret; + u32 tag; u32 opcode = OPC_INB_PHYSTART; + ret = pm8001_tag_alloc(pm8001_ha, &tag); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n"); + return ret; + } + memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); @@ -4669,9 +4690,16 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; - u32 tag = 0x01; + int ret; + u32 tag; u32 
opcode = OPC_INB_PHYSTOP; + ret = pm8001_tag_alloc(pm8001_ha, &tag); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n"); + return ret; + } + memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 0614b7e366b7..e0aeb206df8d 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -113,7 +113,7 @@ static struct pmcraid_chip_details pmcraid_chip_cfg[] = { /* * PCI device ids supported by pmcraid driver */ -static struct pci_device_id pmcraid_pci_table[] = { +static const struct pci_device_id pmcraid_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID), 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0] }, @@ -125,7 +125,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table); /** - * pmcraid_slave_alloc - Prepare for commands to a device + * pmcraid_sdev_init - Prepare for commands to a device * @scsi_dev: scsi device struct * * This function is called by mid-layer prior to sending any command to the new @@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table); * Return value: * 0 on success / -ENXIO if device does not exist */ -static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) +static int pmcraid_sdev_init(struct scsi_device *scsi_dev) { struct pmcraid_resource_entry *temp, *res = NULL; struct pmcraid_instance *pinstance; @@ -197,8 +197,9 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) } /** - * pmcraid_slave_configure - Configures a SCSI device + * pmcraid_sdev_configure - Configures a SCSI device * @scsi_dev: scsi device struct + * @lim: queue limits * * This function is executed by SCSI mid layer just after a device is first * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the @@ -209,7 +210,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) * Return value: * 0 on success */ -static int pmcraid_slave_configure(struct scsi_device *scsi_dev) +static int pmcraid_sdev_configure(struct scsi_device *scsi_dev, + struct queue_limits *lim) { struct pmcraid_resource_entry *res = scsi_dev->hostdata; @@ -233,8 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev) scsi_dev->allow_restart = 1; blk_queue_rq_timeout(scsi_dev->request_queue, PMCRAID_VSET_IO_TIMEOUT); - blk_queue_max_hw_sectors(scsi_dev->request_queue, - PMCRAID_VSET_MAX_SECTORS); + lim->max_hw_sectors = PMCRAID_VSET_MAX_SECTORS; } /* @@ -247,17 +248,17 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev) } /** - * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it + * pmcraid_sdev_destroy - Unconfigure a SCSI device before removing it * * @scsi_dev: scsi device struct * * This is called by mid-layer before removing a device. Pointer assignments - * done in pmcraid_slave_alloc will be reset to NULL here. + * done in pmcraid_sdev_init will be reset to NULL here. 
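[Editor's note] The pmcraid conversions here, like the ps3rom, qedf, qla1280, and 3ware ones elsewhere in this series, follow the tree-wide rename of the scsi_host_template ->slave_*() callbacks to ->sdev_*(). Only ->sdev_configure() changes shape: it now receives a queue_limits pointer and writes limits into it (lim->max_hw_sectors = ...) instead of calling blk_queue_* helpers against the live request queue. A minimal sketch of the converted form, using a hypothetical "foo" driver rather than code from this diff:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int foo_sdev_configure(struct scsi_device *sdev,
                              struct queue_limits *lim)
{
        /* limits go into *lim; the midlayer commits them after return */
        lim->max_hw_sectors = 512;      /* illustrative cap, not from the diff */

        /* non-limit tweaks still touch the queue directly */
        blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
        return 0;
}

static const struct scsi_host_template foo_template = {
        .module         = THIS_MODULE,
        .name           = "foo",
        .sdev_configure = foo_sdev_configure,   /* was .slave_configure */
};

The ->sdev_init()/->sdev_destroy() renames (ex ->slave_alloc()/->slave_destroy()) are purely mechanical; only the configure hook gained the extra parameter.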
* * Return value * none */ -static void pmcraid_slave_destroy(struct scsi_device *scsi_dev) +static void pmcraid_sdev_destroy(struct scsi_device *scsi_dev) { struct pmcraid_resource_entry *res; @@ -494,7 +495,7 @@ static void pmcraid_clr_trans_op( } if (pinstance->reset_cmd != NULL) { - del_timer(&pinstance->reset_cmd->timer); + timer_delete(&pinstance->reset_cmd->timer); spin_lock_irqsave( pinstance->host->host_lock, lock_flags); pinstance->reset_cmd->cmd_done(pinstance->reset_cmd); @@ -1945,7 +1946,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd) } iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg); - ioread32(pinstance->int_regs.host_ioa_interrupt_reg), + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); pmcraid_info("Waiting for IOA to become operational %x:%x\n", @@ -1998,7 +1999,7 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance) cpu_to_le32(PMCRAID_DRIVER_ILID); /* In case the command timer is still running */ - del_timer(&cmd->timer); + timer_delete(&cmd->timer); /* If this is an IO command, complete it by invoking scsi_done * function. If this is one of the internal commands other @@ -3667,9 +3668,9 @@ static const struct scsi_host_template pmcraid_host_template = { .eh_device_reset_handler = pmcraid_eh_device_reset_handler, .eh_host_reset_handler = pmcraid_eh_host_reset_handler, - .slave_alloc = pmcraid_slave_alloc, - .slave_configure = pmcraid_slave_configure, - .slave_destroy = pmcraid_slave_destroy, + .sdev_init = pmcraid_sdev_init, + .sdev_configure = pmcraid_sdev_configure, + .sdev_destroy = pmcraid_sdev_destroy, .change_queue_depth = pmcraid_change_queue_depth, .can_queue = PMCRAID_MAX_IO_CMD, .this_id = -1, @@ -3981,7 +3982,7 @@ static void pmcraid_tasklet_function(unsigned long instance) list_del(&cmd->free_list); spin_unlock_irqrestore(&pinstance->pending_pool_lock, pending_lock_flags); - del_timer(&cmd->timer); + timer_delete(&cmd->timer); atomic_dec(&pinstance->outstanding_cmds); if (cmd->cmd_done == pmcraid_ioa_reset) { @@ -4008,7 +4009,7 @@ static void pmcraid_tasklet_function(unsigned long instance) * This routine un-registers registered interrupt handler and * also frees irqs/vectors. 
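[Editor's note] The del_timer() and del_timer_sync() replacements above (and in qla1280 further down) track the timer API rename: timer_delete() deactivates a pending timer without waiting, while timer_delete_sync() also waits for a concurrently running callback to finish. A small sketch, with foo_cmd as a hypothetical per-command structure:

#include <linux/timer.h>

struct foo_cmd {
        struct timer_list timer;        /* hypothetical command timeout timer */
};

static void foo_complete_cmd(struct foo_cmd *cmd)
{
        /* same semantics as the old del_timer(): does not wait for handlers */
        timer_delete(&cmd->timer);
}

static void foo_teardown_cmd(struct foo_cmd *cmd)
{
        /* old del_timer_sync(): returns only once any running handler is done */
        timer_delete_sync(&cmd->timer);
}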
* - * Retun Value + * Return Value * None */ static @@ -4035,7 +4036,7 @@ static int pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) { struct pci_dev *pdev = pinstance->pdev; - unsigned int irq_flag = PCI_IRQ_LEGACY, flag; + unsigned int irq_flag = PCI_IRQ_INTX, flag; int num_hrrq, rc, i; irq_handler_t isr; diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index d592ee9170c1..1ed3171f1797 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -986,12 +986,6 @@ second_pass: return -ENODEV; } -static int ppa_adjust_queue(struct scsi_device *device) -{ - blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); - return 0; -} - static const struct scsi_host_template ppa_template = { .module = THIS_MODULE, .proc_name = "ppa", @@ -1005,7 +999,6 @@ static const struct scsi_host_template ppa_template = { .this_id = -1, .sg_tablesize = SG_ALL, .can_queue = 1, - .slave_alloc = ppa_adjust_queue, .cmd_size = sizeof(struct scsi_pointer), }; @@ -1157,8 +1150,8 @@ static struct parport_driver ppa_driver = { .name = "ppa", .match_port = ppa_attach, .detach = ppa_detach, - .devmodel = true, }; module_parport_driver(ppa_driver); +MODULE_DESCRIPTION("IOMEGA PPA3 parallel port SCSI host adapter driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 90495a832f34..92fe5c5c5bb0 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c @@ -61,7 +61,8 @@ enum lv1_atapi_in_out { }; -static int ps3rom_slave_configure(struct scsi_device *scsi_dev) +static int ps3rom_sdev_configure(struct scsi_device *scsi_dev, + struct queue_limits *lim) { struct ps3rom_private *priv = shost_priv(scsi_dev->host); struct ps3_storage_device *dev = priv->dev; @@ -325,7 +326,7 @@ done: static const struct scsi_host_template ps3rom_host_template = { .name = DEVICE_NAME, - .slave_configure = ps3rom_slave_configure, + .sdev_configure = ps3rom_sdev_configure, .queuecommand = ps3rom_queuecommand, .can_queue = 1, .this_id = 7, diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 5058e01b65a2..98afdfe63600 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -363,6 +363,7 @@ struct qedf_ctx { #define QEDF_IN_RECOVERY 5 #define QEDF_DBG_STOP_IO 6 #define QEDF_PROBING 8 +#define QEDF_STAG_IN_PROGRESS 9 unsigned long flags; /* Miscellaneous state flags */ int fipvlan_retries; u8 num_queues; diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c index 8d8c760eee43..769da92ee20d 100644 --- a/drivers/scsi/qedf/qedf_attr.c +++ b/drivers/scsi/qedf/qedf_attr.c @@ -104,7 +104,7 @@ void qedf_capture_grc_dump(struct qedf_ctx *qedf) static ssize_t qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj, - struct bin_attribute *ba, char *buf, loff_t off, + const struct bin_attribute *ba, char *buf, loff_t off, size_t count) { ssize_t ret = 0; @@ -124,7 +124,7 @@ qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj, static ssize_t qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj, - struct bin_attribute *ba, char *buf, loff_t off, + const struct bin_attribute *ba, char *buf, loff_t off, size_t count) { struct fc_lport *lport = NULL; @@ -160,14 +160,14 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_grcdump_attr = { +static const struct bin_attribute sysfs_grcdump_attr = { .attr = { .name = "grcdump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, - .read = qedf_sysfs_read_grcdump, - .write = qedf_sysfs_write_grcdump, 
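[Editor's note] The bin_attribute conversion in progress here (and repeated across the qla2xxx sysfs attributes below) has two parts: the attribute descriptors become const, and the callbacks move to the transitional .read_new/.write_new slots, whose prototypes take a const struct bin_attribute *. A minimal sketch with a hypothetical foo_dump attribute:

#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t foo_sysfs_read(struct file *filp, struct kobject *kobj,
                              const struct bin_attribute *bin_attr,
                              char *buf, loff_t off, size_t count)
{
        return 0;       /* nothing to copy out in this sketch */
}

static const struct bin_attribute foo_dump_attr = {
        .attr = { .name = "foo_dump", .mode = S_IRUSR },
        .size = 0,
        .read_new = foo_sysfs_read,     /* const-aware callback slot */
};

The _new names exist so drivers can be converted one at a time while both prototypes coexist.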
+ .read_new = qedf_sysfs_read_grcdump, + .write_new = qedf_sysfs_write_grcdump, }; static struct sysfs_bin_attrs bin_file_entries[] = { diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h index 5ec2b817c694..eeb6c841dacb 100644 --- a/drivers/scsi/qedf/qedf_dbg.h +++ b/drivers/scsi/qedf/qedf_dbg.h @@ -100,7 +100,7 @@ struct Scsi_Host; struct sysfs_bin_attrs { char *name; - struct bin_attribute *attr; + const struct bin_attribute *attr; }; extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len); diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index 451fd236bfd0..96174353e389 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -170,7 +170,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, if (!count || *ppos) return 0; - kern_buf = memdup_user(buffer, count); + kern_buf = memdup_user_nul(buffer, count); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index bf921caaf6ae..fcfc3bed02c6 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -310,7 +310,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) if (!free_sqes) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, - "Returning NULL, free_sqes=%d.\n ", + "Returning NULL, free_sqes=%d.\n", free_sqes); goto out_failed; } @@ -2324,9 +2324,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun, io_req->fcport = fcport; io_req->cmd_type = QEDF_TASK_MGMT_CMD; - /* Record which cpu this request is associated with */ - io_req->cpu = smp_processor_id(); - /* Set TM flags */ io_req->io_req_flags = QEDF_READ; io_req->data_xfer_len = 0; @@ -2349,6 +2346,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun, spin_lock_irqsave(&fcport->rport_lock, flags); + /* Record which cpu this request is associated with */ + io_req->cpu = smp_processor_id(); + sqe_idx = qedf_get_sqe_idx(fcport); sqe = &fcport->sq[sqe_idx]; memset(sqe, 0, sizeof(struct fcoe_wqe)); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a58353b7b4e8..6b1ebab36fa3 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, */ if (resp == fc_lport_flogi_resp) { qedf->flogi_cnt++; + qedf->flogi_pending++; + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); + qedf->flogi_pending = 0; + } + if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { schedule_delayed_work(&qedf->stag_work, 2); return NULL; } - qedf->flogi_pending++; + return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, arg, timeout); } @@ -692,7 +699,7 @@ static u32 qedf_get_login_failures(void *cookie) } static struct qed_fcoe_cb_ops qedf_cb_ops = { - { + .common = { .link_update = qedf_link_update, .bw_update = qedf_bw_update, .schedule_recovery_handler = qedf_schedule_recovery_handler, @@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) struct qedf_ctx *qedf; struct qed_link_output if_link; + qedf = lport_priv(lport); + if (lport->vport) { + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); printk_ratelimited("Cannot issue host reset on NPIV port.\n"); return; } - qedf = lport_priv(lport); - qedf->flogi_pending = 0; /* For host reset, essentially do a soft link up/down */ atomic_set(&qedf->link_state, QEDF_LINK_DOWN); @@ -938,6 +946,7 @@ void 
qedf_ctx_soft_reset(struct fc_lport *lport) if (!if_link.link_up) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Physical link is not up.\n"); + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); return; } /* Flush and wait to make sure link down is processed */ @@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) "Queue link up work.\n"); queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); } /* Reset the host by gracefully logging out and then logging back in */ @@ -972,7 +982,8 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) return SUCCESS; } -static int qedf_slave_configure(struct scsi_device *sdev) +static int qedf_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { if (qedf_queue_depth) { scsi_change_queue_depth(sdev, qedf_queue_depth); @@ -993,7 +1004,7 @@ static const struct scsi_host_template qedf_host_template = { .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */ .eh_host_reset_handler = qedf_eh_host_reset, - .slave_configure = qedf_slave_configure, + .sdev_configure = qedf_sdev_configure, .dma_boundary = QED_HW_DMA_BOUNDARY, .sg_tablesize = QEDF_MAX_BDS_PER_CMD, .can_queue = FCOE_PARAMS_NUM_TASKS, @@ -2276,7 +2287,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp) * on. */ if (!io_req) - /* If there is not io_req assocated with this CQE + /* If there is not io_req associated with this CQE * just queue it on CPU 0 */ cpu = 0; @@ -2728,6 +2739,7 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDF_ERR(&qedf->dbg_ctx, "Status block initialization failed (0x%x) for id = %d.\n", ret, sb_id); @@ -3362,9 +3374,8 @@ retry_probe: QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", qedf->io_mempool); - sprintf(host_buf, "qedf_%u_link", - qedf->lport->host->host_no); - qedf->link_update_wq = create_workqueue(host_buf); + qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM, + 1, qedf->lport->host->host_no); INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); @@ -3463,12 +3474,13 @@ retry_probe: } /* Start the Slowpath-process */ + memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params)); slowpath_params.int_mode = QED_INT_MODE_MSIX; slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; - strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); + strscpy(slowpath_params.name, "qedf", sizeof(slowpath_params.name)); rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); @@ -3573,9 +3585,8 @@ retry_probe: ether_addr_copy(params.ll2_mac_address, qedf->mac); /* Start LL2 processing thread */ - snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); - qedf->ll2_recv_wq = - create_workqueue(host_buf); + qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1, + host->host_no); if (!qedf->ll2_recv_wq) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); rc = -ENOMEM; @@ -3616,9 +3627,8 @@ retry_probe: } } - sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); 
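[Editor's note] The qedf workqueue conversions (link_update_wq and ll2_recv_wq above, the timer queue continuing just below, then dpc_wq and qedf_io_wq) all follow one pattern: alloc_workqueue() accepts a printf-style name, so the sprintf-into-host_buf step disappears, and WQ_MEM_RECLAIM with max_active = 1 approximates what create_workqueue() used to provide. A sketch with foo_wq as a stand-in:

#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

static int foo_start_wq(unsigned int host_no)
{
        /* name is formatted in place; no scratch buffer needed */
        foo_wq = alloc_workqueue("foo_%u_timer", WQ_MEM_RECLAIM, 1, host_no);
        if (!foo_wq)
                return -ENOMEM;
        return 0;
}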
- qedf->timer_work_queue = - create_workqueue(host_buf); + qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer", + WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no); if (!qedf->timer_work_queue) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " "workqueue.\n"); @@ -3630,7 +3640,8 @@ retry_probe: if (mode != QEDF_MODE_RECOVERY) { sprintf(host_buf, "qedf_%u_dpc", qedf->lport->host->host_no); - qedf->dpc_wq = create_workqueue(host_buf); + qedf->dpc_wq = + alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf); } INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); @@ -3721,6 +3732,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) { struct qedf_ctx *qedf; int rc; + int cnt = 0; if (!pdev) { QEDF_ERR(NULL, "pdev is NULL.\n"); @@ -3738,6 +3750,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) return; } +stag_in_prog: + if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt); + cnt++; + + if (cnt < 5) { + msleep(500); + goto stag_in_prog; + } + } + if (mode != QEDF_MODE_RECOVERY) set_bit(QEDF_UNLOADING, &qedf->flags); @@ -3997,6 +4020,19 @@ void qedf_stag_change_work(struct work_struct *work) struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, stag_work.work); + if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Already is in recovery, hence not calling software context reset.\n"); + return; + } + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); + return; + } + + set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); + printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.", dev_name(&qedf->pdev->dev), __func__, __LINE__, qedf->dbg_ctx.host_no); @@ -4141,7 +4177,7 @@ static int __init qedf_init(void) goto err3; } - qedf_io_wq = create_workqueue("qedf_io_wq"); + qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq"); if (!qedf_io_wq) { QEDF_ERR(NULL, "Could not create qedf_io_wq.\n"); goto err4; diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c index 2ebef4d20b5b..2f3e044b818f 100644 --- a/drivers/scsi/qedi/qedi_dbg.c +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -103,25 +103,3 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, ret: va_end(va); } - -int -qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) -{ - int ret = 0; - - for (; iter->name; iter++) { - ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, - iter->attr); - if (ret) - pr_err("Unable to create sysfs %s attr, err(%d).\n", - iter->name, ret); - } - return ret; -} - -void -qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) -{ - for (; iter->name; iter++) - sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); -} diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h index fdda12ef13b0..864951865869 100644 --- a/drivers/scsi/qedi/qedi_dbg.h +++ b/drivers/scsi/qedi/qedi_dbg.h @@ -87,18 +87,6 @@ void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, u32 info, const char *fmt, ...); -struct Scsi_Host; - -struct sysfs_bin_attrs { - char *name; - struct bin_attribute *attr; -}; - -int qedi_create_sysfs_attr(struct Scsi_Host *shost, - struct sysfs_bin_attrs *iter); -void qedi_remove_sysfs_attr(struct Scsi_Host *shost, - struct sysfs_bin_attrs *iter); - /* DebugFS related code */ struct qedi_list_of_funcs { char *oper_str; diff 
--git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index 8deb2001dc2f..37eed6a27816 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -120,15 +120,11 @@ static ssize_t qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - size_t cnt = 0; - - if (*ppos) - return 0; + char buf[64]; + int len; - cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover); - cnt = min_t(int, count, cnt - *ppos); - *ppos += cnt; - return cnt; + len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover); + return simple_read_from_buffer(buffer, count, ppos, buf, len); } static int diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h index 772218445a56..5e10441f2e22 100644 --- a/drivers/scsi/qedi/qedi_gbl.h +++ b/drivers/scsi/qedi/qedi_gbl.h @@ -45,7 +45,6 @@ int qedi_iscsi_cleanup_task(struct iscsi_task *task, void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd); void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, struct qedi_cmd *qedi_cmd); -void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt); void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid); void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct iscsi_eqe_data *data); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cd0180b1f5b9..b168bb2178e9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -369,6 +369,7 @@ static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDI_ERR(&qedi->dbg_ctx, "Status block initialization failed for id = %d.\n", sb_id); @@ -1876,14 +1877,6 @@ void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid) WARN_ON(1); } -void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt) -{ - *proto_itt = qedi->itt_map[tid].itt; - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, - "Get itt map tid [0x%x with proto itt[0x%x]", - tid, *proto_itt); -} - struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) { struct qedi_cmd *cmd = NULL; @@ -1960,13 +1953,11 @@ static int qedi_cpu_online(unsigned int cpu) struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); struct task_struct *thread; - thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p, - cpu_to_node(cpu), - "qedi_thread/%d", cpu); + thread = kthread_create_on_cpu(qedi_percpu_io_thread, (void *)p, + cpu, "qedi_thread/%d"); if (IS_ERR(thread)) return PTR_ERR(thread); - kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); return 0; @@ -2767,7 +2758,8 @@ retry_probe: } sprintf(host_buf, "host_%d", qedi->shost->host_no); - qedi->tmf_thread = create_singlethread_workqueue(host_buf); + qedi->tmf_thread = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, host_buf); if (!qedi->tmf_thread) { QEDI_ERR(&qedi->dbg_ctx, "Unable to start tmf thread!\n"); @@ -2775,8 +2767,9 @@ retry_probe: goto free_cid_que; } - sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no); - qedi->offload_thread = create_workqueue(host_buf); + qedi->offload_thread = alloc_workqueue("qedi_ofld%d", + WQ_MEM_RECLAIM, + 1, qedi->shost->host_no); if (!qedi->offload_thread) { QEDI_ERR(&qedi->dbg_ctx, "Unable to start offload thread!\n"); @@ -2866,7 +2859,7 @@ static void qedi_remove(struct pci_dev *pdev) __qedi_remove(pdev, 
QEDI_MODE_NORMAL); } -static struct pci_device_id qedi_pci_tbl[] = { +static const struct pci_device_id qedi_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) }, { 0 }, @@ -2875,7 +2868,7 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); static enum cpuhp_state qedi_cpuhp_state; -static struct pci_error_handlers qedi_err_handler = { +static const struct pci_error_handlers qedi_err_handler = { .error_detected = qedi_io_error_detected, }; diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 8958547ac111..078a9c80bce2 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -501,7 +501,7 @@ struct qla_boards { }; /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */ -static struct pci_device_id qla1280_pci_tbl[] = { +static const struct pci_device_id qla1280_pci_tbl[] = { {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020, @@ -1159,7 +1159,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target) /************************************************************************** - * qla1280_slave_configure + * qla1280_sdev_configure * * Description: * Determines the queue depth for a given device. There are two ways @@ -1170,7 +1170,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target) * default queue depth (dependent on the number of hardware SCBs). **************************************************************************/ static int -qla1280_slave_configure(struct scsi_device *device) +qla1280_sdev_configure(struct scsi_device *device, struct queue_limits *lim) { struct scsi_qla_host *ha; int default_depth = 3; @@ -2454,7 +2454,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) qla1280_debounce_register(®->istatus); wait_for_completion(&wait); - del_timer_sync(&ha->mailbox_timer); + timer_delete_sync(&ha->mailbox_timer); spin_lock_irq(ha->host->host_lock); @@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", cpu_to_le32(upper_32_bits(dma_handle)), cpu_to_le32(lower_32_bits(dma_handle)), - cpu_to_le32(sg_dma_len(sg_next(s)))); + cpu_to_le32(sg_dma_len(s))); remseg--; } dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " @@ -4121,7 +4121,7 @@ static const struct scsi_host_template qla1280_driver_template = { .proc_name = "qla1280", .name = "Qlogic ISP 1280/12160", .info = qla1280_info, - .slave_configure = qla1280_slave_configure, + .sdev_configure = qla1280_sdev_configure, .queuecommand = qla1280_queuecommand, .eh_abort_handler = qla1280_eh_abort, .eh_device_reset_handler= qla1280_eh_device_reset, diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index d309e2ca14de..dea2290b37d4 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h @@ -116,12 +116,12 @@ struct device_reg { uint16_t id_h; /* ID high */ uint16_t cfg_0; /* Configuration 0 */ #define ISP_CFG0_HWMSK 0x000f /* Hardware revision mask */ -#define ISP_CFG0_1020 BIT_0 /* ISP1020 */ -#define ISP_CFG0_1020A BIT_1 /* ISP1020A */ -#define ISP_CFG0_1040 BIT_2 /* ISP1040 */ -#define ISP_CFG0_1040A BIT_3 /* ISP1040A */ -#define ISP_CFG0_1040B BIT_4 /* ISP1040B */ -#define ISP_CFG0_1040C BIT_5 /* ISP1040C */ +#define ISP_CFG0_1020 1 /* ISP1020 */ +#define ISP_CFG0_1020A 2 /* ISP1020A */ +#define ISP_CFG0_1040 3 /* ISP1040 */ +#define ISP_CFG0_1040A 4 /* ISP1040A */ +#define 
ISP_CFG0_1040B 5 /* ISP1040B */ +#define ISP_CFG0_1040C 6 /* ISP1040C */ uint16_t cfg_1; /* Configuration 1 */ #define ISP_CFG1_F128 BIT_6 /* 128-byte FIFO threshold */ #define ISP_CFG1_F64 BIT_4|BIT_5 /* 128-byte FIFO threshold */ diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index a584708d3056..a8b4314bfd6e 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -7,29 +7,29 @@ config SCSI_QLA_FC select FW_LOADER select BTREE help - This qla2xxx driver supports all QLogic Fibre Channel - PCI and PCIe host adapters. + This qla2xxx driver supports all QLogic Fibre Channel + PCI and PCIe host adapters. - By default, firmware for the ISP parts will be loaded - via the Firmware Loader interface. + By default, firmware for the ISP parts will be loaded + via the Firmware Loader interface. - ISP Firmware Filename - ---------- ----------------- - 21xx ql2100_fw.bin - 22xx ql2200_fw.bin - 2300, 2312, 6312 ql2300_fw.bin - 2322, 6322 ql2322_fw.bin - 24xx, 54xx ql2400_fw.bin - 25xx ql2500_fw.bin + ISP Firmware Filename + ---------- ----------------- + 21xx ql2100_fw.bin + 22xx ql2200_fw.bin + 2300, 2312, 6312 ql2300_fw.bin + 2322, 6322 ql2322_fw.bin + 24xx, 54xx ql2400_fw.bin + 25xx ql2500_fw.bin - Upon request, the driver caches the firmware image until - the driver is unloaded. + Upon request, the driver caches the firmware image until + the driver is unloaded. - Firmware images can be retrieved from: + Firmware images can be retrieved from: - http://ldriver.qlogic.com/firmware/ + http://ldriver.qlogic.com/firmware/ - They are also included in the linux-firmware tree as well. + They are also included in the linux-firmware tree as well. config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs" @@ -38,13 +38,15 @@ config TCM_QLA2XXX select BTREE default n help - Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs + Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ + series target mode HBAs. if TCM_QLA2XXX config TCM_QLA2XXX_DEBUG bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs" default n help - Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs - This will include code to enable the SCSI command jammer + Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for + QLogic 24xx+ series target mode HBAs. + This will include code to enable the SCSI command jammer. 
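[Editor's note] The ISP_CFG0 change above replaces BIT_0..BIT_5 with the values 1..6. The low nibble of cfg_0, masked by ISP_CFG0_HWMSK (0x000f), holds an enumerated hardware revision rather than a bitmap; notably, BIT_4 (16) and BIT_5 (32) fall outside that mask, so the old 1040B/1040C comparisons could never match. A plausible reading of the intended test (my reconstruction, not code from the hunk):

#include <linux/types.h>

#define ISP_CFG0_HWMSK  0x000f  /* hardware revision mask */
#define ISP_CFG0_1020   1       /* ISP1020 */
#define ISP_CFG0_1040C  6       /* ISP1040C */

static bool foo_is_isp1020(u16 cfg_0)
{
        /* compare the masked field against an enumerated value */
        return (cfg_0 & ISP_CFG0_HWMSK) == ISP_CFG0_1020;
}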
endif diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 76eeba435fd0..dcb0c2af1fa7 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -17,7 +17,7 @@ static int qla24xx_vport_disable(struct fc_vport *, bool); static ssize_t qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -58,7 +58,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, static ssize_t qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -168,19 +168,19 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_fw_dump_attr = { +static const struct bin_attribute sysfs_fw_dump_attr = { .attr = { .name = "fw_dump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, - .read = qla2x00_sysfs_read_fw_dump, - .write = qla2x00_sysfs_write_fw_dump, + .read_new = qla2x00_sysfs_read_fw_dump, + .write_new = qla2x00_sysfs_write_fw_dump, }; static ssize_t qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -220,7 +220,7 @@ skip: static ssize_t qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -282,19 +282,19 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_nvram_attr = { +static const struct bin_attribute sysfs_nvram_attr = { .attr = { .name = "nvram", .mode = S_IRUSR | S_IWUSR, }, .size = 512, - .read = qla2x00_sysfs_read_nvram, - .write = qla2x00_sysfs_write_nvram, + .read_new = qla2x00_sysfs_read_nvram, + .write_new = qla2x00_sysfs_write_nvram, }; static ssize_t qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -318,7 +318,7 @@ out: static ssize_t qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -344,19 +344,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_optrom_attr = { +static const struct bin_attribute sysfs_optrom_attr = { .attr = { .name = "optrom", .mode = S_IRUSR | S_IWUSR, }, .size = 0, - .read = qla2x00_sysfs_read_optrom, - .write = qla2x00_sysfs_write_optrom, + .read_new = qla2x00_sysfs_read_optrom, + .write_new = qla2x00_sysfs_write_optrom, }; static ssize_t qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { 
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -529,18 +529,18 @@ out: return rval; } -static struct bin_attribute sysfs_optrom_ctl_attr = { +static const struct bin_attribute sysfs_optrom_ctl_attr = { .attr = { .name = "optrom_ctl", .mode = S_IWUSR, }, .size = 0, - .write = qla2x00_sysfs_write_optrom_ctl, + .write_new = qla2x00_sysfs_write_optrom_ctl, }; static ssize_t qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -587,7 +587,7 @@ skip: static ssize_t qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -642,19 +642,19 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_vpd_attr = { +static const struct bin_attribute sysfs_vpd_attr = { .attr = { .name = "vpd", .mode = S_IRUSR | S_IWUSR, }, .size = 0, - .read = qla2x00_sysfs_read_vpd, - .write = qla2x00_sysfs_write_vpd, + .read_new = qla2x00_sysfs_read_vpd, + .write_new = qla2x00_sysfs_write_vpd, }; static ssize_t qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -679,18 +679,18 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_sfp_attr = { +static const struct bin_attribute sysfs_sfp_attr = { .attr = { .name = "sfp", .mode = S_IRUSR | S_IWUSR, }, .size = SFP_DEV_SIZE, - .read = qla2x00_sysfs_read_sfp, + .read_new = qla2x00_sysfs_read_sfp, }; static ssize_t qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -823,19 +823,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_reset_attr = { +static const struct bin_attribute sysfs_reset_attr = { .attr = { .name = "reset", .mode = S_IWUSR, }, .size = 0, - .write = qla2x00_sysfs_write_reset, + .write_new = qla2x00_sysfs_write_reset, }; static ssize_t qla2x00_issue_logo(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) + const struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); @@ -866,18 +866,18 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_issue_logo_attr = { +static const struct bin_attribute sysfs_issue_logo_attr = { .attr = { .name = "issue_logo", .mode = S_IWUSR, }, .size = 0, - .write = qla2x00_issue_logo, + .write_new = qla2x00_issue_logo, }; static ssize_t qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -929,18 
+929,18 @@ do_read: return count; } -static struct bin_attribute sysfs_xgmac_stats_attr = { +static const struct bin_attribute sysfs_xgmac_stats_attr = { .attr = { .name = "xgmac_stats", .mode = S_IRUSR, }, .size = 0, - .read = qla2x00_sysfs_read_xgmac_stats, + .read_new = qla2x00_sysfs_read_xgmac_stats, }; static ssize_t qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, @@ -987,18 +987,18 @@ do_read: return count; } -static struct bin_attribute sysfs_dcbx_tlv_attr = { +static const struct bin_attribute sysfs_dcbx_tlv_attr = { .attr = { .name = "dcbx_tlv", .mode = S_IRUSR, }, .size = 0, - .read = qla2x00_sysfs_read_dcbx_tlv, + .read_new = qla2x00_sysfs_read_dcbx_tlv, }; static struct sysfs_entry { char *name; - struct bin_attribute *attr; + const struct bin_attribute *attr; int type; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr, }, @@ -1068,13 +1068,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) /* Scsi_Host attributes. */ static ssize_t -qla2x00_driver_version_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); -} - -static ssize_t qla2x00_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2412,7 +2405,7 @@ qla2x00_dport_diagnostics_show(struct device *dev, static DEVICE_ATTR(dport_diagnostics, 0444, qla2x00_dport_diagnostics_show, NULL); -static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL); +static DEVICE_STRING_ATTR_RO(driver_version, S_IRUGO, qla2x00_version_str); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL); @@ -2478,7 +2471,7 @@ static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); static struct attribute *qla2x00_host_attrs[] = { - &dev_attr_driver_version.attr, + &dev_attr_driver_version.attr.attr, &dev_attr_fw_version.attr, &dev_attr_serial_num.attr, &dev_attr_isp_name.attr, @@ -3311,6 +3304,7 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, + .show_host_supported_speeds = 1, .get_host_port_id = qla2x00_get_host_port_id, .show_host_port_id = 1, diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 19bb64bdd88b..10431a67d202 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -24,6 +24,7 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) { struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct completion *comp = sp->comp; ql_dbg(ql_dbg_user, sp->vha, 0x7009, "%s: sp hdl %x, result=%x bsg ptr %p\n", @@ -35,6 +36,9 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + if (comp) + complete(comp); } void qla2x00_bsg_sp_free(srb_t *sp) @@ -324,7 +328,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) "request_sg_cnt=%x reply_sg_cnt=%x.\n", bsg_job->request_payload.sg_cnt, bsg_job->reply_payload.sg_cnt); - rval = -EPERM; + rval = -ENOBUFS; goto done; } @@ -490,16 +494,6 @@ 
qla2x00_process_ct(struct bsg_job *bsg_job) goto done; } - if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || - (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { - ql_log(ql_log_warn, vha, 0x7011, - "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " - "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, - req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); - rval = -EAGAIN; - goto done_unmap_sg; - } - if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x7012, "Host is not online.\n"); @@ -3059,19 +3053,130 @@ skip_chip_chk: return ret; } -int -qla24xx_bsg_timeout(struct bsg_job *bsg_job) +static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job) { + bool found, do_bsg_done; struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct qla_hw_data *ha = vha->hw; - srb_t *sp; - int cnt, que; + srb_t *sp = NULL; + int cnt; unsigned long flags; struct req_que *req; + int rval; + DECLARE_COMPLETION_ONSTACK(comp); + uint32_t ratov_j; + + found = do_bsg_done = false; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + req = qpair->req; + + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (sp && + (sp->type == SRB_CT_CMD || + sp->type == SRB_ELS_CMD_HST || + sp->type == SRB_ELS_CMD_HST_NOLOGIN) && + sp->u.bsg_job == bsg_job) { + + found = true; + sp->comp = ∁ + break; + } + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (!found) + return false; + + if (ha->flags.eeh_busy) { + /* skip over abort. EEH handling will return the bsg. Wait for it */ + rval = QLA_SUCCESS; + ql_dbg(ql_dbg_user, vha, 0x802c, + "eeh encounter. bsg %p sp=%p handle=%x \n", + bsg_job, sp, sp->handle); + } else { + rval = ha->isp_ops->abort_command(sp); + ql_dbg(ql_dbg_user, vha, 0x802c, + "Aborting bsg %p sp=%p handle=%x rval=%x\n", + bsg_job, sp, sp->handle, rval); + } + + switch (rval) { + case QLA_SUCCESS: + /* Wait for the command completion. */ + ratov_j = ha->r_a_tov / 10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); + + if (!wait_for_completion_timeout(&comp, ratov_j)) { + ql_log(ql_log_info, vha, 0x7089, + "bsg abort timeout. bsg=%p sp=%p handle %#x .\n", + bsg_job, sp, sp->handle); + + do_bsg_done = true; + } else { + /* fw had returned the bsg */ + ql_dbg(ql_dbg_user, vha, 0x708a, + "bsg abort success. bsg %p sp=%p handle=%#x\n", + bsg_job, sp, sp->handle); + do_bsg_done = false; + } + break; + default: + ql_log(ql_log_info, vha, 0x704f, + "bsg abort fail. bsg=%p sp=%p rval=%x.\n", + bsg_job, sp, rval); + + do_bsg_done = true; + break; + } - ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n", - __func__, bsg_job); + if (!do_bsg_done) + return true; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + /* + * recheck to make sure it's still the same bsg_job due to + * qp_lock_ptr was released earlier. 
+ */ + if (req->outstanding_cmds[cnt] && + req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) { + /* fw had returned the bsg */ + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + return true; + } + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + /* ref: INIT */ + sp->comp = NULL; + kref_put(&sp->cmd_kref, qla2x00_sp_release); + bsg_reply->result = -ENXIO; + bsg_reply->reply_payload_rcv_len = 0; + + ql_dbg(ql_dbg_user, vha, 0x7051, + "%s bsg_job_done : bsg %p result %#x sp %p.\n", + __func__, bsg_job, bsg_reply->result, sp); + + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + return true; +} + +int +qla24xx_bsg_timeout(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct qla_hw_data *ha = vha->hw; + int i; + struct qla_qpair *qpair; + + ql_log(ql_log_info, vha, 0x708b, + "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n", + __func__, bsg_job, bsg_request->msgcode, + bsg_request->rqst_data.h_vendor.vendor_cmd[0]); if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x9007, @@ -3079,48 +3184,21 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) qla_pci_set_eeh_busy(vha); } + if (qla_bsg_found(ha->base_qpair, bsg_job)) + goto done; + /* find the bsg job from the active list of commands */ - spin_lock_irqsave(&ha->hardware_lock, flags); - for (que = 0; que < ha->max_req_queues; que++) { - req = ha->req_q_map[que]; - if (!req) + for (i = 0; i < ha->max_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) continue; - - for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { - sp = req->outstanding_cmds[cnt]; - if (sp && - (sp->type == SRB_CT_CMD || - sp->type == SRB_ELS_CMD_HST || - sp->type == SRB_ELS_CMD_HST_NOLOGIN || - sp->type == SRB_FXIOCB_BCMD) && - sp->u.bsg_job == bsg_job) { - req->outstanding_cmds[cnt] = NULL; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { - ql_log(ql_log_warn, vha, 0x7089, - "mbx abort_command failed.\n"); - bsg_reply->result = -EIO; - } else { - ql_dbg(ql_dbg_user, vha, 0x708a, - "mbx abort_command success.\n"); - bsg_reply->result = 0; - } - spin_lock_irqsave(&ha->hardware_lock, flags); - goto done; - - } - } + if (qla_bsg_found(qpair, bsg_job)) + goto done; } - spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); - bsg_reply->result = -ENXIO; - return 0; done: - spin_unlock_irqrestore(&ha->hardware_lock, flags); - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 691ef827a5ab..5136549005e7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -2706,59 +2706,6 @@ ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, } /* - * This function is for formatting and logging log messages. - * It is to be used when vha is available. It formats the message - * and logs it to the messages file. All the messages will be logged - * irrespective of value of ql2xextended_error_logging. - * parameters: - * level: The level of the log messages to be printed in the - * messages file. - * vha: Pointer to the scsi_qla_host_t - * id: This is a unique id for the level. It identifies the - * part of the code from where the message originated. - * msg: The message to be displayed. 
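[Editor's note] The reworked qla2xxx bsg timeout path above replaces the single hardware_lock scan with a per-qpair search: qla_bsg_found() attaches an on-stack completion to the SRB, issues the abort, and waits roughly four R_A_TOV periods for firmware to return the command before completing the bsg_job itself. The core wait pattern, reduced to a hypothetical foo_srb (foo_hw_abort stands in for isp_ops->abort_command):

#include <linux/completion.h>
#include <linux/jiffies.h>

struct foo_srb {
        struct completion *comp;        /* signalled by the normal done path */
};

/* hypothetical abort hook; 0 means the firmware accepted the abort */
static int foo_hw_abort(struct foo_srb *sp)
{
        return 0;
}

static bool foo_abort_and_wait(struct foo_srb *sp, unsigned int r_a_tov_ms)
{
        DECLARE_COMPLETION_ONSTACK(comp);
        bool done;

        sp->comp = &comp;
        if (foo_hw_abort(sp))
                return false;           /* abort refused; caller cleans up */

        /* done path calls complete(sp->comp) when the command returns */
        done = wait_for_completion_timeout(&comp,
                        msecs_to_jiffies(4 * r_a_tov_ms)) != 0;
        sp->comp = NULL;                /* stack completion goes out of scope */
        return done;
}

Only when the wait times out (or the abort fails) does the driver reclaim the outstanding-command slot and complete the bsg_job with -ENXIO, and it rechecks the slot under the queue-pair lock first since firmware may have returned the command in the meantime.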
- */ -void -ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, - const char *fmt, ...) -{ - va_list va; - struct va_format vaf; - char pbuf[128]; - - if (level > ql_errlev) - return; - - ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt); - - if (!pbuf[0]) /* set by ql_ktrace */ - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, - qpair ? qpair->vha : NULL, id); - - va_start(va, fmt); - - vaf.fmt = fmt; - vaf.va = &va; - - switch (level) { - case ql_log_fatal: /* FATAL LOG */ - pr_crit("%s%pV", pbuf, &vaf); - break; - case ql_log_warn: - pr_err("%s%pV", pbuf, &vaf); - break; - case ql_log_info: - pr_warn("%s%pV", pbuf, &vaf); - break; - default: - pr_info("%s%pV", pbuf, &vaf); - break; - } - - va_end(va); -} - -/* * This function is for formatting and logging debug information. * It is to be used when vha is available. It formats the message * and logs it to the messages file. diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 54f0a412226f..5f4a8c9ae6ba 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -334,9 +334,6 @@ ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...); -void __attribute__((format (printf, 4, 5))) -ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); - /* Debug Levels */ /* The 0x40000000 is the max value any debug level can have * as ql2xextended_error_logging is of type signed int diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2f49baf131e2..cb95b7b12051 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2621,7 +2621,6 @@ typedef struct fc_port { struct kref sess_kref; struct qla_tgt *tgt; unsigned long expires; - struct list_head del_list_entry; struct work_struct free_work; struct work_struct reg_work; uint64_t jiffies_at_registration; @@ -3309,9 +3308,20 @@ struct fab_scan_rp { u8 node_name[8]; }; +enum scan_step { + FAB_SCAN_START, + FAB_SCAN_GPNFT_FCP, + FAB_SCAN_GNNFT_FCP, + FAB_SCAN_GPNFT_NVME, + FAB_SCAN_GNNFT_NVME, +}; + struct fab_scan { struct fab_scan_rp *l; u32 size; + u32 rscn_gen_start; + u32 rscn_gen_end; + enum scan_step step; u16 scan_retry; #define MAX_SCAN_RETRIES 5 enum scan_flags_t scan_flags; @@ -3537,9 +3547,8 @@ enum qla_work_type { QLA_EVT_RELOGIN, QLA_EVT_ASYNC_PRLO, QLA_EVT_ASYNC_PRLO_DONE, - QLA_EVT_GPNFT, - QLA_EVT_GPNFT_DONE, - QLA_EVT_GNNFT_DONE, + QLA_EVT_SCAN_CMD, + QLA_EVT_SCAN_FINISH, QLA_EVT_GFPNID, QLA_EVT_SP_RETRY, QLA_EVT_IIDMA, @@ -4089,6 +4098,8 @@ struct qla_hw_data { uint32_t npiv_supported :1; uint32_t pci_channel_io_perm_failure :1; uint32_t fce_enabled :1; + uint32_t user_enabled_fce :1; + uint32_t fce_dump_buf_alloced :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; @@ -5030,6 +5041,7 @@ typedef struct scsi_qla_host { /* Counter to detect races between ELS and RSCN events */ atomic_t generation_tick; + atomic_t rscn_gen; /* Time when global fcport update has been scheduled */ int total_fcport_update_gen; /* List of pending LOGOs, protected by tgt_mutex */ diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 55ff3d7482b3..08273520c777 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -274,7 +274,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n", iocbs_used, 
ha->base_qpair->fwres.iocbs_limit); - seq_printf(s, "estimate exchange used[%d] high water limit [%d] n", + seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n", exch_used, ha->base_qpair->fwres.exch_limit); if (ql2xenforce_iocb_limit == 2) { @@ -409,26 +409,31 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) mutex_lock(&ha->fce_mutex); - seq_puts(s, "FCE Trace Buffer\n"); - seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); - seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); - seq_puts(s, "FCE Enable Registers\n"); - seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", - ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], - ha->fce_mb[5], ha->fce_mb[6]); - - fce = (uint32_t *) ha->fce; - fce_start = (unsigned long long) ha->fce_dma; - for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { - if (cnt % 8 == 0) - seq_printf(s, "\n%llx: ", - (unsigned long long)((cnt * 4) + fce_start)); - else - seq_putc(s, ' '); - seq_printf(s, "%08x", *fce++); - } + if (ha->flags.user_enabled_fce) { + seq_puts(s, "FCE Trace Buffer\n"); + seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); + seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma); + seq_puts(s, "FCE Enable Registers\n"); + seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", + ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], + ha->fce_mb[5], ha->fce_mb[6]); + + fce = (uint32_t *)ha->fce; + fce_start = (unsigned long long)ha->fce_dma; + for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { + if (cnt % 8 == 0) + seq_printf(s, "\n%llx: ", + (unsigned long long)((cnt * 4) + fce_start)); + else + seq_putc(s, ' '); + seq_printf(s, "%08x", *fce++); + } - seq_puts(s, "\nEnd\n"); + seq_puts(s, "\nEnd\n"); + } else { + seq_puts(s, "FCE Trace is currently not enabled\n"); + seq_puts(s, "\techo [ 1 | 0 ] > fce\n"); + } mutex_unlock(&ha->fce_mutex); @@ -467,7 +472,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file) struct qla_hw_data *ha = vha->hw; int rval; - if (ha->flags.fce_enabled) + if (ha->flags.fce_enabled || !ha->fce) goto out; mutex_lock(&ha->fce_mutex); @@ -488,11 +493,88 @@ out: return single_release(inode, file); } +static ssize_t +qla2x00_dfs_fce_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + char *buf; + int rc = 0; + unsigned long enable; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { + ql_dbg(ql_dbg_user, vha, 0xd034, + "this adapter does not support FCE."); + return -EINVAL; + } + + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) { + ql_dbg(ql_dbg_user, vha, 0xd037, + "fail to copy user buffer."); + return PTR_ERR(buf); + } + + enable = kstrtoul(buf, 0, 0); + rc = count; + + mutex_lock(&ha->fce_mutex); + + if (enable) { + if (ha->flags.user_enabled_fce) { + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + ha->flags.user_enabled_fce = 1; + if (!ha->fce) { + rc = qla2x00_alloc_fce_trace(vha); + if (rc) { + ha->flags.user_enabled_fce = 0; + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + + /* adjust fw dump buffer to take into account of this feature */ + if (!ha->flags.fce_dump_buf_alloced) + qla2x00_alloc_fw_dump(vha); + } + + if (!ha->flags.fce_enabled) + qla_enable_fce_trace(vha); + + ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE .\n"); + } else { + if 
(!ha->flags.user_enabled_fce) { + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + ha->flags.user_enabled_fce = 0; + if (ha->flags.fce_enabled) { + qla2x00_disable_fce_trace(vha, NULL, NULL); + ha->flags.fce_enabled = 0; + } + + qla2x00_free_fce_trace(ha); + /* no need to re-adjust fw dump buffer */ + + ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE .\n"); + } + + mutex_unlock(&ha->fce_mutex); +out_free: + kfree(buf); + return rc; +} + static const struct file_operations dfs_fce_ops = { .open = qla2x00_dfs_fce_open, .read = seq_read, .llseek = seq_lseek, .release = qla2x00_dfs_fce_release, + .write = qla2x00_dfs_fce_write, }; static int @@ -626,8 +708,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto out; - if (!ha->fce) - goto out; if (qla2x00_dfs_root) goto create_dir; diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h index 20788054b91b..52e060f83b37 100644 --- a/drivers/scsi/qla2xxx/qla_dsd.h +++ b/drivers/scsi/qla2xxx/qla_dsd.h @@ -1,7 +1,7 @@ #ifndef _QLA_DSD_H_ #define _QLA_DSD_H_ -#include <asm/unaligned.h> +#include <linux/unaligned.h> /* 32-bit data segment descriptor (8 bytes) */ struct dsd32 { diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 7309310d2ab9..03e50e8fc08d 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -11,6 +11,9 @@ /* * Global Function Prototypes in qla_init.c source file. */ +int qla2x00_alloc_fce_trace(scsi_qla_host_t *); +void qla2x00_free_fce_trace(struct qla_hw_data *ha); +void qla_enable_fce_trace(scsi_qla_host_t *); extern int qla2x00_initialize_adapter(scsi_qla_host_t *); extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport); @@ -161,10 +164,8 @@ extern int ql2xsmartsan; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; extern int ql2xextended_error_logging_ktrace; -extern int ql2xiidmaenable; extern int ql2xmqsupport; extern int ql2xfwloadbin; -extern int ql2xetsenable; extern int ql2xshiftctondsd; extern int ql2xdbwr; extern int ql2xasynctmfenable; @@ -717,7 +718,6 @@ extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); extern int qla2x00_fdmi_register(scsi_qla_host_t *); extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); -extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, struct ct_sns_rsp *, const char *); @@ -728,9 +728,9 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *); int qla2x00_mgmt_svr_login(scsi_qla_host_t *); int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool); -int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *); -void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *); -void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *); +int qla_fab_async_scan(scsi_qla_host_t *, srb_t *); +void qla_fab_scan_start(struct scsi_qla_host *); +void qla_fab_scan_finish(scsi_qla_host_t *, srb_t *); int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *); @@ -819,7 +819,6 @@ extern int 
qlafx00_rescan_isp(scsi_qla_host_t *); /* PCI related functions */ extern int qla82xx_pci_config(struct scsi_qla_host *); extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); -extern int qla82xx_pci_region_offset(struct pci_dev *, int); extern int qla82xx_iospace_config(struct qla_hw_data *); /* Initialization related functions */ @@ -863,7 +862,6 @@ extern int qla82xx_rd_32(struct qla_hw_data *, ulong); /* ISP 8021 IDC */ extern void qla82xx_clear_drv_active(struct qla_hw_data *); -extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t); extern int qla82xx_idc_lock(struct qla_hw_data *); extern void qla82xx_idc_unlock(struct qla_hw_data *); extern int qla82xx_device_state_handler(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 1cf9d200d563..51c7cea71f90 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1710,7 +1710,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); alen = scnprintf( eiter->a.orom_version, sizeof(eiter->a.orom_version), - "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); + "%d.%02d", ha->efi_revision[1], ha->efi_revision[0]); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); @@ -2626,96 +2626,6 @@ qla2x00_port_speed_capability(uint16_t speed) } /** - * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. - * @vha: HA context - * @list: switch info entries to populate - * - * Returns 0 on success. - */ -int -qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) -{ - int rval; - uint16_t i; - struct qla_hw_data *ha = vha->hw; - ms_iocb_entry_t *ms_pkt; - struct ct_sns_req *ct_req; - struct ct_sns_rsp *ct_rsp; - struct ct_arg arg; - - if (!IS_IIDMA_CAPABLE(ha)) - return QLA_FUNCTION_FAILED; - if (!ha->flags.gpsc_supported) - return QLA_FUNCTION_FAILED; - - rval = qla2x00_mgmt_svr_login(vha); - if (rval) - return rval; - - arg.iocb = ha->ms_iocb; - arg.req_dma = ha->ct_sns_dma; - arg.rsp_dma = ha->ct_sns_dma; - arg.req_size = GPSC_REQ_SIZE; - arg.rsp_size = GPSC_RSP_SIZE; - arg.nport_handle = vha->mgmt_svr_loop_id; - - for (i = 0; i < ha->max_fibre_devices; i++) { - /* Issue GFPN_ID */ - /* Prepare common MS IOCB */ - ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); - - /* Prepare CT request */ - ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, - GPSC_RSP_SIZE); - ct_rsp = &ha->ct_sns->p.rsp; - - /* Prepare CT arguments -- port_name */ - memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name, - WWN_SIZE); - - /* Execute MS IOCB */ - rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, - sizeof(ms_iocb_entry_t)); - if (rval != QLA_SUCCESS) { - /*EMPTY*/ - ql_dbg(ql_dbg_disc, vha, 0x2059, - "GPSC issue IOCB failed (%d).\n", rval); - } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, - "GPSC")) != QLA_SUCCESS) { - /* FM command unsupported? 
*/ - if (rval == QLA_INVALID_COMMAND && - (ct_rsp->header.reason_code == - CT_REASON_INVALID_COMMAND_CODE || - ct_rsp->header.reason_code == - CT_REASON_COMMAND_UNSUPPORTED)) { - ql_dbg(ql_dbg_disc, vha, 0x205a, - "GPSC command unsupported, disabling " - "query.\n"); - ha->flags.gpsc_supported = 0; - rval = QLA_FUNCTION_FAILED; - break; - } - rval = QLA_FUNCTION_FAILED; - } else { - list->fp_speed = qla2x00_port_speed_capability( - be16_to_cpu(ct_rsp->rsp.gpsc.speed)); - ql_dbg(ql_dbg_disc, vha, 0x205b, - "GPSC ext entry - fpn " - "%8phN speeds=%04x speed=%04x.\n", - list[i].fabric_port_name, - be16_to_cpu(ct_rsp->rsp.gpsc.speeds), - be16_to_cpu(ct_rsp->rsp.gpsc.speed)); - } - - /* Last device exit. */ - if (list[i].d_id.b.rsvd_1 != 0) - break; - } - - return (rval); -} - -/** * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query. * * @vha: HA context @@ -3168,7 +3078,30 @@ static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn) return rc; } -void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) +static bool qla_ok_to_clear_rscn(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + u32 rscn_gen; + + rscn_gen = atomic_read(&vha->rscn_gen); + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2017, + "%s %d %8phC rscn_gen %x start %x end %x current %x\n", + __func__, __LINE__, fcport->port_name, fcport->rscn_gen, + vha->scan.rscn_gen_start, vha->scan.rscn_gen_end, rscn_gen); + + if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_start, + vha->scan.rscn_gen_end)) + /* rscn came in before fabric scan */ + return true; + + if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_end, rscn_gen)) + /* rscn came in after fabric scan */ + return false; + + /* rare: fcport's scan_needed + rscn_gen must be stale */ + return true; +} + +void qla_fab_scan_finish(scsi_qla_host_t *vha, srb_t *sp) { fc_port_t *fcport; u32 i, rc; @@ -3281,10 +3214,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) (fcport->scan_needed && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_NVME_INITIATOR)) { + fcport->scan_needed = 0; qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; - fcport->scan_needed = 0; break; } @@ -3325,7 +3258,9 @@ login_logout: do_delete = true; } - fcport->scan_needed = 0; + if (qla_ok_to_clear_rscn(vha, fcport)) + fcport->scan_needed = 0; + if (((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) || @@ -3355,7 +3290,9 @@ login_logout: fcport->port_name, fcport->loop_id, fcport->login_retry); } - fcport->scan_needed = 0; + + if (qla_ok_to_clear_rscn(vha, fcport)) + fcport->scan_needed = 0; qla24xx_fcport_handle_login(vha, fcport); } } @@ -3379,14 +3316,11 @@ out: } } -static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, +static int qla2x00_post_next_scan_work(struct scsi_qla_host *vha, srb_t *sp, int cmd) { struct qla_work_evt *e; - if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) - return QLA_PARAMETER_ERROR; - e = qla2x00_alloc_work(vha, cmd); if (!e) return QLA_FUNCTION_FAILED; @@ -3396,37 +3330,15 @@ static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, return qla2x00_post_work(vha, e); } -static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, - srb_t *sp, int cmd) -{ - struct qla_work_evt *e; - - if (cmd != QLA_EVT_GPNFT) - return QLA_PARAMETER_ERROR; - - e = qla2x00_alloc_work(vha, cmd); - if (!e) - return QLA_FUNCTION_FAILED; - - e->u.gpnft.fc4_type = FC4_TYPE_NVME; - e->u.gpnft.sp = sp; - - return qla2x00_post_work(vha, 
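
qla_ok_to_clear_rscn() above is the consumer side of the new RSCN bookkeeping: a port's scan_needed flag may only be cleared when its RSCN generation falls inside the window the just-finished fabric scan covered. The three-way test, restated as a sketch with hypothetical names (like the driver's helper, it assumes the u32 counter does not wrap within a window):

	#include <linux/types.h>

	/*
	 * start/end bracket the generations this scan has seen;
	 * cur is the host's generation counter right now.
	 */
	static bool ok_to_clear_rscn(u32 port_gen, u32 start, u32 end, u32 cur)
	{
		if (port_gen >= start && port_gen <= end)
			return true;	/* RSCN arrived before the scan: handled */
		if (port_gen >= end && port_gen <= cur)
			return false;	/* RSCN arrived after the scan: rescan */
		return true;		/* stale generation: treat as handled */
	}
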
e); -} - static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, struct srb *sp) { struct qla_hw_data *ha = vha->hw; int num_fibre_dev = ha->max_fibre_devices; - struct ct_sns_req *ct_req = - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; struct ct_sns_gpnft_rsp *ct_rsp = (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; struct ct_sns_gpn_ft_data *d; struct fab_scan_rp *rp; - u16 cmd = be16_to_cpu(ct_req->command); - u8 fc4_type = sp->gen2; int i, j, k; port_id_t id; u8 found; @@ -3445,85 +3357,83 @@ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, if (id.b24 == 0 || wwn == 0) continue; - if (fc4_type == FC4_TYPE_FCP_SCSI) { - if (cmd == GPN_FT_CMD) { - rp = &vha->scan.l[j]; - rp->id = id; - memcpy(rp->port_name, d->port_name, 8); - j++; - rp->fc4type = FS_FC4TYPE_FCP; - } else { - for (k = 0; k < num_fibre_dev; k++) { - rp = &vha->scan.l[k]; - if (id.b24 == rp->id.b24) { - memcpy(rp->node_name, - d->port_name, 8); - break; - } + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2025, + "%s %06x %8ph \n", + __func__, id.b24, d->port_name); + + switch (vha->scan.step) { + case FAB_SCAN_GPNFT_FCP: + rp = &vha->scan.l[j]; + rp->id = id; + memcpy(rp->port_name, d->port_name, 8); + j++; + rp->fc4type = FS_FC4TYPE_FCP; + break; + case FAB_SCAN_GNNFT_FCP: + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (id.b24 == rp->id.b24) { + memcpy(rp->node_name, + d->port_name, 8); + break; } } - } else { - /* Search if the fibre device supports FC4_TYPE_NVME */ - if (cmd == GPN_FT_CMD) { - found = 0; - - for (k = 0; k < num_fibre_dev; k++) { - rp = &vha->scan.l[k]; - if (!memcmp(rp->port_name, - d->port_name, 8)) { - /* - * Supports FC-NVMe & FCP - */ - rp->fc4type |= FS_FC4TYPE_NVME; - found = 1; - break; - } + break; + case FAB_SCAN_GPNFT_NVME: + found = 0; + + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (!memcmp(rp->port_name, d->port_name, 8)) { + /* + * Supports FC-NVMe & FCP + */ + rp->fc4type |= FS_FC4TYPE_NVME; + found = 1; + break; } + } - /* We found new FC-NVMe only port */ - if (!found) { - for (k = 0; k < num_fibre_dev; k++) { - rp = &vha->scan.l[k]; - if (wwn_to_u64(rp->port_name)) { - continue; - } else { - rp->id = id; - memcpy(rp->port_name, - d->port_name, 8); - rp->fc4type = - FS_FC4TYPE_NVME; - break; - } - } - } - } else { + /* We found new FC-NVMe only port */ + if (!found) { for (k = 0; k < num_fibre_dev; k++) { rp = &vha->scan.l[k]; - if (id.b24 == rp->id.b24) { - memcpy(rp->node_name, - d->port_name, 8); + if (wwn_to_u64(rp->port_name)) { + continue; + } else { + rp->id = id; + memcpy(rp->port_name, d->port_name, 8); + rp->fc4type = FS_FC4TYPE_NVME; break; } } } + break; + case FAB_SCAN_GNNFT_NVME: + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (id.b24 == rp->id.b24) { + memcpy(rp->node_name, d->port_name, 8); + break; + } + } + break; + default: + break; } } } -static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) +static void qla_async_scan_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; - struct ct_sns_req *ct_req = - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; - u16 cmd = be16_to_cpu(ct_req->command); - u8 fc4_type = sp->gen2; unsigned long flags; int rc; /* gen2 field is holding the fc4type */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async done-%s res %x FC4Type %x\n", - sp->name, res, sp->gen2); + ql_dbg(ql_dbg_disc, vha, 0x2026, + "Async done-%s res %x step %x\n", + sp->name, res, vha->scan.step); sp->rc = res; if (res) { @@ -3547,8 +3457,7 
@@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) * sp for GNNFT_DONE work. This will allow all * the resource to get freed up. */ - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, - QLA_EVT_GNNFT_DONE); + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); if (rc) { /* Cleanup here to prevent memory leak */ qla24xx_sp_unmap(vha, sp); @@ -3573,28 +3482,30 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) qla2x00_find_free_fcp_nvme_slot(vha, sp); - if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled && - cmd == GNN_FT_CMD) { - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); - sp->rc = res; - rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT); - if (rc) { - qla24xx_sp_unmap(vha, sp); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - } - return; - } + switch (vha->scan.step) { + case FAB_SCAN_GPNFT_FCP: + case FAB_SCAN_GPNFT_NVME: + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD); + break; + case FAB_SCAN_GNNFT_FCP: + if (vha->flags.nvme_enabled) + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD); + else + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); - if (cmd == GPN_FT_CMD) { - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, - QLA_EVT_GPNFT_DONE); - } else { - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, - QLA_EVT_GNNFT_DONE); + break; + case FAB_SCAN_GNNFT_NVME: + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); + break; + default: + /* should not be here */ + WARN_ON(1); + rc = QLA_FUNCTION_FAILED; + break; } if (rc) { @@ -3605,127 +3516,16 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) } } -/* - * Get WWNN list for fc4_type - * - * It is assumed the same SRB is re-used from GPNFT to avoid - * mem free & re-alloc - */ -static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, - u8 fc4_type) -{ - int rval = QLA_FUNCTION_FAILED; - struct ct_sns_req *ct_req; - struct ct_sns_pkt *ct_sns; - unsigned long flags; - - if (!vha->flags.online) { - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); - goto done_free_sp; - } - - if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { - ql_log(ql_log_warn, vha, 0xffff, - "%s: req %p rsp %p are not setup\n", - __func__, sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.rsp); - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); - WARN_ON(1); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - goto done_free_sp; - } - - ql_dbg(ql_dbg_disc, vha, 0xfffff, - "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n", - __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size, - sp->u.iocb_cmd.u.ctarg.req_size); - - sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gnnft"; - sp->gen1 = vha->hw->base_qpair->chip_reset; - sp->gen2 = fc4_type; - qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, - qla2x00_async_gpnft_gnnft_sp_done); - - memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); - memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); - - ct_sns = 
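
Where the old completion handler inferred its phase from the CT opcode and the FC4 type stashed in sp->gen2, the rewrite drives everything from vha->scan.step. The sequencing that the switches above implement amounts to a four-step state machine; an illustrative sketch (the enum names mirror the FAB_SCAN_* steps in the hunks, the helper itself is not driver code):

	enum fab_scan_step {
		FAB_SCAN_START,
		FAB_SCAN_GPNFT_FCP,	/* WWPNs of FCP-capable ports */
		FAB_SCAN_GNNFT_FCP,	/* WWNNs of FCP-capable ports */
		FAB_SCAN_GPNFT_NVME,	/* WWPNs of NVMe-capable ports */
		FAB_SCAN_GNNFT_NVME,	/* WWNNs of NVMe-capable ports */
	};

	/* next step to issue, or FAB_SCAN_START when the scan is complete */
	static enum fab_scan_step fab_scan_next(enum fab_scan_step cur,
						bool nvme_enabled)
	{
		switch (cur) {
		case FAB_SCAN_START:		return FAB_SCAN_GPNFT_FCP;
		case FAB_SCAN_GPNFT_FCP:	return FAB_SCAN_GNNFT_FCP;
		case FAB_SCAN_GNNFT_FCP:
			return nvme_enabled ? FAB_SCAN_GPNFT_NVME : FAB_SCAN_START;
		case FAB_SCAN_GPNFT_NVME:	return FAB_SCAN_GNNFT_NVME;
		case FAB_SCAN_GNNFT_NVME:
		default:			return FAB_SCAN_START;
		}
	}
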
(struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, - sp->u.iocb_cmd.u.ctarg.rsp_size); - - /* GPN_FT req */ - ct_req->req.gpn_ft.port_type = fc4_type; - - sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; - sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s hdl=%x FC4Type %x.\n", sp->name, - sp->handle, ct_req->req.gpn_ft.port_type); - - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) { - goto done_free_sp; - } - - return rval; - -done_free_sp: - if (sp->u.iocb_cmd.u.ctarg.req) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.req_allocated_size, - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - } - if (sp->u.iocb_cmd.u.ctarg.rsp) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - } - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - if (vha->scan.scan_flags == 0) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s: schedule\n", __func__); - vha->scan.scan_flags |= SF_QUEUED; - schedule_delayed_work(&vha->scan.scan_work, 5); - } - spin_unlock_irqrestore(&vha->work_lock, flags); - - - return rval; -} /* GNNFT */ - -void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) -{ - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, - "%s enter\n", __func__); - qla24xx_async_gnnft(vha, sp, sp->gen2); -} - /* Get WWPN list for certain fc4_type */ -int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) +int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; struct ct_sns_pkt *ct_sns; - u32 rspsz; + u32 rspsz = 0; unsigned long flags; - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x200c, "%s enter\n", __func__); if (!vha->flags.online) @@ -3734,22 +3534,21 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags & SF_SCANNING) { spin_unlock_irqrestore(&vha->work_lock, flags); - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012, "%s: scan active\n", __func__); return rval; } vha->scan.scan_flags |= SF_SCANNING; + if (!sp) + vha->scan.step = FAB_SCAN_START; + spin_unlock_irqrestore(&vha->work_lock, flags); - if (fc4_type == FC4_TYPE_FCP_SCSI) { - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + switch (vha->scan.step) { + case FAB_SCAN_START: + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2018, "%s: Performing FCP Scan\n", __func__); - if (sp) { - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - } - /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) { @@ -3765,7 +3564,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0x201a, "Failed to allocate ct_sns request.\n"); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -3773,7 +3572,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) qla2x00_rel_sp(sp); return rval; } - 
sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; rspsz = sizeof(struct ct_sns_gpnft_rsp) + vha->hw->max_fibre_devices * @@ -3785,7 +3583,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz; if (!sp->u.iocb_cmd.u.ctarg.rsp) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0x201b, "Failed to allocate ct_sns request.\n"); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -3805,35 +3603,95 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) "%s scan list size %d\n", __func__, vha->scan.size); memset(vha->scan.l, 0, vha->scan.size); - } else if (!sp) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "NVME scan did not provide SP\n"); + + vha->scan.step = FAB_SCAN_GPNFT_FCP; + break; + case FAB_SCAN_GPNFT_FCP: + vha->scan.step = FAB_SCAN_GNNFT_FCP; + break; + case FAB_SCAN_GNNFT_FCP: + vha->scan.step = FAB_SCAN_GPNFT_NVME; + break; + case FAB_SCAN_GPNFT_NVME: + vha->scan.step = FAB_SCAN_GNNFT_NVME; + break; + case FAB_SCAN_GNNFT_NVME: + default: + /* should not be here */ + WARN_ON(1); + goto done_free_sp; + } + + if (!sp) { + ql_dbg(ql_dbg_disc, vha, 0x201c, + "scan did not provide SP\n"); return rval; } + if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0x201d, + "%s: req %p rsp %p are not setup\n", + __func__, sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.rsp); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + WARN_ON(1); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + goto done_free_sp; + } + + rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); + sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gpnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; - sp->gen2 = fc4_type; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, - qla2x00_async_gpnft_gnnft_sp_done); - - rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; - memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); - memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); + qla_async_scan_sp_done); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); - /* GPN_FT req */ - ct_req->req.gpn_ft.port_type = fc4_type; + /* CT_IU preamble */ + switch (vha->scan.step) { + case FAB_SCAN_GPNFT_FCP: + sp->name = "gpnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI; + sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; + break; + case FAB_SCAN_GNNFT_FCP: + sp->name = "gnnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI; + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; + break; + case FAB_SCAN_GPNFT_NVME: + sp->name = "gpnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME; + sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; + break; + case FAB_SCAN_GNNFT_NVME: + sp->name = "gnnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME; + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; + break; + default: + /* should not be here */ + 
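
Both CT buffers are coherent DMA allocations: a small fixed-size request and a response sized by max_fibre_devices. The allocate-and-unwind pattern used here, reduced to a self-contained sketch with hypothetical parameters:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int alloc_ct_buffers(struct pci_dev *pdev,
				    size_t req_sz, void **req, dma_addr_t *req_dma,
				    size_t rsp_sz, void **rsp, dma_addr_t *rsp_dma)
	{
		*req = dma_alloc_coherent(&pdev->dev, req_sz, req_dma, GFP_KERNEL);
		if (!*req)
			return -ENOMEM;

		*rsp = dma_alloc_coherent(&pdev->dev, rsp_sz, rsp_dma, GFP_KERNEL);
		if (!*rsp) {
			/* unwind the request buffer on partial failure */
			dma_free_coherent(&pdev->dev, req_sz, *req, *req_dma);
			*req = NULL;
			return -ENOMEM;
		}
		return 0;
	}
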
WARN_ON(1); + goto done_free_sp; + } sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s hdl=%x FC4Type %x.\n", sp->name, - sp->handle, ct_req->req.gpn_ft.port_type); + ql_dbg(ql_dbg_disc, vha, 0x2003, + "%s: step %d, rsp size %d, req size %d hdl %x %s FC4TYPE %x \n", + __func__, vha->scan.step, sp->u.iocb_cmd.u.ctarg.rsp_size, + sp->u.iocb_cmd.u.ctarg.req_size, sp->handle, sp->name, + ct_req->req.gpn_ft.port_type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -3864,7 +3722,7 @@ done_free_sp: spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; if (vha->scan.scan_flags == 0) { - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2007, "%s: Scan scheduled.\n", __func__); vha->scan.scan_flags |= SF_QUEUED; schedule_delayed_work(&vha->scan.scan_work, 5); @@ -3875,6 +3733,15 @@ done_free_sp: return rval; } +void qla_fab_scan_start(struct scsi_qla_host *vha) +{ + int rval; + + rval = qla_fab_async_scan(vha, NULL); + if (rval) + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); +} + void qla_scan_work_fn(struct work_struct *work) { struct fab_scan *s = container_of(to_delayed_work(work), diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 8377624d76c9..0c2dd782b675 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -67,7 +67,7 @@ void qla2x00_sp_free(srb_t *sp) { struct srb_iocb *iocb = &sp->u.iocb_cmd; - del_timer(&iocb->timer); + timer_delete(&iocb->timer); qla2x00_rel_sp(sp); } @@ -423,7 +423,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) sp->type = SRB_LOGOUT_CMD; sp->name = "logout"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, - qla2x00_async_logout_sp_done), + qla2x00_async_logout_sp_done); ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", @@ -1842,10 +1842,18 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, return qla2x00_post_work(vha, e); } +static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen) +{ + *ret_rscn_gen = atomic_inc_return(&vha->rscn_gen); + /* memory barrier */ + wmb(); +} + void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport; unsigned long flags; + u32 rscn_gen; switch (ea->id.b.rsvd_1) { case RSCN_PORT_ADDR: @@ -1875,15 +1883,16 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) * Otherwise we're already in the middle of a relogin */ fcport->scan_needed = 1; - fcport->rscn_gen++; + qla_rscn_gen_tick(vha, &fcport->rscn_gen); } } else { fcport->scan_needed = 1; - fcport->rscn_gen++; + qla_rscn_gen_tick(vha, &fcport->rscn_gen); } } break; case RSCN_AREA_ADDR: + qla_rscn_gen_tick(vha, &rscn_gen); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) @@ -1891,11 +1900,12 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { fcport->scan_needed = 1; - fcport->rscn_gen++; + fcport->rscn_gen = rscn_gen; } } break; case RSCN_DOM_ADDR: + qla_rscn_gen_tick(vha, &rscn_gen); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) @@ -1903,19 +1913,20 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) if ((ea->id.b24 & 0xff0000) == 
(fcport->d_id.b24 & 0xff0000)) { fcport->scan_needed = 1; - fcport->rscn_gen++; + fcport->rscn_gen = rscn_gen; } } break; case RSCN_FAB_ADDR: default: + qla_rscn_gen_tick(vha, &rscn_gen); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) continue; fcport->scan_needed = 1; - fcport->rscn_gen++; + fcport->rscn_gen = rscn_gen; } break; } @@ -1924,6 +1935,7 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) if (vha->scan.scan_flags == 0) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); vha->scan.scan_flags |= SF_QUEUED; + vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen); schedule_delayed_work(&vha->scan.scan_work, 5); } spin_unlock_irqrestore(&vha->work_lock, flags); @@ -2669,7 +2681,7 @@ exit: return rval; } -static void qla_enable_fce_trace(scsi_qla_host_t *vha) +void qla_enable_fce_trace(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; @@ -3705,25 +3717,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) return rval; } -static void -qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) +int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) { dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; if (!IS_FWI2_CAPABLE(ha)) - return; + return -EINVAL; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) - return; + return -EINVAL; if (ha->fce) { ql_dbg(ql_dbg_init, vha, 0x00bd, "%s: FCE Mem is already allocated.\n", __func__); - return; + return -EIO; } /* Allocate memory for Fibre Channel Event Buffer. */ @@ -3733,7 +3744,7 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) ql_log(ql_log_warn, vha, 0x00be, "Unable to allocate (%d KB) for FCE.\n", FCE_SIZE / 1024); - return; + return -ENOMEM; } ql_dbg(ql_dbg_init, vha, 0x00c0, @@ -3742,6 +3753,16 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) ha->fce_dma = tc_dma; ha->fce = tc; ha->fce_bufs = FCE_NUM_BUFFERS; + return 0; +} + +void qla2x00_free_fce_trace(struct qla_hw_data *ha) +{ + if (!ha->fce) + return; + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma); + ha->fce = NULL; + ha->fce_dma = 0; } static void @@ -3832,9 +3853,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); - qla2x00_alloc_fce_trace(vha); - if (ha->fce) + if (ha->fce) { fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; + ha->flags.fce_dump_buf_alloced = 1; + } qla2x00_alloc_eft_trace(vha); if (ha->eft) eft_size = EFT_SIZE; @@ -6393,10 +6415,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) qlt_do_generation_tick(vha, &discovery_gen); if (USE_ASYNC_SCAN(ha)) { - rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI, - NULL); - if (rval) - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + /* start of scan begins here */ + vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen); + qla_fab_scan_start(vha); } else { list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->scan_state = QLA_FCPORT_SCAN; @@ -8207,15 +8228,21 @@ qla28xx_get_aux_images( struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status; bool valid_pri_image = false, valid_sec_image = false; bool active_pri_image = false, active_sec_image = false; + int rc; if (!ha->flt_region_aux_img_status_pri) { ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n"); goto check_sec_image; } - qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, + rc = qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, 
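
qla_rscn_gen_tick() is the producer side of the window that qla_ok_to_clear_rscn() tests: every RSCN bumps a per-host atomic generation, the scheduling path snapshots it into scan.rscn_gen_start, and the start of the actual scan later snapshots scan.rscn_gen_end. The core pattern as a sketch (the structure and names are hypothetical):

	#include <linux/atomic.h>
	#include <linux/types.h>

	struct scan_window {
		u32 start;	/* snapshot when the scan was queued */
		u32 end;	/* snapshot when the scan starts issuing */
	};

	static u32 rscn_gen_tick(atomic_t *gen)
	{
		u32 val = atomic_inc_return(gen);

		wmb();		/* publish the new generation first */
		return val;
	}

	static void scan_window_open(struct scan_window *w, atomic_t *gen)
	{
		w->start = atomic_read(gen);
	}

	static void scan_window_close(struct scan_window *w, atomic_t *gen)
	{
		w->end = atomic_read(gen);
	}
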
ha->flt_region_aux_img_status_pri, sizeof(pri_aux_image_status) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a1, + "Unable to read Primary aux image(%x).\n", rc); + goto check_sec_image; + } qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) { @@ -8246,9 +8273,15 @@ check_sec_image: goto check_valid_image; } - qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, + rc = qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, ha->flt_region_aux_img_status_sec, sizeof(sec_aux_image_status) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a2, + "Unable to read Secondary aux image(%x).\n", rc); + goto check_valid_image; + } + qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) { @@ -8306,6 +8339,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha, struct qla27xx_image_status pri_image_status, sec_image_status; bool valid_pri_image = false, valid_sec_image = false; bool active_pri_image = false, active_sec_image = false; + int rc; if (!ha->flt_region_img_status_pri) { ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n"); @@ -8347,8 +8381,14 @@ check_sec_image: goto check_valid_image; } - qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), + rc = qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a3, + "Unable to read Secondary image status(%x).\n", rc); + goto check_valid_image; + } + qla27xx_print_image(vha, "Secondary image", &sec_image_status); if (qla27xx_check_image_status_signature(&sec_image_status)) { @@ -8420,11 +8460,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, "FW: Loading firmware from flash (%x).\n", faddr); dcode = (uint32_t *)req->ring; - qla24xx_read_flash_data(vha, dcode, faddr, 8); - if (qla24xx_risc_firmware_invalid(dcode)) { + rval = qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (rval || qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x008c, - "Unable to verify the integrity of flash firmware " - "image.\n"); + "Unable to verify the integrity of flash firmware image (rval %x).\n", rval); ql_log(ql_log_fatal, vha, 0x008d, "Firmware data: %08x %08x %08x %08x.\n", dcode[0], dcode[1], dcode[2], dcode[3]); @@ -8438,7 +8477,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, for (j = 0; j < segments; j++) { ql_dbg(ql_dbg_init, vha, 0x008d, "-> Loading segment %u...\n", j); - qla24xx_read_flash_data(vha, dcode, faddr, 10); + rval = qla24xx_read_flash_data(vha, dcode, faddr, 10); + if (rval) { + ql_log(ql_log_fatal, vha, 0x016a, + "-> Unable to read segment addr + size .\n"); + return QLA_FUNCTION_FAILED; + } risc_addr = be32_to_cpu((__force __be32)dcode[2]); risc_size = be32_to_cpu((__force __be32)dcode[3]); if (!*srisc_addr) { @@ -8454,7 +8498,13 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, ql_dbg(ql_dbg_init, vha, 0x008e, "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", fragment, risc_addr, faddr, dlen); - qla24xx_read_flash_data(vha, dcode, faddr, dlen); + rval = qla24xx_read_flash_data(vha, dcode, faddr, dlen); + if (rval) { + ql_log(ql_log_fatal, vha, 0x016b, + "-> Unable to read fragment(faddr %#x dlen %#lx).\n", + faddr, dlen); + return QLA_FUNCTION_FAILED; + } for (i = 0; i < dlen; i++) dcode[i] = swab32(dcode[i]); @@ -8483,7 
+8533,14 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, fwdt->length = 0; dcode = (uint32_t *)req->ring; - qla24xx_read_flash_data(vha, dcode, faddr, 7); + + rval = qla24xx_read_flash_data(vha, dcode, faddr, 7); + if (rval) { + ql_log(ql_log_fatal, vha, 0x016c, + "-> Unable to read template size.\n"); + goto failed; + } + risc_size = be32_to_cpu((__force __be32)dcode[2]); ql_dbg(ql_dbg_init, vha, 0x0161, "-> fwdt%u template array at %#x (%#x dwords)\n", @@ -8509,11 +8566,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, } dcode = fwdt->template; - qla24xx_read_flash_data(vha, dcode, faddr, risc_size); + rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size); - if (!qla27xx_fwdt_template_valid(dcode)) { + if (rval || !qla27xx_fwdt_template_valid(dcode)) { ql_log(ql_log_warn, vha, 0x0165, - "-> fwdt%u failed template validate\n", j); + "-> fwdt%u failed template validate (rval %x)\n", + j, rval); goto failed; } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index a4a56ab0ba74..ef4b3cc1cd77 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -631,3 +631,11 @@ static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha) } return 0; } + +static inline bool val_is_in_range(u32 val, u32 start, u32 end) +{ + if (val >= start && val <= end) + return true; + else + return false; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 0b41e8a06602..3224044f1775 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2572,7 +2572,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) static void qla2x00_async_done(struct srb *sp, int res) { - if (del_timer(&sp->u.iocb_cmd.timer)) { + if (timer_delete(&sp->u.iocb_cmd.timer)) { /* * Successfully cancelled the timeout handler * ref: TMR @@ -2645,7 +2645,7 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp) elsio->u.els_logo.els_logo_pyld, elsio->u.els_logo.els_logo_pyld_dma); - del_timer(&elsio->timer); + timer_delete(&elsio->timer); qla2x00_rel_sp(sp); } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d48007e18288..fe98c76e9be3 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3014,12 +3014,6 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, } } -struct scsi_dif_tuple { - __be16 guard; /* Checksum */ - __be16 app_tag; /* APPL identifier */ - __be32 ref_tag; /* Target LBA or indirect LBA */ -}; - /* * Checks the guard or meta-data for the type of error * detected by the HBA. 
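
The new val_is_in_range() helper reads the same as a single boolean expression, and because it compares free-running u32 generation counters it quietly assumes a window never straddles a wrap of the counter. Both points in a sketch; the wrap-tolerant variant follows the kernel's time_after() idiom and is shown only for illustration, it is not part of this patch:

	#include <linux/types.h>

	static inline bool val_in_range(u32 val, u32 start, u32 end)
	{
		return val >= start && val <= end;
	}

	/* wrap-tolerant alternative, valid while a window spans < 2^31 ticks */
	static inline bool val_in_range_wrap(u32 val, u32 start, u32 end)
	{
		return (s32)(val - start) >= 0 && (s32)(end - val) >= 0;
	}
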
In case of errors, we set the diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index b67416951a5f..8b71ac0b1d99 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -20,7 +20,7 @@ void qla2x00_vp_stop_timer(scsi_qla_host_t *vha) { if (vha->vp_idx && vha->timer_active) { - del_timer_sync(&vha->timer); + timer_delete_sync(&vha->timer); vha->timer_active = 0; } } @@ -180,7 +180,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); list_for_each_entry(fcport, &vha->vp_fcports, list) - fcport->logout_on_delete = 0; + fcport->logout_on_delete = 1; if (!vha->hw->flags.edif_enabled) qla2x00_wait_for_sess_deletion(vha); @@ -506,6 +506,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) return(NULL); } + vha->irq_offset = QLA_BASE_VECTORS; host = vha->host; fc_vport->dd_data = vha; /* New host info */ diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 083f94e43fba..82a7e21ddc83 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1909,10 +1909,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) if (fx_type == FXDISC_GET_CONFIG_INFO) { struct config_info_data *pinfo = (struct config_info_data *) fdisc->u.fxiocb.rsp_addr; - strscpy(vha->hw->model_number, pinfo->model_num, - ARRAY_SIZE(vha->hw->model_number)); - strscpy(vha->hw->model_desc, pinfo->model_description, - ARRAY_SIZE(vha->hw->model_desc)); + memtostr(vha->hw->model_number, pinfo->model_num); + memtostr(vha->hw->model_desc, pinfo->model_description); memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name, sizeof(vha->hw->mr.symbolic_name)); memcpy(&vha->hw->mr.serial_num, pinfo->serial_num, diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h index 4f63aff333db..3a2bd953a976 100644 --- a/drivers/scsi/qla2xxx/qla_mr.h +++ b/drivers/scsi/qla2xxx/qla_mr.h @@ -282,8 +282,8 @@ struct register_host_info { #define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32) struct config_info_data { - uint8_t model_num[16]; - uint8_t model_description[80]; + uint8_t model_num[16] __nonstring; + uint8_t model_description[80] __nonstring; uint8_t reserved0[160]; uint8_t symbolic_name[64]; uint8_t serial_num[32]; diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index a8ddf356e662..8ee2e337c9e1 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -8,7 +8,6 @@ #include <linux/delay.h> #include <linux/nvme.h> #include <linux/nvme-fc.h> -#include <linux/blk-mq-pci.h> #include <linux/blk-mq.h> static struct nvme_fc_port_template qla_nvme_fc_transport; @@ -49,7 +48,10 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) return 0; } - if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) + if (qla_nvme_register_hba(vha)) + return 0; + + if (!vha->nvme_local_port) return 0; if (!(fcport->nvme_prli_service_param & @@ -838,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport, { struct scsi_qla_host *vha = lport->private; - blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset); + blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset); } static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 6dfb70edb9a6..0cd3db8ed4ef 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c 
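
The qla_mr change pairs two annotations: __nonstring marks the firmware-supplied fields as fixed-size byte arrays that need not be NUL-terminated, and memtostr() (which derives both sizes at compile time and always terminates the destination) replaces strscpy() calls that assumed terminated input. A sketch against a hypothetical layout:

	#include <linux/printk.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct fw_config {
		u8 model_num[16] __nonstring;	/* space-padded, maybe no NUL */
	};

	static void report_model(const struct fw_config *cfg)
	{
		char model[sizeof(cfg->model_num) + 1];

		memtostr(model, cfg->model_num);	/* bounded + terminated */
		pr_info("model: %s\n", model);
	}
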
@@ -1099,11 +1099,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) unsigned offset, n; struct qla_hw_data *ha = vha->hw; - struct crb_addr_pair { - long addr; - long data; - }; - /* Halt all the individual PEGs and other blocks of the ISP */ qla82xx_rom_lock(ha); @@ -1595,25 +1590,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha) return (u8 *)&ha->hablob->fw->data[offset]; } -/* PCI related functions */ -int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) -{ - unsigned long val = 0; - u32 control; - - switch (region) { - case 0: - val = 0; - break; - case 1: - pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); - val = control + QLA82XX_MSIX_TBL_SPACE; - break; - } - return val; -} - - int qla82xx_iospace_config(struct qla_hw_data *ha) { @@ -2934,32 +2910,6 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) } } -/* -* qla82xx_wait_for_state_change -* Wait for device state to change from given current state -* -* Note: -* IDC lock must not be held upon entry -* -* Return: -* Changed device state. -*/ -uint32_t -qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) -{ - struct qla_hw_data *ha = vha->hw; - uint32_t dev_state; - - do { - msleep(1000); - qla82xx_idc_lock(ha); - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - qla82xx_idc_unlock(ha); - } while (dev_state == curr_state); - - return dev_state; -} - void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1e2f52210f60..288ce04fc2b1 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -13,7 +13,6 @@ #include <linux/mutex.h> #include <linux/kobject.h> #include <linux/slab.h> -#include <linux/blk-mq-pci.h> #include <linux/refcount.h> #include <linux/crash_dump.h> #include <linux/trace_events.h> @@ -177,12 +176,6 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk, " 1 -- Error isolation enabled only for DIX Type 0\n" " 2 -- Error isolation enabled for all Types\n"); -int ql2xiidmaenable = 1; -module_param(ql2xiidmaenable, int, S_IRUGO); -MODULE_PARM_DESC(ql2xiidmaenable, - "Enables iIDMA settings " - "Default is 1 - perform iIDMA. 0 - no iIDMA."); - int ql2xmqsupport = 1; module_param(ql2xmqsupport, int, S_IRUGO); MODULE_PARM_DESC(ql2xmqsupport, @@ -200,12 +193,6 @@ MODULE_PARM_DESC(ql2xfwloadbin, " 1 -- load firmware from flash.\n" " 0 -- use default semantics.\n"); -int ql2xetsenable; -module_param(ql2xetsenable, int, S_IRUGO); -MODULE_PARM_DESC(ql2xetsenable, - "Enables firmware ETS burst." 
- "Default is 0 - skip ETS enablement."); - int ql2xdbwr = 1; module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xdbwr, @@ -402,7 +389,7 @@ qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) static __inline__ void qla2x00_stop_timer(scsi_qla_host_t *vha) { - del_timer_sync(&vha->timer); + timer_delete_sync(&vha->timer); vha->timer_active = 0; } @@ -1875,14 +1862,9 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - /* - * perform lockless completion during driver unload - */ if (qla2x00_chip_is_down(vha)) { req->outstanding_cmds[cnt] = NULL; - spin_unlock_irqrestore(qp->qp_lock_ptr, flags); sp->done(sp, res); - spin_lock_irqsave(qp->qp_lock_ptr, flags); continue; } @@ -1939,7 +1921,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) } static int -qla2xxx_slave_alloc(struct scsi_device *sdev) +qla2xxx_sdev_init(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); @@ -1952,20 +1934,17 @@ qla2xxx_slave_alloc(struct scsi_device *sdev) } static int -qla2xxx_slave_configure(struct scsi_device *sdev) +qla2xxx_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { scsi_qla_host_t *vha = shost_priv(sdev->host); struct req_que *req = vha->req; - if (IS_T10_PI_CAPABLE(vha->hw)) - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); - scsi_change_queue_depth(sdev, req->max_q_depth); return 0; } static void -qla2xxx_slave_destroy(struct scsi_device *sdev) +qla2xxx_sdev_destroy(struct scsi_device *sdev) { sdev->hostdata = NULL; } @@ -3509,11 +3488,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); - ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); + ha->dpc_lp_wq = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name); INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); - ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); + ha->dpc_hp_wq = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name); INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); INIT_WORK(&ha->idc_state_handler, qla83xx_idc_state_handler_work); @@ -3575,6 +3556,9 @@ skip_dpc: QLA_SG_ALL : 128; } + if (IS_T10_PI_CAPABLE(base_vha->hw)) + host->dma_alignment = 0x7; + ret = scsi_add_host(host, &pdev->dev); if (ret) goto probe_failed; @@ -4689,7 +4673,7 @@ static void qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) { u32 temp; - struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; + struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb; *ret_cnt = FW_DEF_EXCHANGES_CNT; if (max_cnt > vha->hw->max_exchg) @@ -5563,15 +5547,11 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla2x00_async_prlo_done(vha, e->u.logio.fcport, e->u.logio.data); break; - case QLA_EVT_GPNFT: - qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, - e->u.gpnft.sp); - break; - case QLA_EVT_GPNFT_DONE: - qla24xx_async_gpnft_done(vha, e->u.iosb.sp); + case QLA_EVT_SCAN_CMD: + qla_fab_async_scan(vha, e->u.iosb.sp); break; - case QLA_EVT_GNNFT_DONE: - qla24xx_async_gnnft_done(vha, e->u.iosb.sp); + case QLA_EVT_SCAN_FINISH: + qla_fab_scan_finish(vha, e->u.iosb.sp); break; case QLA_EVT_GFPNID: qla24xx_async_gfpnid(vha, e->u.fcport.fcport); @@ -6909,12 +6889,15 @@ qla2x00_do_dpc(void *data) set_user_nice(current, 
MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { + while (1) { ql_dbg(ql_dbg_dpc, base_vha, 0x4000, "DPC handler sleeping.\n"); schedule(); + if (kthread_should_stop()) + break; + if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) qla_pci_set_eeh_busy(base_vha); @@ -6927,15 +6910,16 @@ qla2x00_do_dpc(void *data) goto end_loop; } + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + /* don't do any work. Wait to be terminated by kthread_stop */ + goto end_loop; + ha->dpc_active = 1; ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, "DPC handler waking up, dpc_flags=0x%lx.\n", base_vha->dpc_flags); - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - break; - if (IS_P3P_TYPE(ha)) { if (IS_QLA8044(ha)) { if (test_and_clear_bit(ISP_UNRECOVERABLE, @@ -7248,9 +7232,6 @@ end_loop: */ ha->dpc_active = 0; - /* Cleanup any residual CTX SRBs. */ - qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); - return 0; } @@ -8077,7 +8058,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost) if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) blk_mq_map_queues(qmap); else - blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); + blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev, + vha->irq_offset); } struct scsi_host_template qla2xxx_driver_template = { @@ -8093,10 +8075,10 @@ struct scsi_host_template qla2xxx_driver_template = { .eh_bus_reset_handler = qla2xxx_eh_bus_reset, .eh_host_reset_handler = qla2xxx_eh_host_reset, - .slave_configure = qla2xxx_slave_configure, + .sdev_configure = qla2xxx_sdev_configure, - .slave_alloc = qla2xxx_slave_alloc, - .slave_destroy = qla2xxx_slave_destroy, + .sdev_init = qla2xxx_sdev_init, + .sdev_destroy = qla2xxx_sdev_destroy, .scan_finished = qla2xxx_scan_finished, .scan_start = qla2xxx_scan_start, .change_queue_depth = scsi_change_queue_depth, @@ -8122,7 +8104,7 @@ static const struct pci_error_handlers qla2xxx_err_handler = { .reset_done = qla_pci_reset_done, }; -static struct pci_device_id qla2xxx_pci_tbl[] = { +static const struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, @@ -8156,9 +8138,6 @@ MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); static struct pci_driver qla2xxx_pci_driver = { .name = QLA2XXX_DRIVER_NAME, - .driver = { - .owner = THIS_MODULE, - }, .id_table = qla2xxx_pci_tbl, .probe = qla2x00_probe_one, .remove = qla2x00_remove_one, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index c092a6b1ced4..9e7a407ba1b9 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -555,6 +555,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) struct qla_flt_location *fltl = (void *)req->ring; uint32_t *dcode = (uint32_t *)req->ring; uint8_t *buf = (void *)req->ring, *bcode, last_image; + int rc; /* * FLT-location structure resides after the last PCI region. @@ -584,14 +585,24 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) pcihdr = 0; do { /* Verify PCI expansion ROM header. 
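
The DPC loop restructure changes the shutdown contract: kthread_should_stop() is now checked immediately after every wakeup and is the only way out of the loop, while UNLOADING merely skips the work so the thread keeps sleeping until kthread_stop() reaps it. The canonical skeleton of that pattern, sketched:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int demo_worker(void *data)
	{
		set_current_state(TASK_INTERRUPTIBLE);

		while (1) {
			schedule();			/* sleep until woken */

			if (kthread_should_stop())
				break;			/* sole exit path */

			__set_current_state(TASK_RUNNING);

			/* ... do work; when unloading, skip it and loop ... */

			set_current_state(TASK_INTERRUPTIBLE);
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}
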
*/ - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + if (rc) { + ql_log(ql_log_info, vha, 0x016d, + "Unable to read PCI Expansion Rom Header (%x).\n", rc); + return QLA_FUNCTION_FAILED; + } bcode = buf + (pcihdr % 4); if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) goto end; /* Locate PCI data structure. */ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); - qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + rc = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + if (rc) { + ql_log(ql_log_info, vha, 0x0179, + "Unable to read PCI Data Structure (%x).\n", rc); + return QLA_FUNCTION_FAILED; + } bcode = buf + (pcihdr % 4); /* Validate signature of PCI data structure. */ @@ -606,7 +617,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) } while (!last_image); /* Now verify FLT-location structure. */ - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); + rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x017a, + "Unable to read FLT (%x).\n", rc); + return QLA_FUNCTION_FAILED; + } if (memcmp(fltl->sig, "QFLT", 4)) goto end; @@ -2120,8 +2136,8 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data) * @flash_id: Flash ID * * This function polls the device until bit 7 of what is read matches data - * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed - * out (a fatal error). The flash book recommeds reading bit 7 again after + * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed + * out (a fatal error). The flash book recommends reading bit 7 again after * reading bit 5 as a 1. * * Returns 0 on success, else non-zero. @@ -2605,13 +2621,18 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { struct qla_hw_data *ha = vha->hw; + int rc; /* Suspend HBA. */ scsi_block_requests(vha->host); set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Go with read. */ - qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); + rc = qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a0, + "Unable to perform optrom read(%x).\n", rc); + } /* Resume HBA. */ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); @@ -3412,7 +3433,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) struct active_regions active_regions = { }; if (IS_P3P_TYPE(ha)) - return ret; + return QLA_SUCCESS; if (!mbuf) return QLA_FUNCTION_FAILED; @@ -3432,20 +3453,31 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) do { /* Verify PCI expansion ROM header. */ - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + ret = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + if (ret) { + ql_log(ql_log_info, vha, 0x017d, + "Unable to read PCI EXP Rom Header(%x).\n", ret); + return QLA_FUNCTION_FAILED; + } + bcode = mbuf + (pcihdr % 4); if (memcmp(bcode, "\x55\xaa", 2)) { /* No signature */ ql_log(ql_log_fatal, vha, 0x0059, "No matching ROM signature.\n"); - ret = QLA_FUNCTION_FAILED; - break; + return QLA_FUNCTION_FAILED; } /* Locate PCI data structure. 
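
A recurring shape in this series: qla24xx_read_flash_data() now returns a status, and every consumer captures it, logs, and bails out instead of parsing whatever stale bytes sit in the buffer. Factored into an illustrative helper (the driver open-codes this at each call site; the debug id below is a placeholder):

	static int demo_read_flash(scsi_qla_host_t *vha, uint32_t *buf,
				   uint32_t faddr, uint32_t dwords,
				   const char *what)
	{
		int rc = qla24xx_read_flash_data(vha, buf, faddr, dwords);

		if (rc)
			ql_log(ql_log_info, vha, 0x0000,	/* placeholder id */
			       "Unable to read %s (%x).\n", what, rc);
		return rc;	/* callers turn this into QLA_FUNCTION_FAILED */
	}
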
*/ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); - qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + ret = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + if (ret) { + ql_log(ql_log_info, vha, 0x018e, + "Unable to read PCI Data Structure (%x).\n", ret); + return QLA_FUNCTION_FAILED; + } + bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. */ @@ -3454,8 +3486,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) ql_log(ql_log_fatal, vha, 0x005a, "PCI data struct not found pcir_adr=%x.\n", pcids); ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32); - ret = QLA_FUNCTION_FAILED; - break; + return QLA_FUNCTION_FAILED; } /* Read version */ @@ -3507,20 +3538,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) faddr = ha->flt_region_fw_sec; } - qla24xx_read_flash_data(vha, dcode, faddr, 8); - if (qla24xx_risc_firmware_invalid(dcode)) { - ql_log(ql_log_warn, vha, 0x005f, - "Unrecognized fw revision at %x.\n", - ha->flt_region_fw * 4); - ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); + ret = qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (ret) { + ql_log(ql_log_info, vha, 0x019e, + "Unable to read FW version (%x).\n", ret); + return ret; } else { - for (i = 0; i < 4; i++) - ha->fw_revision[i] = + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_warn, vha, 0x005f, + "Unrecognized fw revision at %x.\n", + ha->flt_region_fw * 4); + ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); + } else { + for (i = 0; i < 4; i++) + ha->fw_revision[i] = be32_to_cpu((__force __be32)dcode[4+i]); - ql_dbg(ql_dbg_init, vha, 0x0060, - "Firmware revision (flash) %u.%u.%u (%x).\n", - ha->fw_revision[0], ha->fw_revision[1], - ha->fw_revision[2], ha->fw_revision[3]); + ql_dbg(ql_dbg_init, vha, 0x0060, + "Firmware revision (flash) %u.%u.%u (%x).\n", + ha->fw_revision[0], ha->fw_revision[1], + ha->fw_revision[2], ha->fw_revision[3]); + } } /* Check for golden firmware and get version if available */ @@ -3531,18 +3568,23 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version)); faddr = ha->flt_region_gold_fw; - qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); - if (qla24xx_risc_firmware_invalid(dcode)) { - ql_log(ql_log_warn, vha, 0x0056, - "Unrecognized golden fw at %#x.\n", faddr); - ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); + ret = qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); + if (ret) { + ql_log(ql_log_info, vha, 0x019f, + "Unable to read Gold FW version (%x).\n", ret); return ret; - } - - for (i = 0; i < 4; i++) - ha->gold_fw_version[i] = - be32_to_cpu((__force __be32)dcode[4+i]); + } else { + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_warn, vha, 0x0056, + "Unrecognized golden fw at %#x.\n", faddr); + ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); + return QLA_FUNCTION_FAILED; + } + for (i = 0; i < 4; i++) + ha->gold_fw_version[i] = + be32_to_cpu((__force __be32)dcode[4+i]); + } return ret; } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index d7551b1443e4..1e81582085e3 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -23,7 +23,7 @@ #include <linux/delay.h> #include <linux/list.h> #include <linux/workqueue.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> @@ -1454,50 +1454,6 @@ static struct fc_port *qlt_create_sess( return sess; } 
-/* - * max_gen - specifies maximum session generation - * at which this deletion requestion is still valid - */ -void -qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) -{ - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct fc_port *sess = fcport; - unsigned long flags; - - if (!vha->hw->tgt.tgt_ops) - return; - - if (!tgt) - return; - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - if (tgt->tgt_stop) { - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - return; - } - if (!sess->se_sess) { - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - return; - } - - if (max_gen - sess->generation < 0) { - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, - "Ignoring stale deletion request for se_sess %p / sess %p" - " for port %8phC, req_gen %d, sess_gen %d\n", - sess->se_sess, sess, sess->port_name, max_gen, - sess->generation); - return; - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); - - sess->local = 1; - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - qlt_schedule_sess_for_deletion(sess); -} - static inline int test_tgt_sess_count(struct qla_tgt *tgt) { struct qla_hw_data *ha = tgt->ha; @@ -5539,81 +5495,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } -int -qlt_free_qfull_cmds(struct qla_qpair *qpair) -{ - struct scsi_qla_host *vha = qpair->vha; - struct qla_hw_data *ha = vha->hw; - unsigned long flags; - struct qla_tgt_cmd *cmd, *tcmd; - struct list_head free_list, q_full_list; - int rc = 0; - - if (list_empty(&ha->tgt.q_full_list)) - return 0; - - INIT_LIST_HEAD(&free_list); - INIT_LIST_HEAD(&q_full_list); - - spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); - if (list_empty(&ha->tgt.q_full_list)) { - spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); - return 0; - } - - list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); - spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); - - spin_lock_irqsave(qpair->qp_lock_ptr, flags); - list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { - if (cmd->q_full) - /* cmd->state is a borrowed field to hold status */ - rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); - else if (cmd->term_exchg) - rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); - - if (rc == -ENOMEM) - break; - - if (cmd->q_full) - ql_dbg(ql_dbg_io, vha, 0x3006, - "%s: busy sent for ox_id[%04x]\n", __func__, - be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); - else if (cmd->term_exchg) - ql_dbg(ql_dbg_io, vha, 0x3007, - "%s: Term exchg sent for ox_id[%04x]\n", __func__, - be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); - else - ql_dbg(ql_dbg_io, vha, 0x3008, - "%s: Unexpected cmd in QFull list %p\n", __func__, - cmd); - - list_move_tail(&cmd->cmd_list, &free_list); - - /* piggy back on hardware_lock for protection */ - vha->hw->tgt.num_qfull_cmds_alloc--; - } - spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - - cmd = NULL; - - list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { - list_del(&cmd->cmd_list); - /* This cmd was never sent to TCM. 
There is no need - * to schedule free or call free_cmd - */ - qlt_free_cmd(cmd); - } - - if (!list_empty(&q_full_list)) { - spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); - list_splice(&q_full_list, &vha->hw->tgt.q_full_list); - spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); - } - - return rc; -} - static void qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, uint16_t status) @@ -7091,16 +6972,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, } void -qlt_83xx_iospace_config(struct qla_hw_data *ha) -{ - if (!QLA_TGT_MODE_ENABLED()) - return; - - ha->msix_count += 1; /* For ATIO Q */ -} - - -void qlt_modify_vp_config(struct scsi_qla_host *vha, struct vp_config_entry_24xx *vpmod) { diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 354fca2e7feb..15a59c125c53 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -1014,7 +1014,6 @@ extern int qlt_lport_register(void *, u64, u64, u64, extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct fc_port *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); -extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); extern void qlt_exit(void); extern void qlt_free_session_done(struct work_struct *); @@ -1082,8 +1081,6 @@ extern void qlt_mem_free(struct qla_hw_data *); extern int qlt_stop_phase1(struct qla_tgt *); extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); -extern void qlt_83xx_iospace_config(struct qla_hw_data *); -extern int qlt_free_qfull_cmds(struct qla_qpair *); extern void qlt_logo_completion_handler(fc_port_t *, int); extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 7627fd807bc3..a491d6ee5c94 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -6,9 +6,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.02.09.200-k" +#define QLA2XXX_VERSION "10.02.09.400-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 2 #define QLA_DRIVER_PATCH_VER 9 -#define QLA_DRIVER_BETA_VER 200 +#define QLA_DRIVER_BETA_VER 400 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7e7460a747a4..ceaf1c7b1d17 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -24,7 +24,7 @@ #include <linux/string.h> #include <linux/configfs.h> #include <linux/ctype.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi_host.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c index abfa6ef60480..e3f85d6ea0db 100644 --- a/drivers/scsi/qla4xxx/ql4_attr.c +++ b/drivers/scsi/qla4xxx/ql4_attr.c @@ -10,7 +10,7 @@ static ssize_t qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj, - struct bin_attribute *ba, char *buf, loff_t off, + const struct bin_attribute *ba, char *buf, loff_t off, size_t count) { struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, @@ -28,7 +28,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj, static ssize_t qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj, - struct bin_attribute *ba, char *buf, loff_t off, + const struct bin_attribute *ba, char *buf, 
loff_t off, size_t count) { struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, @@ -104,19 +104,19 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj, return count; } -static struct bin_attribute sysfs_fw_dump_attr = { +static const struct bin_attribute sysfs_fw_dump_attr = { .attr = { .name = "fw_dump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, - .read = qla4_8xxx_sysfs_read_fw_dump, - .write = qla4_8xxx_sysfs_write_fw_dump, + .read_new = qla4_8xxx_sysfs_read_fw_dump, + .write_new = qla4_8xxx_sysfs_write_fw_dump, }; static struct sysfs_entry { char *name; - struct bin_attribute *attr; + const struct bin_attribute *attr; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr }, { NULL }, diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 249f1d7021d4..75125d2021f5 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -1641,6 +1641,7 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, struct ql4_chap_table *chap_table; uint32_t chap_size = 0; dma_addr_t chap_dma; + ssize_t secret_len; chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); if (chap_table == NULL) { @@ -1652,9 +1653,13 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, chap_table->flags |= BIT_6; /* peer */ else chap_table->flags |= BIT_7; /* local */ - chap_table->secret_len = strlen(password); - strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1); - strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1); + + secret_len = strscpy(chap_table->secret, password, + sizeof(chap_table->secret)); + if (secret_len < MIN_CHAP_SECRET_LEN) + goto cleanup_chap_table; + chap_table->secret_len = (uint8_t)secret_len; + strscpy(chap_table->name, username, sizeof(chap_table->name)); chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); if (is_qla40XX(ha)) { @@ -1679,6 +1684,8 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, memcpy((struct ql4_chap_table *)ha->chap_list + idx, chap_table, sizeof(struct ql4_chap_table)); } + +cleanup_chap_table: dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); if (rval != QLA_SUCCESS) ret = -EINVAL; @@ -2281,8 +2288,8 @@ int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param) mbox_cmd[0] = MBOX_CMD_SET_PARAM; if (param == SET_DRVR_VERSION) { mbox_cmd[1] = SET_DRVR_VERSION; - strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION, - MAX_DRVR_VER_LEN - 1); + strscpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION, + MAX_DRVR_VER_LEN); } else { ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n", __func__, param); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 47adff9f0506..da2fc66ffedd 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -973,11 +973,6 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) unsigned long off; unsigned offset, n; - struct crb_addr_pair { - long addr; - long data; - }; - /* Halt all the indiviual PEGs and other blocks of the ISP */ qla4_82xx_rom_lock(ha); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 675332e49a7b..d540d66e6ffc 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -160,7 +160,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_host_reset(struct 
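The ql4_mbx.c hunk above replaces strncpy() with strscpy(), which both bounds the copy and returns the resulting length (or -E2BIG on truncation), so the separate strlen() is no longer needed. A minimal sketch of the pattern; the structure and field sizes here are illustrative, not the driver's actual definitions:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>

struct chap_entry {			/* hypothetical, for illustration */
	u8 secret_len;
	char secret[32];
};

static int chap_set_secret(struct chap_entry *ce, const char *password)
{
	ssize_t len;

	/*
	 * strscpy() always NUL-terminates and returns the number of
	 * characters copied, or -E2BIG if the source was truncated,
	 * so one call replaces the strlen() + strncpy() pair and the
	 * stored length is guaranteed to match the stored string.
	 */
	len = strscpy(ce->secret, password, sizeof(ce->secret));
	if (len < 0)
		return -E2BIG;
	ce->secret_len = (u8)len;
	return 0;
}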
scsi_cmnd *cmd); -static int qla4xxx_slave_alloc(struct scsi_device *device); +static int qla4xxx_sdev_init(struct scsi_device *device); static umode_t qla4_attr_is_visible(int param_type, int param); static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); @@ -234,7 +234,7 @@ static struct scsi_host_template qla4xxx_driver_template = { .eh_host_reset_handler = qla4xxx_eh_host_reset, .eh_timed_out = qla4xxx_eh_cmd_timed_out, - .slave_alloc = qla4xxx_slave_alloc, + .sdev_init = qla4xxx_sdev_init, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, @@ -799,10 +799,10 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, chap_rec->chap_tbl_idx = i; strscpy(chap_rec->username, chap_table->name, - ISCSI_CHAP_AUTH_NAME_MAX_LEN); - strscpy(chap_rec->password, chap_table->secret, - QL4_CHAP_MAX_SECRET_LEN); - chap_rec->password_length = chap_table->secret_len; + sizeof(chap_rec->username)); + chap_rec->password_length = strscpy(chap_rec->password, + chap_table->secret, + sizeof(chap_rec->password)); if (chap_table->flags & BIT_7) /* local */ chap_rec->chap_type = CHAP_TYPE_OUT; @@ -4021,7 +4021,7 @@ static void qla4xxx_start_timer(struct scsi_qla_host *ha, static void qla4xxx_stop_timer(struct scsi_qla_host *ha) { - del_timer_sync(&ha->timer); + timer_delete_sync(&ha->timer); ha->timer_active = 0; } @@ -6291,8 +6291,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, tddb->tpgt = sess->tpgt; tddb->port = conn->persistent_port; - strscpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); - strscpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); + strscpy(tddb->iscsi_name, sess->targetname, sizeof(tddb->iscsi_name)); + strscpy(tddb->ip_addr, conn->persistent_address, sizeof(tddb->ip_addr)); } static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, @@ -7189,7 +7189,8 @@ exit_new_nt_list: * 1: if flashnode entry is non-persistent * 0: if flashnode entry is persistent **/ -static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) +static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, + const void *data) { struct iscsi_bus_flash_session *fnode_sess; @@ -7792,7 +7793,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, } strscpy(flash_tddb->iscsi_name, fnode_sess->targetname, - ISCSI_NAME_SIZE); + sizeof(flash_tddb->iscsi_name)); if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); @@ -8806,7 +8807,7 @@ skip_retry_init: DEBUG2(printk("scsi: %s: Starting kernel thread for " "qla4xxx_dpc\n", __func__)); sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); - ha->dpc_thread = create_singlethread_workqueue(buf); + ha->dpc_thread = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, buf); if (!ha->dpc_thread) { ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); ret = -ENODEV; @@ -9052,7 +9053,7 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) } } -static int qla4xxx_slave_alloc(struct scsi_device *sdev) +static int qla4xxx_sdev_init(struct scsi_device *sdev) { struct iscsi_cls_session *cls_sess; struct iscsi_session *sess; @@ -9846,7 +9847,7 @@ static const struct pci_error_handlers qla4xxx_err_handler = { .resume = qla4xxx_pci_resume, }; -static struct pci_device_id qla4xxx_pci_tbl[] = { +static const struct pci_device_id qla4xxx_pci_tbl[] = { { .vendor = PCI_VENDOR_ID_QLOGIC, .device = PCI_DEVICE_ID_QLOGIC_ISP4010, diff --git a/drivers/scsi/qlogicpti.c 
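The ql4_os.c hunk above converts the DPC thread from create_singlethread_workqueue() to alloc_ordered_workqueue(). A sketch of the replacement in a hypothetical driver context; the queue name and function are illustrative:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *dpc_wq;

static int start_dpc_queue(unsigned long host_no)
{
	/*
	 * An ordered workqueue keeps the old single-threaded FIFO
	 * execution guarantee, while WQ_MEM_RECLAIM adds a rescuer
	 * thread so the queue can make forward progress under memory
	 * pressure, which matters for work that sits on the I/O path.
	 */
	dpc_wq = alloc_ordered_workqueue("qla4xxx_%lu_dpc",
					 WQ_MEM_RECLAIM, host_no);
	if (!dpc_wq)
		return -ENODEV;
	return 0;
}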
b/drivers/scsi/qlogicpti.c index 6177f4798f3a..c9984ef57f26 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -975,7 +975,8 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); } -static int qlogicpti_slave_configure(struct scsi_device *sdev) +static int qlogicpti_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct qlogicpti *qpti = shost_priv(sdev->host); int tgt = sdev->id; @@ -1292,7 +1293,7 @@ static const struct scsi_host_template qpti_template = { .name = "qlogicpti", .info = qlogicpti_info, .queuecommand = qlogicpti_queuecommand, - .slave_configure = qlogicpti_slave_configure, + .sdev_configure = qlogicpti_sdev_configure, .eh_abort_handler = qlogicpti_abort, .eh_host_reset_handler = qlogicpti_reset, .can_queue = QLOGICPTI_REQ_QUEUE_LEN, @@ -1463,7 +1464,7 @@ static struct platform_driver qpti_sbus_driver = { .of_match_table = qpti_match, }, .probe = qpti_sbus_probe, - .remove_new = qpti_sbus_remove, + .remove = qpti_sbus_remove, }; module_platform_driver(qpti_sbus_driver); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3e0c0381277a..518a252eb6aa 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -55,7 +55,7 @@ #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) if (result < SCSI_VPD_HEADER_SIZE) return 0; + if (result > sizeof(vpd)) { + dev_warn_once(&sdev->sdev_gendev, + "%s: long VPD page 0 length: %d bytes\n", + __func__, result); + result = sizeof(vpd); + } + result -= SCSI_VPD_HEADER_SIZE; if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result)) return 0; @@ -503,22 +510,34 @@ void scsi_attach_vpd(struct scsi_device *sdev) return; for (i = 4; i < vpd_buf->len; i++) { - if (vpd_buf->data[i] == 0x0) + switch (vpd_buf->data[i]) { + case 0x0: scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0); - if (vpd_buf->data[i] == 0x80) + break; + case 0x80: scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80); - if (vpd_buf->data[i] == 0x83) + break; + case 0x83: scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83); - if (vpd_buf->data[i] == 0x89) + break; + case 0x89: scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89); - if (vpd_buf->data[i] == 0xb0) + break; + case 0xb0: scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0); - if (vpd_buf->data[i] == 0xb1) + break; + case 0xb1: scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1); - if (vpd_buf->data[i] == 0xb2) + break; + case 0xb2: scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2); - if (vpd_buf->data[i] == 0xb7) + break; + case 0xb7: scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7); + break; + default: + break; + } } kfree(vpd_buf); } @@ -666,6 +685,13 @@ void scsi_cdl_check(struct scsi_device *sdev) sdev->use_10_for_rw = 0; sdev->cdl_supported = 1; + + /* + * If the device supports CDL, make sure that the current drive + * feature status is consistent with the user controlled + * cdl_enable state. 
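The scsi_get_vpd_size() hunk above adds a clamp on the length the device reports for VPD page 0 before that length is used to walk a local buffer. The defensive pattern, reduced to its essentials (the helper name is made up):

#include <linux/types.h>
#include <linux/printk.h>

static int clamp_reported_len(size_t bufsz, int reported)
{
	/*
	 * A length field supplied by the device must never be trusted
	 * as an index into a fixed-size local buffer; warn once and
	 * clamp instead of reading past the end.
	 */
	if (reported > (int)bufsz) {
		pr_warn_once("long VPD page 0 length: %d bytes\n", reported);
		reported = bufsz;
	}
	return reported;
}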
+ */ + scsi_cdl_enable(sdev, sdev->cdl_enable); } else { sdev->cdl_supported = 0; } @@ -681,26 +707,23 @@ void scsi_cdl_check(struct scsi_device *sdev) */ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) { - struct scsi_mode_data data; - struct scsi_sense_hdr sshdr; - struct scsi_vpd *vpd; - bool is_ata = false; char buf[64]; + bool is_ata; int ret; if (!sdev->cdl_supported) return -EOPNOTSUPP; rcu_read_lock(); - vpd = rcu_dereference(sdev->vpd_pg89); - if (vpd) - is_ata = true; + is_ata = rcu_dereference(sdev->vpd_pg89); rcu_read_unlock(); /* * For ATA devices, CDL needs to be enabled with a SET FEATURES command. */ if (is_ata) { + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; char *buf_data; int len; @@ -709,16 +732,30 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) if (ret) return -EINVAL; - /* Enable CDL using the ATA feature page */ + /* Enable or disable CDL using the ATA feature page */ len = min_t(size_t, sizeof(buf), data.length - data.header_length - data.block_descriptor_length); buf_data = buf + data.header_length + data.block_descriptor_length; - if (enable) - buf_data[4] = 0x02; - else - buf_data[4] = 0; + + /* + * If we want to enable CDL and CDL is already enabled on the + * device, do nothing. This avoids needlessly resetting the CDL + * statistics on the device as that is implied by the CDL enable + * action. Similar to this, there is no need to do anything if + * we want to disable CDL and CDL is already disabled. + */ + if (enable) { + if ((buf_data[4] & 0x03) == 0x02) + goto out; + buf_data[4] &= ~0x03; + buf_data[4] |= 0x02; + } else { + if ((buf_data[4] & 0x03) == 0x00) + goto out; + buf_data[4] &= ~0x03; + } ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, &data, &sshdr); @@ -730,6 +767,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) } } +out: sdev->cdl_enable = enable; return 0; diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index 9c14fdf61037..e1a2a62b6910 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -9,9 +9,10 @@ #include <linux/errno.h> #include <linux/module.h> #include <uapi/linux/pr.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi_common.h> +MODULE_DESCRIPTION("SCSI functions used by both the initiator and the target code"); MODULE_LICENSE("GPL v2"); /* Command group 3 is reserved and should never be used. 
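The scsi_cdl_enable() rework above avoids issuing a MODE SELECT when the ATA feature page already holds the wanted CDL state, since rewriting the page resets the device's CDL statistics. The read-modify-write it performs on byte 4, as a standalone sketch:

#include <linux/types.h>

/* Returns true if the page changed and must be sent back via MODE SELECT. */
static bool cdl_update_page(u8 *buf_data, bool enable)
{
	u8 want = enable ? 0x02 : 0x00;

	if ((buf_data[4] & 0x03) == want)
		return false;	/* already in the requested state */
	buf_data[4] &= ~0x03;	/* clear the two CDL control bits */
	buf_data[4] |= want;
	return true;
}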
*/ diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index acf0592d63da..aef33d1e346a 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -47,7 +47,7 @@ #include <net/checksum.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -69,6 +69,12 @@ static const char *sdebug_version_date = "20210520"; /* Additional Sense Code (ASC) */ #define NO_ADDITIONAL_SENSE 0x0 +#define OVERLAP_ATOMIC_COMMAND_ASC 0x0 +#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23 +#define FILEMARK_DETECTED_ASCQ 0x1 +#define EOP_EOM_DETECTED_ASCQ 0x2 +#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4 +#define EOD_DETECTED_ASCQ 0x5 #define LOGICAL_UNIT_NOT_READY 0x4 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8 #define UNRECOVERED_READ_ERR 0x11 @@ -78,8 +84,10 @@ static const char *sdebug_version_date = "20210520"; #define INVALID_FIELD_IN_CDB 0x24 #define INVALID_FIELD_IN_PARAM_LIST 0x26 #define WRITE_PROTECTED 0x27 +#define UA_READY_ASC 0x28 #define UA_RESET_ASC 0x29 #define UA_CHANGED_ASC 0x2a +#define TOO_MANY_IN_PARTITION_ASC 0x3b #define TARGET_CHANGED_ASC 0x3f #define LUNS_CHANGED_ASCQ 0x0e #define INSUFF_RES_ASC 0x55 @@ -103,6 +111,7 @@ static const char *sdebug_version_date = "20210520"; #define READ_BOUNDARY_ASCQ 0x7 #define ATTEMPT_ACCESS_GAP 0x9 #define INSUFF_ZONE_ASCQ 0xe +/* see drivers/scsi/sense_codes.h */ /* Additional Sense Code Qualifier (ASCQ) */ #define ACK_NAK_TO 0x3 @@ -152,6 +161,12 @@ static const char *sdebug_version_date = "20210520"; #define DEF_VIRTUAL_GB 0 #define DEF_VPD_USE_HOSTNO 1 #define DEF_WRITESAME_LENGTH 0xFFFF +#define DEF_ATOMIC_WR 0 +#define DEF_ATOMIC_WR_MAX_LENGTH 128 +#define DEF_ATOMIC_WR_ALIGN 2 +#define DEF_ATOMIC_WR_GRAN 2 +#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH) +#define DEF_ATOMIC_WR_MAX_BNDRY 128 #define DEF_STRICT 0 #define DEF_STATISTICS false #define DEF_SUBMIT_QUEUES 1 @@ -164,6 +179,37 @@ static const char *sdebug_version_date = "20210520"; #define DEF_ZBC_MAX_OPEN_ZONES 8 #define DEF_ZBC_NR_CONV_ZONES 1 +/* Default parameters for tape drives */ +#define TAPE_DEF_DENSITY 0x0 +#define TAPE_BAD_DENSITY 0x65 +#define TAPE_DEF_BLKSIZE 0 +#define TAPE_MIN_BLKSIZE 512 +#define TAPE_MAX_BLKSIZE 1048576 +#define TAPE_EW 20 +#define TAPE_MAX_PARTITIONS 2 +#define TAPE_UNITS 10000 +#define TAPE_PARTITION_1_UNITS 1000 + +/* The tape block data definitions */ +#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30) +#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30) +#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30) +#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK) +#define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK) +#define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK) +#define IS_TAPE_BLOCK_FM(a) ((a & TAPE_BLOCK_FM_FLAG) != 0) +#define IS_TAPE_BLOCK_EOD(a) ((a & TAPE_BLOCK_EOD_FLAG) != 0) + +struct tape_block { + u32 fl_size; + unsigned char data[4]; +}; + +/* Flags for sense data */ +#define SENSE_FLAG_FILEMARK 0x80 +#define SENSE_FLAG_EOM 0x40 +#define SENSE_FLAG_ILI 0x20 + #define SDEBUG_LUN_0_VAL 0 /* bit mask values for sdebug_opts */ @@ -207,7 +253,8 @@ static const char *sdebug_version_date = "20210520"; #define SDEBUG_UA_LUNS_CHANGED 5 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */ #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7 -#define SDEBUG_NUM_UAS 8 +#define SDEBUG_UA_NOT_READY_TO_READY 8 +#define SDEBUG_NUM_UAS 9 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this * sector on read commands: */ @@ -247,17 +294,20 @@ static 
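The TAPE_BLOCK_* macros introduced above pack a tape block's kind and size into one 32-bit word: the top two bits carry the filemark/EOD flags and the low 30 bits the block size. A small sketch of encoding and decoding under those definitions (the helper names are made up):

#include <linux/types.h>

#define FM_FLAG		((u32)0x1 << 30)	/* filemark */
#define EOD_FLAG	((u32)0x2 << 30)	/* end of data */
#define MARK_MASK	((u32)0x3 << 30)
#define SIZE_MASK	(~MARK_MASK)

static u32 tape_encode(u32 size, u32 mark_flags)
{
	/* size occupies bits 29..0, the mark kind bits 31..30 */
	return (size & SIZE_MASK) | (mark_flags & MARK_MASK);
}

static bool tape_is_filemark(u32 fl_size)
{
	return (fl_size & FM_FLAG) != 0;
}

static u32 tape_size(u32 fl_size)
{
	return fl_size & SIZE_MASK;
}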
const char *sdebug_version_date = "20210520"; #define FF_SA (F_SA_HIGH | F_SA_LOW) #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY) +/* Device selection bit mask */ +#define DS_ALL 0xffffffff +#define DS_SBC (1 << TYPE_DISK) +#define DS_SSC (1 << TYPE_TAPE) +#define DS_ZBC (1 << TYPE_ZBC) + +#define DS_NO_SSC (DS_ALL & ~DS_SSC) + #define SDEBUG_MAX_PARTS 4 #define SDEBUG_MAX_CMD_LEN 32 #define SDEB_XA_NOT_IN_USE XA_MARK_1 -static struct kmem_cache *queued_cmd_cache; - -#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble) -#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; } - /* Zone types (zbcr05 table 25) */ enum sdebug_z_type { ZBC_ZTYPE_CNV = 0x1, @@ -354,6 +404,19 @@ struct sdebug_dev_info { ktime_t create_ts; /* time since bootup that this device was created */ struct sdeb_zone_state *zstate; + /* For tapes */ + unsigned int tape_blksize; + unsigned int tape_density; + unsigned char tape_partition; + unsigned char tape_nbr_partitions; + unsigned char tape_pending_nbr_partitions; + unsigned int tape_pending_part_0_size; + unsigned int tape_pending_part_1_size; + unsigned char tape_dce; + unsigned int tape_location[TAPE_MAX_PARTITIONS]; + unsigned int tape_eop[TAPE_MAX_PARTITIONS]; + struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS]; + struct dentry *debugfs_entry; struct spinlock list_lock; struct list_head inject_err_list; @@ -374,7 +437,9 @@ struct sdebug_host_info { /* There is an xarray of pointers to this struct's objects, one per host */ struct sdeb_store_info { - rwlock_t macc_lck; /* for atomic media access on this store */ + rwlock_t macc_data_lck; /* for media data access on this store */ + rwlock_t macc_meta_lck; /* for atomic media meta access on this store */ + rwlock_t macc_sector_lck; /* per-sector media data access on this store */ u8 *storep; /* user data storage (ram) */ struct t10_pi_tuple *dif_storep; /* protection info */ void *map_storep; /* provisioning map */ @@ -398,16 +463,9 @@ struct sdebug_defer { enum sdeb_defer_type defer_t; }; -struct sdebug_queued_cmd { - /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue - * instance indicates this slot is in use. 
- */ - struct sdebug_defer sd_dp; - struct scsi_cmnd *scmd; -}; - struct sdebug_scsi_cmd { spinlock_t lock; + struct sdebug_defer sd_dp; }; static atomic_t sdebug_cmnd_count; /* number of incoming commands */ @@ -422,6 +480,7 @@ struct opcode_info_t { /* for terminating element */ u8 opcode; /* if num_attached > 0, preferred */ u16 sa; /* service action */ + u32 devsel; /* device type mask for this definition */ u32 flags; /* OR-ed set of SDEB_F_* */ int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); const struct opcode_info_t *arrp; /* num_attached elements or NULL */ @@ -463,22 +522,29 @@ enum sdeb_opcode_index { SDEB_I_PRE_FETCH = 29, /* 10, 16 */ SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */ SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */ - SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */ + SDEB_I_ATOMIC_WRITE_16 = 32, + SDEB_I_READ_BLOCK_LIMITS = 33, + SDEB_I_LOCATE = 34, + SDEB_I_WRITE_FILEMARKS = 35, + SDEB_I_SPACE = 36, + SDEB_I_FORMAT_MEDIUM = 37, + SDEB_I_ERASE = 38, + SDEB_I_LAST_ELEM_P1 = 39, /* keep this last (previous + 1) */ }; static const unsigned char opcode_ind_arr[256] = { /* 0x0; 0x0->0x1f: 6 byte cdbs */ SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE, - 0, 0, 0, 0, + SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0, SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0, - 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, - SDEB_I_RELEASE, - 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, + SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0, + SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE, + 0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, SDEB_I_ALLOW_REMOVAL, 0, /* 0x20; 0x20->0x3f: 10 byte cdbs */ 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0, - SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY, + SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY, 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0, 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0, /* 0x40; 0x40->0x5f: 10 byte cdbs */ @@ -497,7 +563,8 @@ static const unsigned char opcode_ind_arr[256] = { 0, 0, 0, SDEB_I_VERIFY, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0, - 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16, + 0, 0, 0, 0, + SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16, /* 0xa0; 0xa0->0xbf: 12 byte cdbs */ SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN, SDEB_I_MAINT_OUT, 0, 0, 0, @@ -528,7 +595,9 @@ static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *); @@ -547,10 +616,19 @@ static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *); static 
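The opcode_ind_arr[] table above maps every possible CDB opcode byte to an index into opcode_info_arr[], and the new devsel field lets one opcode resolve to different handlers per device type (READ(6) for disks versus tapes, for example). A much-simplified sketch of the lookup, ignoring service-action matching and flag checks:

static const struct opcode_info_t *
lookup_opcode(const u8 *cdb, u32 devsel)
{
	u8 idx = opcode_ind_arr[cdb[0]];	/* 0 means unknown opcode */
	const struct opcode_info_t *oip = &opcode_info_arr[idx];
	int k;

	if (oip->devsel & devsel)
		return oip;
	/* fall back to the attached variants for other device types */
	for (k = 0; k < oip->num_attached; k++)
		if (oip->arrp[k].devsel & devsel)
			return &oip->arrp[k];
	return NULL;
}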
int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *); static int sdebug_do_add_host(bool mk_new_store); static int sdebug_add_host_helper(int per_host_idx); @@ -559,121 +637,127 @@ static int sdebug_add_store(void); static void sdebug_erase_store(int idx, struct sdeb_store_info *sip); static void sdebug_erase_all_stores(bool apart_from_first); -static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp); - /* * The following are overflow arrays for cdbs that "hit" the same index in * the opcode_info_arr array. The most time sensitive (or commonly used) cdb * should be placed in opcode_info_arr[], the others should be placed here. */ static const struct opcode_info_t msense_iarr[] = { - {0, 0x1a, 0, F_D_IN, NULL, NULL, + {0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL, {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t mselect_iarr[] = { - {0, 0x15, 0, F_D_OUT, NULL, NULL, + {0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL, {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t read_iarr[] = { - {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */ + {0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */ + {0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */ {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */ + {0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */ + {6, 0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7, 0, 0, 0, 0} }, }; static const struct opcode_info_t write_iarr[] = { - {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */ + {0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */ + {0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */ + {0, 0xa, 0, DS_SSC, F_D_OUT | 
FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */ + NULL, {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0} }, + {0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7, 0, 0, 0, 0} }, }; static const struct opcode_info_t verify_iarr[] = { - {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */ + {0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */ NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t sa_in_16_iarr[] = { - {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, + {0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */ - {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL, + {0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL, {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0} }, /* GET STREAM STATUS */ }; static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */ - {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0, + {0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0, NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa, 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */ - {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + {0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat, NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8, 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */ }; static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */ - {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, + {0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */ - {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, + {0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */ }; static const struct opcode_info_t write_same_iarr[] = { - {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL, + {0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL, {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */ }; static const struct opcode_info_t reserve_iarr[] = { - {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */ + {0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RESERVE(6) */ {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t release_iarr[] = { - {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */ + {0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RELEASE(6) */ {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t sync_cache_iarr[] = { - {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, + {0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */ }; static const struct opcode_info_t pre_fetch_iarr[] = { - {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL, + 
{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL, {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */ + {0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL, + {10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, /* READ POSITION (10) */ }; static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */ - {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL, + {0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL, {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */ - {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL, + {0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL, {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */ - {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL, + {0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL, {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */ }; static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */ - {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL, + {0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL, {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */ }; @@ -684,112 +768,132 @@ static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */ * REPORT SUPPORTED OPERATION CODES. */ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = { /* 0 */ - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */ + {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */ + {0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */ {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL, + {0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL, {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORT LUNS */ - {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, + {0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL, {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ + {0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, /* 5 */ - {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */ + {ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN, /* MODE SENSE(10) */ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */ + {ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT, /* MODE SELECT(10) */ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */ + {0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */ {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */ + {0, 0x25, 0, 
DS_NO_SSC, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */ {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */ + {ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* 10 */ - {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO, + {ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, write_iarr, /* WRITE(16) */ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, - {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ + {0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, + {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} }, - {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + {0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat, NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */ - {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN, + {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN, resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* 15 */ - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ + {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(verify_iarr), 0x8f, 0, + {ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */ verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, - {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO, + {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO, resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff, 0xff, 0xff} }, - {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT, + {ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) <no response function> */ {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT, + {ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT, NULL, release_iarr, /* RELEASE(10) <no response function> */ {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 20 */ - {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */ + {0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */ {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? 
*/ + {0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL, {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ + {0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */ + {0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */ {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ + {0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 25 */ - {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, + {0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* WRITE_BUFFER */ - {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, + {ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS, + {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, sync_cache_iarr, {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ - {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL, + {0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL, {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */ - {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO, + {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, pre_fetch_iarr, {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* PRE-FETCH (10) */ + /* READ POSITION (10) */ /* 30 */ - {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS, + {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */ {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} }, - {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS, + {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} }, +/* 32 */ + {0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, + resp_atomic_write, NULL, /* ATOMIC WRITE 16 */ + {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, + {0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */ + {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */ + {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, + {0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */ + {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL, /* SPACE (6) */ + {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */ + {6, 0x3, 0x7, 0, 0, 
0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL, /* ERASE (6) */ + {6, 0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +/* 39 */ /* sentinel */ - {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ + {0xff, 0, 0, 0, 0, NULL, NULL, /* terminating element */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; @@ -835,6 +939,13 @@ static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY; static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC; static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH; +static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR; +static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH; +static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN; +static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN; +static unsigned int sdebug_atomic_wr_max_length_bndry = + DEF_ATOMIC_WR_MAX_LENGTH_BNDRY; +static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY; static int sdebug_uuid_ctl = DEF_UUID_CTL; static bool sdebug_random = DEF_RANDOM; static bool sdebug_per_host_store = DEF_PER_HOST_STORE; @@ -926,6 +1037,20 @@ static const int device_qfull_result = static const int condition_met_result = SAM_STAT_CONDITION_MET; static struct dentry *sdebug_debugfs_root; +static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain); + +static u32 sdebug_get_devsel(struct scsi_device *sdp) +{ + unsigned char devtype = sdp->type; + u32 devsel; + + if (devtype < 32) + devsel = (1 << devtype); + else + devsel = DS_ALL; + + return devsel; +} static void sdebug_err_free(struct rcu_head *head) { @@ -1148,6 +1273,8 @@ static int sdebug_target_alloc(struct scsi_target *starget) if (!targetip) return -ENOMEM; + async_synchronize_full_domain(&sdebug_async_domain); + targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev), sdebug_debugfs_root); @@ -1174,7 +1301,8 @@ static void sdebug_target_destroy(struct scsi_target *starget) targetip = (struct sdebug_target_info *)starget->hostdata; if (targetip) { starget->hostdata = NULL; - async_schedule(sdebug_tartget_cleanup_async, targetip); + async_schedule_domain(sdebug_tartget_cleanup_async, targetip, + &sdebug_async_domain); } } @@ -1188,6 +1316,11 @@ static inline bool scsi_debug_lbp(void) (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); } +static inline bool scsi_debug_atomic_write(void) +{ + return sdebug_fake_rw == 0 && sdebug_atomic_wr; +} + static void *lba2fake_store(struct sdeb_store_info *sip, unsigned long long lba) { @@ -1288,6 +1421,30 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) my_name, key, asc, asq); } +/* Sense data that has information fields for tapes */ +static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq, + unsigned int information, unsigned char tape_flags) +{ + if (!scp->sense_buffer) { + sdev_printk(KERN_ERR, scp->device, + "%s: sense_buffer is NULL\n", __func__); + return; + } + memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + + scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq); + /* only fixed format so far */ + + scp->sense_buffer[0] |= 0x80; /* valid */ + scp->sense_buffer[2] |= tape_flags; + put_unaligned_be32(information, &scp->sense_buffer[3]); + + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", + my_name, key, asc, asq); +} + static void mk_sense_invalid_opcode(struct scsi_cmnd *scp) { 
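mk_sense_info_tape() above builds fixed-format sense data with the tape-specific fields filled in. For reference, the fixed-format layout it relies on (SPC): byte 0 bit 7 is VALID, byte 2 carries the sense key plus the FILEMARK/EOM/ILI flags, and bytes 3..6 hold the big-endian INFORMATION field. A compact standalone sketch:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/unaligned.h>

static void fill_fixed_sense(u8 *sb, u8 key, u8 asc, u8 ascq,
			     u32 information, u8 tape_flags)
{
	memset(sb, 0, 18);		/* minimal fixed-format length */
	sb[0] = 0x70 | 0x80;		/* current error + VALID bit */
	sb[2] = (key & 0x0f) | tape_flags; /* e.g. SENSE_FLAG_FILEMARK */
	put_unaligned_be32(information, &sb[3]);
	sb[7] = 10;			/* additional sense length */
	sb[12] = asc;
	sb[13] = ascq;
}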
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); @@ -1450,6 +1607,12 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (sdebug_verbose) cp = "reported luns data has changed"; break; + case SDEBUG_UA_NOT_READY_TO_READY: + mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC, + 0); + if (sdebug_verbose) + cp = "not ready to ready transition/media change"; + break; default: pr_warn("unexpected unit attention code=%d\n", k); if (sdebug_verbose) @@ -1815,6 +1978,14 @@ static int inquiry_vpd_b0(unsigned char *arr) /* Maximum WRITE SAME Length */ put_unaligned_be64(sdebug_write_same_length, &arr[32]); + if (sdebug_atomic_wr) { + put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]); + put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]); + put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]); + put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]); + put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]); + } + return 0x3c; /* Mandatory page length for Logical Block Provisioning */ } @@ -1898,13 +2069,19 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) unsigned char *cmd = scp->cmnd; u32 alloc_len, n; int ret; - bool have_wlun, is_disk, is_zbc, is_disk_zbc; + bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape; alloc_len = get_unaligned_be16(cmd + 3); arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); if (! arr) return DID_REQUEUE << 16; - is_disk = (sdebug_ptype == TYPE_DISK); + if (scp->device->type >= 32) { + is_disk = (sdebug_ptype == TYPE_DISK); + is_tape = (sdebug_ptype == TYPE_TAPE); + } else { + is_disk = (scp->device->type == TYPE_DISK); + is_tape = (scp->device->type == TYPE_TAPE); + } is_zbc = devip->zoned; is_disk_zbc = (is_disk || is_zbc); have_wlun = scsi_is_wlun(scp->device->lun); @@ -1913,7 +2090,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ else - pq_pdt = (sdebug_ptype & 0x1f); + pq_pdt = ((scp->device->type >= 32 ? + sdebug_ptype : scp->device->type) & 0x1f); arr[0] = pq_pdt; if (0x2 & cmd[1]) { /* CMDDT bit set */ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); @@ -2036,7 +2214,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (is_disk) { /* SBC-4 no version claimed */ put_unaligned_be16(0x600, arr + n); n += 2; - } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ + } else if (is_tape) { /* SSC-4 rev 3 */ put_unaligned_be16(0x525, arr + n); n += 2; } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */ @@ -2145,6 +2323,14 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) changing = (stopped_state != want_stop); if (changing) atomic_xchg(&devip->stopped, want_stop); + if (scp->device->type == TYPE_TAPE && !want_stop) { + int i; + + set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! 
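The new SDEBUG_UA_NOT_READY_TO_READY code above plugs into scsi_debug's one-shot unit attention scheme: an event sets a bit in the per-device uas_bm bitmap, and make_ua() reports and clears it on the next command. A reduced sketch of that deliver-once pattern, reusing names from the code above:

#include <linux/bitops.h>

static int report_pending_ua(struct scsi_cmnd *scp, unsigned long *uas_bm)
{
	int k = find_first_bit(uas_bm, SDEBUG_NUM_UAS);

	if (k == SDEBUG_NUM_UAS)
		return 0;		/* no unit attention pending */
	clear_bit(k, uas_bm);		/* deliver it exactly once */
	if (k == SDEBUG_UA_NOT_READY_TO_READY)
		mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC, 0);
	return check_condition_result;
}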
*/ + for (i = 0; i < TAPE_MAX_PARTITIONS; i++) + devip->tape_location[i] = 0; + devip->tape_partition = 0; + } if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */ return SDEG_RES_IMMED_MASK; else @@ -2312,11 +2498,12 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, u8 reporting_opts, req_opcode, sdeb_i, supp; u16 req_sa, u; u32 alloc_len, a_len; - int k, offset, len, errsts, count, bump, na; + int k, offset, len, errsts, bump, na; const struct opcode_info_t *oip; const struct opcode_info_t *r_oip; u8 *arr; u8 *cmd = scp->cmnd; + u32 devsel = sdebug_get_devsel(scp->device); rctd = !!(cmd[2] & 0x80); reporting_opts = cmd[2] & 0x7; @@ -2339,34 +2526,30 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, } switch (reporting_opts) { case 0: /* all commands */ - /* count number of commands */ - for (count = 0, oip = opcode_info_arr; - oip->num_attached != 0xff; ++oip) { - if (F_INV_OP & oip->flags) - continue; - count += (oip->num_attached + 1); - } bump = rctd ? 20 : 8; - put_unaligned_be32(count * bump, arr); for (offset = 4, oip = opcode_info_arr; oip->num_attached != 0xff && offset < a_len; ++oip) { if (F_INV_OP & oip->flags) continue; + if ((devsel & oip->devsel) != 0) { + arr[offset] = oip->opcode; + put_unaligned_be16(oip->sa, arr + offset + 2); + if (rctd) + arr[offset + 5] |= 0x2; + if (FF_SA & oip->flags) + arr[offset + 5] |= 0x1; + put_unaligned_be16(oip->len_mask[0], arr + offset + 6); + if (rctd) + put_unaligned_be16(0xa, arr + offset + 8); + offset += bump; + } na = oip->num_attached; - arr[offset] = oip->opcode; - put_unaligned_be16(oip->sa, arr + offset + 2); - if (rctd) - arr[offset + 5] |= 0x2; - if (FF_SA & oip->flags) - arr[offset + 5] |= 0x1; - put_unaligned_be16(oip->len_mask[0], arr + offset + 6); - if (rctd) - put_unaligned_be16(0xa, arr + offset + 8); r_oip = oip; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { if (F_INV_OP & oip->flags) continue; - offset += bump; + if ((devsel & oip->devsel) == 0) + continue; arr[offset] = oip->opcode; put_unaligned_be16(oip->sa, arr + offset + 2); if (rctd) @@ -2374,14 +2557,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, if (FF_SA & oip->flags) arr[offset + 5] |= 0x1; put_unaligned_be16(oip->len_mask[0], - arr + offset + 6); + arr + offset + 6); if (rctd) put_unaligned_be16(0xa, arr + offset + 8); + offset += bump; } oip = r_oip; - offset += bump; } + put_unaligned_be32(offset - 4, arr); break; case 1: /* one command: opcode only */ case 2: /* one command: opcode plus service action */ @@ -2407,13 +2591,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, return check_condition_result; } if (0 == (FF_SA & oip->flags) && - req_opcode == oip->opcode) + (devsel & oip->devsel) != 0 && + req_opcode == oip->opcode) supp = 3; else if (0 == (FF_SA & oip->flags)) { na = oip->num_attached; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { - if (req_opcode == oip->opcode) + if (req_opcode == oip->opcode && + (devsel & oip->devsel) != 0) break; } supp = (k >= na) ? 1 : 3; @@ -2421,7 +2607,8 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, na = oip->num_attached; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { - if (req_sa == oip->sa) + if (req_sa == oip->sa && + (devsel & oip->devsel) != 0) break; } supp = (k >= na) ? 
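The "report all" branch of resp_rsup_opcodes() above no longer pre-counts matching commands; it emits one descriptor per command that passes the device-type mask and then backfills the 4-byte length header from the final offset. The shape of that single-pass-then-backfill pattern, simplified (no timeout descriptors, no attached-variant walk):

static u32 emit_all_descriptors(u8 *arr, u32 a_len, u32 devsel)
{
	u32 offset = 4;			/* reserve the length header */
	const struct opcode_info_t *oip;

	for (oip = opcode_info_arr; oip->num_attached != 0xff; ++oip) {
		if ((oip->flags & F_INV_OP) || !(oip->devsel & devsel))
			continue;
		if (offset + 8 > a_len)
			break;
		arr[offset] = oip->opcode;
		put_unaligned_be16(oip->sa, arr + offset + 2);
		put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
		offset += 8;		/* 20 when RCTD is set */
	}
	put_unaligned_be32(offset - 4, arr);	/* backfill total length */
	return offset;
}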
1 : 3; @@ -2677,6 +2864,76 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol) return sizeof(sas_sha_m_pg); } +static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0, + 0xff, 0xff, 0x00, 0x00}; + +static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target) +{ /* Partition page for mode_sense (tape) */ + memcpy(p, partition_pg, sizeof(partition_pg)); + if (pcontrol == 1) + memset(p + 2, 0, sizeof(partition_pg) - 2); + return sizeof(partition_pg); +} + +static int process_medium_part_m_pg(struct sdebug_dev_info *devip, + unsigned char *new, int pg_len) +{ + int new_nbr, p0_size, p1_size; + + if ((new[4] & 0x80) != 0) { /* FDP */ + partition_pg[4] |= 0x80; + devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS; + devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS; + devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS; + } else { + new_nbr = new[3] + 1; + if (new_nbr > TAPE_MAX_PARTITIONS) + return 3; + if ((new[4] & 0x40) != 0) { /* SDP */ + p1_size = TAPE_PARTITION_1_UNITS; + p0_size = TAPE_UNITS - p1_size; + if (p0_size < 100) + return 4; + } else if ((new[4] & 0x20) != 0) { + if (new_nbr > 1) { + p0_size = get_unaligned_be16(new + 8); + p1_size = get_unaligned_be16(new + 10); + if (p1_size == 0xFFFF) + p1_size = TAPE_UNITS - p0_size; + else if (p0_size == 0xFFFF) + p0_size = TAPE_UNITS - p1_size; + if (p0_size < 100 || p1_size < 100) + return 8; + } else { + p0_size = TAPE_UNITS; + p1_size = 0; + } + } else + return 6; + devip->tape_pending_nbr_partitions = new_nbr; + devip->tape_pending_part_0_size = p0_size; + devip->tape_pending_part_1_size = p1_size; + partition_pg[3] = new_nbr; + devip->tape_pending_nbr_partitions = new_nbr; + } + + return 0; +} + +static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target, + unsigned char dce) +{ /* Compression page for mode_sense (tape) */ + unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 00, 00}; + + memcpy(p, compression_pg, sizeof(compression_pg)); + if (dce) + p[2] |= 0x80; + if (pcontrol == 1) + memset(p + 2, 0, sizeof(compression_pg) - 2); + return sizeof(compression_pg); +} + /* PAGE_SIZE is more than necessary but provides room for future expansion. */ #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE @@ -2691,7 +2948,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp, unsigned char *ap; unsigned char *arr __free(kfree); unsigned char *cmd = scp->cmnd; - bool dbd, llbaa, msense_6, is_disk, is_zbc; + bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape; arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC); if (!arr) @@ -2702,14 +2959,14 @@ static int resp_mode_sense(struct scsi_cmnd *scp, subpcode = cmd[3]; msense_6 = (MODE_SENSE == cmd[0]); llbaa = msense_6 ? false : !!(cmd[1] & 0x10); - is_disk = (sdebug_ptype == TYPE_DISK); + is_disk = (scp->device->type == TYPE_DISK); is_zbc = devip->zoned; - if ((is_disk || is_zbc) && !dbd) + is_tape = (scp->device->type == TYPE_TAPE); + if ((is_disk || is_zbc || is_tape) && !dbd) bd_len = llbaa ? 16 : 8; else bd_len = 0; alloc_len = msense_6 ? 
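resp_partition_m_pg() and resp_compression_m_pg() above both follow the usual MODE SENSE page-control convention: copy the current values, and for PC=1 (changeable values) blank everything after the two-byte header, reporting every field as non-changeable. The generic shape:

#include <linux/string.h>
#include <linux/types.h>

static int fill_mode_page(u8 *p, const u8 *tmpl, int len, int pcontrol)
{
	memcpy(p, tmpl, len);		/* page code, length, current values */
	if (pcontrol == 1)		/* changeable-values request */
		memset(p + 2, 0, len - 2);	/* nothing is changeable */
	return len;			/* bytes written into the response */
}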
cmd[4] : get_unaligned_be16(cmd + 7); - memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); if (0x3 == pcontrol) { /* Saving values not supported */ mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); return check_condition_result; @@ -2743,15 +3000,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp, put_unaligned_be32(0xffffffff, ap + 0); else put_unaligned_be32(sdebug_capacity, ap + 0); - put_unaligned_be16(sdebug_sector_size, ap + 6); + if (is_tape) { + ap[0] = devip->tape_density; + put_unaligned_be16(devip->tape_blksize, ap + 6); + } else + put_unaligned_be16(sdebug_sector_size, ap + 6); offset += bd_len; ap = arr + offset; } else if (16 == bd_len) { + if (is_tape) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4); + return check_condition_result; + } put_unaligned_be64((u64)sdebug_capacity, ap + 0); put_unaligned_be32(sdebug_sector_size, ap + 12); offset += bd_len; ap = arr + offset; } + if (cmd[2] == 0) + goto only_bd; /* Only block descriptor requested */ /* * N.B. If len>0 before resp_*_pg() call, then form of that call should be: @@ -2807,6 +3074,18 @@ static int resp_mode_sense(struct scsi_cmnd *scp, } offset += len; break; + case 0xf: /* Compression Mode Page (tape) */ + if (!is_tape) + goto bad_pcode; + len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce); + offset += len; + break; + case 0x11: /* Partition Mode Page (tape) */ + if (!is_tape) + goto bad_pcode; + len = resp_partition_m_pg(ap, pcontrol, target); + offset += len; + break; case 0x19: /* if spc==1 then sas phy, control+discover */ if (subpcode > 0x2 && subpcode < 0xff) goto bad_subpcode; @@ -2852,6 +3131,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp, default: goto bad_pcode; } +only_bd: if (msense_6) arr[0] = offset - 1; else @@ -2895,8 +3175,34 @@ static int resp_mode_select(struct scsi_cmnd *scp, __func__, param_len, res); md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); - off = bd_len + (mselect6 ? 4 : 8); - if (md_len > 2 || off >= res) { + off = (mselect6 ? 4 : 8); + if (scp->device->type == TYPE_TAPE) { + int blksize; + + if (bd_len != 8) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, + mselect6 ? 
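The MODE SELECT handling that continues in the hunk below validates the block length from the tape's block descriptor: zero selects variable-length blocks, anything else must fall within the advertised limits and be a multiple of four. That check, isolated (the helper name is made up):

static bool tape_blksize_valid(unsigned int blksize)
{
	if (blksize == 0)
		return true;		/* variable-length block mode */
	return blksize >= TAPE_MIN_BLKSIZE &&
	       blksize <= TAPE_MAX_BLKSIZE &&
	       (blksize % 4) == 0;
}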
3 : 6, -1); + return check_condition_result; + } + if (arr[off] == TAPE_BAD_DENSITY) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); + return check_condition_result; + } + blksize = get_unaligned_be16(arr + off + 6); + if (blksize != 0 && + (blksize < TAPE_MIN_BLKSIZE || + blksize > TAPE_MAX_BLKSIZE || + (blksize % 4) != 0)) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1); + return check_condition_result; + } + devip->tape_density = arr[off]; + devip->tape_blksize = blksize; + } + off += bd_len; + if (off >= res) + return 0; /* No page written, just descriptors */ + if (md_len > 2) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; } @@ -2934,6 +3240,25 @@ static int resp_mode_select(struct scsi_cmnd *scp, goto set_mode_changed_ua; } break; + case 0xf: /* Compression mode page */ + if (scp->device->type != TYPE_TAPE) + goto bad_pcode; + if ((arr[off + 2] & 0x40) != 0) { + devip->tape_dce = (arr[off + 2] & 0x80) != 0; + return 0; + } + break; + case 0x11: /* Medium Partition Mode Page (tape) */ + if (scp->device->type == TYPE_TAPE) { + int fld; + + fld = process_medium_part_m_pg(devip, &arr[off], pg_len); + if (fld == 0) + return 0; + mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1); + return check_condition_result; + } + break; case 0x1c: /* Informational Exceptions Mode page */ if (iec_m_pg[1] == arr[off + 1]) { memcpy(iec_m_pg + 2, arr + off + 2, @@ -2949,6 +3274,10 @@ static int resp_mode_select(struct scsi_cmnd *scp, set_mode_changed_ua: set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); return 0; + +bad_pcode: + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); + return check_condition_result; } static int resp_temp_l_pg(unsigned char *arr) @@ -3088,6 +3417,298 @@ static int resp_log_sense(struct scsi_cmnd *scp, min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); } +enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6}; +static int resp_read_blklimits(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ]; + + arr[0] = 4; + put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1); + put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4); + return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ); +} + +static int resp_locate(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd; + unsigned int i, pos; + struct tape_block *blp; + int partition; + + if ((cmd[1] & 0x02) != 0) { + if (cmd[8] >= devip->tape_nbr_partitions) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1); + return check_condition_result; + } + devip->tape_partition = cmd[8]; + } + pos = get_unaligned_be32(cmd + 3); + partition = devip->tape_partition; + + for (i = 0, blp = devip->tape_blocks[partition]; + i < pos && i < devip->tape_eop[partition]; i++, blp++) + if (IS_TAPE_BLOCK_EOD(blp->fl_size)) + break; + if (i < pos) { + devip->tape_location[partition] = i; + mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0); + return check_condition_result; + } + devip->tape_location[partition] = pos; + + return 0; +} + +static int resp_write_filemarks(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd; + unsigned int i, count, pos; + u32 data; + int partition = devip->tape_partition; + + if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); + return check_condition_result; + } + count = get_unaligned_be24(cmd + 2); + data = TAPE_BLOCK_FM_FLAG; + for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) { + if (pos >= 
devip->tape_eop[partition] - 1) { /* don't overwrite EOD */ + devip->tape_location[partition] = devip->tape_eop[partition] - 1; + mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE, + EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM); + return check_condition_result; + } + (devip->tape_blocks[partition] + pos)->fl_size = data; + } + (devip->tape_blocks[partition] + pos)->fl_size = + TAPE_BLOCK_EOD_FLAG; + devip->tape_location[partition] = pos; + + return 0; +} + +static int resp_space(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd, code; + int i = 0, pos, count; + struct tape_block *blp; + int partition = devip->tape_partition; + + count = get_unaligned_be24(cmd + 2); + if ((count & 0x800000) != 0) /* extend negative to 32-bit count */ + count |= 0xff000000; + code = cmd[1] & 0x0f; + + pos = devip->tape_location[partition]; + if (code == 0) { /* blocks */ + if (count < 0) { + count = (-count); + pos -= 1; + for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count; + i++) { + if (pos < 0) + goto is_bop; + else if (IS_TAPE_BLOCK_FM(blp->fl_size)) + goto is_fm; + if (i > 0) { + pos--; + blp--; + } + } + } else if (count > 0) { + for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count; + i++, pos++, blp++) { + if (IS_TAPE_BLOCK_EOD(blp->fl_size)) + goto is_eod; + if (IS_TAPE_BLOCK_FM(blp->fl_size)) { + pos += 1; + goto is_fm; + } + if (pos >= devip->tape_eop[partition]) + goto is_eop; + } + } + } else if (code == 1) { /* filemarks */ + if (count < 0) { + count = (-count); + if (pos == 0) + goto is_bop; + else { + for (i = 0, blp = devip->tape_blocks[partition] + pos; + i < count && pos >= 0; i++, pos--, blp--) { + for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) && + pos >= 0; pos--, blp--) + ; /* empty */ + if (pos < 0) + goto is_bop; + } + } + pos += 1; + } else if (count > 0) { + for (i = 0, blp = devip->tape_blocks[partition] + pos; + i < count; i++, pos++, blp++) { + for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) && + !IS_TAPE_BLOCK_EOD(blp->fl_size) && + pos < devip->tape_eop[partition]; + pos++, blp++) + ; /* empty */ + if (IS_TAPE_BLOCK_EOD(blp->fl_size)) + goto is_eod; + if (pos >= devip->tape_eop[partition]) + goto is_eop; + } + } + } else if (code == 3) { /* EOD */ + for (blp = devip->tape_blocks[partition] + pos; + !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition]; + pos++, blp++) + ; /* empty */ + if (pos >= devip->tape_eop[partition]) + goto is_eop; + } else { + /* sequential filemarks not supported */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1); + return check_condition_result; + } + devip->tape_location[partition] = pos; + return 0; + +is_fm: + devip->tape_location[partition] = pos; + mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE, + FILEMARK_DETECTED_ASCQ, count - i, + SENSE_FLAG_FILEMARK); + return check_condition_result; + +is_eod: + devip->tape_location[partition] = pos; + mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE, + EOD_DETECTED_ASCQ, count - i, + 0); + return check_condition_result; + +is_bop: + devip->tape_location[partition] = 0; + mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE, + BEGINNING_OF_P_M_DETECTED_ASCQ, count - i, + SENSE_FLAG_EOM); + devip->tape_location[partition] = 0; + return check_condition_result; + +is_eop: + devip->tape_location[partition] = devip->tape_eop[partition] - 1; + mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE, + EOP_EOM_DETECTED_ASCQ, (unsigned int)i, + SENSE_FLAG_EOM); + return check_condition_result; +} + +enum 
{SDEBUG_READ_POSITION_ARR_SZ = 20}; +static int resp_read_position(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + int all_length; + unsigned char arr[20]; + unsigned int pos; + + all_length = get_unaligned_be16(cmd + 7); + if ((cmd[1] & 0xfe) != 0 || + all_length != 0) { /* only short form */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, + all_length ? 7 : 1, 0); + return check_condition_result; + } + memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ); + arr[1] = devip->tape_partition; + pos = devip->tape_location[devip->tape_partition]; + put_unaligned_be32(pos, arr + 4); + put_unaligned_be32(pos, arr + 8); + return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ); +} + +static int resp_rewind(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + devip->tape_location[devip->tape_partition] = 0; + + return 0; +} + +static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions, + int part_0_size, int part_1_size) +{ + int i; + + if (part_0_size + part_1_size > TAPE_UNITS) + return -1; + devip->tape_eop[0] = part_0_size; + devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG; + devip->tape_eop[1] = part_1_size; + devip->tape_blocks[1] = devip->tape_blocks[0] + + devip->tape_eop[0]; + devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG; + + for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++) + devip->tape_location[i] = 0; + + devip->tape_nbr_partitions = nbr_partitions; + devip->tape_partition = 0; + + partition_pg[3] = nbr_partitions - 1; + put_unaligned_be16(devip->tape_eop[0], partition_pg + 8); + put_unaligned_be16(devip->tape_eop[1], partition_pg + 10); + + return nbr_partitions; +} + +static int resp_format_medium(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int res = 0; + unsigned char *cmd = scp->cmnd; + + if (cmd[2] > 2) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1); + return check_condition_result; + } + if (cmd[2] != 0) { + if (devip->tape_pending_nbr_partitions > 0) { + res = partition_tape(devip, + devip->tape_pending_nbr_partitions, + devip->tape_pending_part_0_size, + devip->tape_pending_part_1_size); + } else + res = partition_tape(devip, devip->tape_nbr_partitions, + devip->tape_eop[0], devip->tape_eop[1]); + } else + res = partition_tape(devip, 1, TAPE_UNITS, 0); + if (res < 0) + return -EINVAL; + + devip->tape_pending_nbr_partitions = -1; + + return 0; +} + +static int resp_erase(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int partition = devip->tape_partition; + int pos = devip->tape_location[partition]; + struct tape_block *blp; + + blp = devip->tape_blocks[partition] + pos; + blp->fl_size = TAPE_BLOCK_EOD_FLAG; + + return 0; +} + static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) { return devip->nr_zones != 0; @@ -3377,16 +3998,238 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip, return xa_load(per_store_ap, devip->sdbg_host->si_idx); } +static inline void +sdeb_read_lock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __acquire(lock); + else + read_lock(lock); +} + +static inline void +sdeb_read_unlock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __release(lock); + else + read_unlock(lock); +} + +static inline void +sdeb_write_lock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __acquire(lock); + else + write_lock(lock); +} + +static inline void +sdeb_write_unlock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __release(lock); + else + write_unlock(lock); +} + +static inline void +sdeb_data_read_lock(struct sdeb_store_info *sip) +{ + 
BUG_ON(!sip); + + sdeb_read_lock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_read_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_read_unlock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_write_lock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_lock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_write_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_unlock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_sector_read_lock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_read_lock(&sip->macc_sector_lck); +} + +static inline void +sdeb_data_sector_read_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_read_unlock(&sip->macc_sector_lck); +} + +static inline void +sdeb_data_sector_write_lock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_lock(&sip->macc_sector_lck); +} + +static inline void +sdeb_data_sector_write_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_unlock(&sip->macc_sector_lck); +} + +/* + * Atomic locking: + * We simplify the atomic model to allow only 1x atomic write and many non- + * atomic reads or writes for all LBAs. + * + * A RW lock has a similar behaviour: + * Only 1x writer and many readers. + * + * So use a RW lock for per-device read and write locking: + * An atomic access grabs the lock as a writer and a non-atomic access grabs + * the lock as a reader. + */ + +static inline void +sdeb_data_lock(struct sdeb_store_info *sip, bool atomic) +{ + if (atomic) + sdeb_data_write_lock(sip); + else + sdeb_data_read_lock(sip); +} + +static inline void +sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic) +{ + if (atomic) + sdeb_data_write_unlock(sip); + else + sdeb_data_read_unlock(sip); +} + +/* Allow many reads but only 1x write per sector */ +static inline void +sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write) +{ + if (do_write) + sdeb_data_sector_write_lock(sip); + else + sdeb_data_sector_read_lock(sip); +} + +static inline void +sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write) +{ + if (do_write) + sdeb_data_sector_write_unlock(sip); + else + sdeb_data_sector_read_unlock(sip); +} + +static inline void +sdeb_meta_read_lock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __acquire(&sip->macc_meta_lck); + else + __acquire(&sdeb_fake_rw_lck); + } else { + if (sip) + read_lock(&sip->macc_meta_lck); + else + read_lock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_meta_read_unlock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __release(&sip->macc_meta_lck); + else + __release(&sdeb_fake_rw_lck); + } else { + if (sip) + read_unlock(&sip->macc_meta_lck); + else + read_unlock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_meta_write_lock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __acquire(&sip->macc_meta_lck); + else + __acquire(&sdeb_fake_rw_lck); + } else { + if (sip) + write_lock(&sip->macc_meta_lck); + else + write_lock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_meta_write_unlock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __release(&sip->macc_meta_lck); + else + __release(&sdeb_fake_rw_lck); + } else { + if (sip) + write_unlock(&sip->macc_meta_lck); + else + write_unlock(&sdeb_fake_rw_lck); + } +} + /* Returns number of bytes copied or -1 if error. 
*/ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, - u32 sg_skip, u64 lba, u32 num, bool do_write, - u8 group_number) + u32 sg_skip, u64 lba, u32 num, u8 group_number, + bool do_write, bool atomic) { int ret; - u64 block, rest = 0; + u64 block; enum dma_data_direction dir; struct scsi_data_buffer *sdb = &scp->sdb; u8 *fsp; + int i, total = 0; + + /* + * Even though reads are inherently atomic (in this driver), we expect + * the atomic flag only for writes. + */ + if (!do_write && atomic) + return -1; if (do_write) { dir = DMA_TO_DEVICE; @@ -3406,23 +4249,26 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, fsp = sip->storep; block = do_div(lba, sdebug_store_sectors); - if (block + num > sdebug_store_sectors) - rest = block + num - sdebug_store_sectors; - ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, + /* Only allow 1x atomic write or multiple non-atomic writes at any given time */ + sdeb_data_lock(sip, atomic); + for (i = 0; i < num; i++) { + /* We shouldn't need to lock for atomic writes, but do it anyway */ + sdeb_data_sector_lock(sip, do_write); + ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, fsp + (block * sdebug_sector_size), - (num - rest) * sdebug_sector_size, sg_skip, do_write); - if (ret != (num - rest) * sdebug_sector_size) - return ret; - - if (rest) { - ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, - fsp, rest * sdebug_sector_size, - sg_skip + ((num - rest) * sdebug_sector_size), - do_write); + sdebug_sector_size, sg_skip, do_write); + sdeb_data_sector_unlock(sip, do_write); + total += ret; + if (ret != sdebug_sector_size) + break; + sg_skip += sdebug_sector_size; + if (++block >= sdebug_store_sectors) + block = 0; } + sdeb_data_unlock(sip, atomic); - return ret; + return total; } /* Returns number of bytes copied or -1 if error. 
*/ @@ -3596,68 +4442,96 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec, return ret; } -static inline void -sdeb_read_lock(struct sdeb_store_info *sip) +static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { - if (sdebug_no_rwlock) { - if (sip) - __acquire(&sip->macc_lck); - else - __acquire(&sdeb_fake_rw_lck); - } else { - if (sip) - read_lock(&sip->macc_lck); - else - read_lock(&sdeb_fake_rw_lck); - } -} + u32 i, num, transfer, size; + u8 *cmd = scp->cmnd; + struct scsi_data_buffer *sdb = &scp->sdb; + int partition = devip->tape_partition; + u32 pos = devip->tape_location[partition]; + struct tape_block *blp; + bool fixed, sili; -static inline void -sdeb_read_unlock(struct sdeb_store_info *sip) -{ - if (sdebug_no_rwlock) { - if (sip) - __release(&sip->macc_lck); - else - __release(&sdeb_fake_rw_lck); - } else { - if (sip) - read_unlock(&sip->macc_lck); - else - read_unlock(&sdeb_fake_rw_lck); + if (cmd[0] != READ_6) { /* Only Read(6) supported */ + mk_sense_invalid_opcode(scp); + return illegal_condition_result; } -} - -static inline void -sdeb_write_lock(struct sdeb_store_info *sip) -{ - if (sdebug_no_rwlock) { - if (sip) - __acquire(&sip->macc_lck); - else - __acquire(&sdeb_fake_rw_lck); - } else { - if (sip) - write_lock(&sip->macc_lck); - else - write_lock(&sdeb_fake_rw_lck); + fixed = (cmd[1] & 0x1) != 0; + sili = (cmd[1] & 0x2) != 0; + if (fixed && sili) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); + return check_condition_result; } -} -static inline void -sdeb_write_unlock(struct sdeb_store_info *sip) -{ - if (sdebug_no_rwlock) { - if (sip) - __release(&sip->macc_lck); - else - __release(&sdeb_fake_rw_lck); + transfer = get_unaligned_be24(cmd + 2); + if (fixed) { + num = transfer; + size = devip->tape_blksize; } else { - if (sip) - write_unlock(&sip->macc_lck); - else - write_unlock(&sdeb_fake_rw_lck); + if (transfer < TAPE_MIN_BLKSIZE || + transfer > TAPE_MAX_BLKSIZE) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); + return check_condition_result; + } + num = 1; + size = transfer; + } + + for (i = 0, blp = devip->tape_blocks[partition] + pos; + i < num && pos < devip->tape_eop[partition]; + i++, pos++, blp++) { + devip->tape_location[partition] = pos + 1; + if (IS_TAPE_BLOCK_FM(blp->fl_size)) { + mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE, + FILEMARK_DETECTED_ASCQ, fixed ? num - i : size, + SENSE_FLAG_FILEMARK); + scsi_set_resid(scp, (num - i) * size); + return check_condition_result; + } + /* Assume no REW */ + if (IS_TAPE_BLOCK_EOD(blp->fl_size)) { + mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE, + EOD_DETECTED_ASCQ, fixed ? 
num - i : size, + 0); + devip->tape_location[partition] = pos; + scsi_set_resid(scp, (num - i) * size); + return check_condition_result; + } + sg_zero_buffer(sdb->table.sgl, sdb->table.nents, + size, i * size); + sg_copy_buffer(sdb->table.sgl, sdb->table.nents, + &(blp->data), 4, i * size, false); + if (fixed) { + if (blp->fl_size != devip->tape_blksize) { + scsi_set_resid(scp, (num - i) * size); + mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE, + 0, num - i, + SENSE_FLAG_ILI); + return check_condition_result; + } + } else { + if (blp->fl_size != size) { + if (blp->fl_size < size) + scsi_set_resid(scp, size - blp->fl_size); + if (!sili) { + mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE, + 0, size - blp->fl_size, + SENSE_FLAG_ILI); + return check_condition_result; + } + } + } + } + if (pos >= devip->tape_eop[partition]) { + mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE, + EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size, + SENSE_FLAG_EOM); + devip->tape_location[partition] = pos - 1; + return check_condition_result; } + devip->tape_location[partition] = pos; + + return 0; } static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) @@ -3669,6 +4543,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u64 lba; struct sdeb_store_info *sip = devip2sip(devip, true); u8 *cmd = scp->cmnd; + bool meta_data_locked = false; switch (cmd[0]) { case READ_16: @@ -3727,6 +4602,10 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) atomic_set(&sdeb_inject_pending, 0); } + /* + * When checking device access params, for reads we only check data + * versus what is set at init time, so no need to lock. + */ ret = check_device_access_params(scp, lba, num, false); if (ret) return ret; @@ -3746,29 +4625,33 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return check_condition_result; } - sdeb_read_lock(sip); + if (sdebug_dev_is_zoned(devip) || + (sdebug_dix && scsi_prot_sg_count(scp))) { + sdeb_meta_read_lock(sip); + meta_data_locked = true; + } /* DIX + T10 DIF */ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { switch (prot_verify_read(scp, lba, num, ei_lba)) { case 1: /* Guard tag error */ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); return check_condition_result; } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); return illegal_condition_result; } break; case 3: /* Reference tag error */ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); return check_condition_result; } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); return illegal_condition_result; } @@ -3776,8 +4659,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } } - ret = do_device_access(sip, scp, 0, lba, num, false, 0); - sdeb_read_unlock(sip); + ret = do_device_access(sip, scp, 0, lba, num, 0, false, false); + if (meta_data_locked) + sdeb_meta_read_unlock(sip); if (unlikely(ret == -1)) return DID_ERROR << 16; @@ -3957,6 +4841,67 @@ static void unmap_region(struct sdeb_store_info *sip, sector_t lba, } } +static int resp_write_tape(struct scsi_cmnd *scp, struct 
sdebug_dev_info *devip) +{ + u32 i, num, transfer, size, written = 0; + u8 *cmd = scp->cmnd; + struct scsi_data_buffer *sdb = &scp->sdb; + int partition = devip->tape_partition; + int pos = devip->tape_location[partition]; + struct tape_block *blp; + bool fixed, ew; + + if (cmd[0] != WRITE_6) { /* Only Write(6) supported */ + mk_sense_invalid_opcode(scp); + return illegal_condition_result; + } + + fixed = (cmd[1] & 1) != 0; + transfer = get_unaligned_be24(cmd + 2); + if (fixed) { + num = transfer; + size = devip->tape_blksize; + } else { + if (transfer < TAPE_MIN_BLKSIZE || + transfer > TAPE_MAX_BLKSIZE) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); + return check_condition_result; + } + num = 1; + size = transfer; + } + + scsi_set_resid(scp, num * transfer); + for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false; + i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) { + blp->fl_size = size; + sg_copy_buffer(sdb->table.sgl, sdb->table.nents, + &(blp->data), 4, i * size, true); + written += size; + scsi_set_resid(scp, num * transfer - written); + ew |= (pos == devip->tape_eop[partition] - TAPE_EW); + } + + devip->tape_location[partition] = pos; + blp->fl_size = TAPE_BLOCK_EOD_FLAG; + if (pos >= devip->tape_eop[partition] - 1) { + mk_sense_info_tape(scp, VOLUME_OVERFLOW, + NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ, + fixed ? num - i : transfer, + SENSE_FLAG_EOM); + return check_condition_result; + } + if (ew) { /* early warning */ + mk_sense_info_tape(scp, NO_SENSE, + NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ, + fixed ? num - i : transfer, + SENSE_FLAG_EOM); + return check_condition_result; + } + + return 0; +} + static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { bool check_prot; @@ -3967,6 +4912,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u64 lba; struct sdeb_store_info *sip = devip2sip(devip, true); u8 *cmd = scp->cmnd; + bool meta_data_locked = false; switch (cmd[0]) { case WRITE_16: @@ -4025,10 +4971,17 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) "to DIF device\n"); } - sdeb_write_lock(sip); + if (sdebug_dev_is_zoned(devip) || + (sdebug_dix && scsi_prot_sg_count(scp)) || + scsi_debug_lbp()) { + sdeb_meta_write_lock(sip); + meta_data_locked = true; + } + ret = check_device_access_params(scp, lba, num, true); if (ret) { - sdeb_write_unlock(sip); + if (meta_data_locked) + sdeb_meta_write_unlock(sip); return ret; } @@ -4037,22 +4990,22 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) switch (prot_verify_write(scp, lba, num, ei_lba)) { case 1: /* Guard tag error */ if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); return illegal_condition_result; } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); return check_condition_result; } break; case 3: /* Reference tag error */ if (scp->prot_flags & SCSI_PROT_REF_CHECK) { - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); return illegal_condition_result; } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); return check_condition_result; } @@ -4060,13 +5013,16 @@ static int resp_write_dt0(struct 
scsi_cmnd *scp, struct sdebug_dev_info *devip) } } - ret = do_device_access(sip, scp, 0, lba, num, true, group); + ret = do_device_access(sip, scp, 0, lba, num, group, true, false); if (unlikely(scsi_debug_lbp())) map_region(sip, lba, num); + /* If ZBC zone then bump its write pointer */ if (sdebug_dev_is_zoned(devip)) zbc_inc_wp(devip, lba, num); - sdeb_write_unlock(sip); + if (meta_data_locked) + sdeb_meta_write_unlock(sip); + if (unlikely(-1 == ret)) return DID_ERROR << 16; else if (unlikely(sdebug_verbose && @@ -4176,7 +5132,8 @@ static int resp_write_scat(struct scsi_cmnd *scp, goto err_out; } - sdeb_write_lock(sip); + /* Just keep it simple and always lock for now */ + sdeb_meta_write_lock(sip); sg_off = lbdof_blen; /* Spec says Buffer xfer Length field in number of LBs in dout */ cum_lb = 0; @@ -4219,7 +5176,11 @@ static int resp_write_scat(struct scsi_cmnd *scp, } } - ret = do_device_access(sip, scp, sg_off, lba, num, true, group); + /* + * Write ranges atomically to keep as close to pre-atomic + * writes behaviour as possible. + */ + ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true); /* If ZBC zone then bump its write pointer */ if (sdebug_dev_is_zoned(devip)) zbc_inc_wp(devip, lba, num); @@ -4258,7 +5219,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, } ret = 0; err_out_unlock: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); err_out: kfree(lrdp); return ret; @@ -4277,14 +5238,16 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, scp->device->hostdata, true); u8 *fs1p; u8 *fsp; + bool meta_data_locked = false; - sdeb_write_lock(sip); + if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) { + sdeb_meta_write_lock(sip); + meta_data_locked = true; + } ret = check_device_access_params(scp, lba, num, true); - if (ret) { - sdeb_write_unlock(sip); - return ret; - } + if (ret) + goto out; if (unmap && scsi_debug_lbp()) { unmap_region(sip, lba, num); @@ -4295,6 +5258,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, /* if ndob then zero 1 logical block, else fetch 1 logical block */ fsp = sip->storep; fs1p = fsp + (block * lb_size); + sdeb_data_write_lock(sip); if (ndob) { memset(fs1p, 0, lb_size); ret = 0; @@ -4302,8 +5266,8 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, ret = fetch_to_dev_buffer(scp, fs1p, lb_size); if (-1 == ret) { - sdeb_write_unlock(sip); - return DID_ERROR << 16; + ret = DID_ERROR << 16; + goto out; } else if (sdebug_verbose && !ndob && (ret < lb_size)) sdev_printk(KERN_INFO, scp->device, "%s: %s: lb size=%u, IO sent=%d bytes\n", @@ -4320,10 +5284,12 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, /* If ZBC zone then bump its write pointer */ if (sdebug_dev_is_zoned(devip)) zbc_inc_wp(devip, lba, num); + sdeb_data_write_unlock(sip); + ret = 0; out: - sdeb_write_unlock(sip); - - return 0; + if (meta_data_locked) + sdeb_meta_write_unlock(sip); + return ret; } static int resp_write_same_10(struct scsi_cmnd *scp, @@ -4466,25 +5432,30 @@ static int resp_comp_write(struct scsi_cmnd *scp, return check_condition_result; } - sdeb_write_lock(sip); - ret = do_dout_fetch(scp, dnum, arr); if (ret == -1) { retval = DID_ERROR << 16; - goto cleanup; + goto cleanup_free; } else if (sdebug_verbose && (ret < (dnum * lb_size))) sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " "indicated=%u, IO sent=%d bytes\n", my_name, dnum * lb_size, ret); + + sdeb_data_write_lock(sip); + sdeb_meta_write_lock(sip); if (!comp_write_worker(sip, lba, num, arr, false)) 
{ mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0); retval = check_condition_result; - goto cleanup; + goto cleanup_unlock; } + + /* Cover sip->map_storep (which map_region() sets) with the data lock */ if (scsi_debug_lbp()) map_region(sip, lba, num); -cleanup: - sdeb_write_unlock(sip); +cleanup_unlock: + sdeb_meta_write_unlock(sip); + sdeb_data_write_unlock(sip); +cleanup_free: kfree(arr); return retval; } @@ -4528,7 +5499,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) desc = (void *)&buf[8]; - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); for (i = 0 ; i < descriptors ; i++) { unsigned long long lba = get_unaligned_be64(&desc[i].lba); @@ -4544,7 +5515,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ret = 0; out: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); kfree(buf); return ret; @@ -4671,6 +5642,8 @@ static int resp_sync_cache(struct scsi_cmnd *scp, * a GOOD status otherwise. Model a disk with a big cache and yield * CONDITION MET. Actually tries to bring range in main memory into the * cache associated with the CPU(s). + * + * The pcode 0x34 is also used for READ POSITION by tape devices. */ static int resp_pre_fetch(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) @@ -4702,12 +5675,13 @@ static int resp_pre_fetch(struct scsi_cmnd *scp, rest = block + nblks - sdebug_store_sectors; /* Try to bring the PRE-FETCH range into CPU's cache */ - sdeb_read_lock(sip); + sdeb_data_read_lock(sip); prefetch_range(fsp + (sdebug_sector_size * block), (nblks - rest) * sdebug_sector_size); if (rest) prefetch_range(fsp, rest * sdebug_sector_size); - sdeb_read_unlock(sip); + + sdeb_data_read_unlock(sip); fini: if (cmd[1] & 0x2) res = SDEG_RES_IMMED_MASK; @@ -4866,7 +5840,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return check_condition_result; } /* Not changing store, so only need read access */ - sdeb_read_lock(sip); + sdeb_data_read_lock(sip); ret = do_dout_fetch(scp, a_num, arr); if (ret == -1) { @@ -4888,7 +5862,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) goto cleanup; } cleanup: - sdeb_read_unlock(sip); + sdeb_data_read_unlock(sip); kfree(arr); return ret; } @@ -4934,7 +5908,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, return check_condition_result; } - sdeb_read_lock(sip); + sdeb_meta_read_lock(sip); desc = arr + 64; for (lba = zs_lba; lba < sdebug_capacity; @@ -5032,11 +6006,70 @@ static int resp_report_zones(struct scsi_cmnd *scp, ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len)); fini: - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); kfree(arr); return ret; } +static int resp_atomic_write(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + struct sdeb_store_info *sip; + u8 *cmd = scp->cmnd; + u16 boundary, len; + u64 lba, lba_tmp; + int ret; + + if (!scsi_debug_atomic_write()) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + + sip = devip2sip(devip, true); + + lba = get_unaligned_be64(cmd + 2); + boundary = get_unaligned_be16(cmd + 10); + len = get_unaligned_be16(cmd + 12); + + lba_tmp = lba; + if (sdebug_atomic_wr_align && + do_div(lba_tmp, sdebug_atomic_wr_align)) { + /* Does not meet alignment requirement */ + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + return check_condition_result; + } + + if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) { + /* Does not meet granularity requirement */ + mk_sense_buffer(scp, ILLEGAL_REQUEST, 
INVALID_FIELD_IN_CDB, 0); + return check_condition_result; + } + + if (boundary > 0) { + if (boundary > sdebug_atomic_wr_max_bndry) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); + return check_condition_result; + } + + if (len > sdebug_atomic_wr_max_length_bndry) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); + return check_condition_result; + } + } else { + if (len > sdebug_atomic_wr_max_length) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); + return check_condition_result; + } + } + + ret = do_device_access(sip, scp, 0, lba, len, 0, true, true); + if (unlikely(ret == -1)) + return DID_ERROR << 16; + if (unlikely(ret != len * sdebug_sector_size)) + return DID_ERROR << 16; + return 0; +} + /* Logic transplanted from tcmu-runner, file_zbc.c */ static void zbc_open_all(struct sdebug_dev_info *devip) { @@ -5063,8 +6096,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) mk_sense_invalid_opcode(scp); return check_condition_result; } - - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { /* Check if all closed zones can be open */ @@ -5113,7 +6145,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) zbc_open_zone(devip, zsp, true); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -5140,7 +6172,7 @@ static int resp_close_zone(struct scsi_cmnd *scp, return check_condition_result; } - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { zbc_close_all(devip); @@ -5169,7 +6201,7 @@ static int resp_close_zone(struct scsi_cmnd *scp, zbc_close_zone(devip, zsp); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -5212,7 +6244,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp, return check_condition_result; } - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { zbc_finish_all(devip); @@ -5241,7 +6273,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp, zbc_finish_zone(devip, zsp, true); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -5292,7 +6324,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return check_condition_result; } - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { zbc_rwp_all(devip); @@ -5320,7 +6352,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) zbc_rwp_zone(devip, zsp); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -5332,10 +6364,10 @@ static u32 get_tag(struct scsi_cmnd *cmnd) /* Queued (deferred) command completions converge here. 
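Both the hrtimer and workqueue completion paths arrive via sdebug_q_cmd_complete(), which recovers the owning scsi_cmnd from the embedded sdebug_defer with container_of() rather than through a separately allocated sdebug_queued_cmd.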
*/ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) { - struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp); + struct sdebug_scsi_cmd *sdsc = container_of(sd_dp, + typeof(*sdsc), sd_dp); + struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1; unsigned long flags; - struct scsi_cmnd *scp = sqcp->scmd; - struct sdebug_scsi_cmd *sdsc; bool aborted; if (sdebug_statistics) { @@ -5346,27 +6378,23 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) if (!scp) { pr_err("scmd=NULL\n"); - goto out; + return; } - sdsc = scsi_cmd_priv(scp); spin_lock_irqsave(&sdsc->lock, flags); aborted = sd_dp->aborted; if (unlikely(aborted)) sd_dp->aborted = false; - ASSIGN_QUEUED_CMD(scp, NULL); spin_unlock_irqrestore(&sdsc->lock, flags); if (aborted) { pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n"); blk_abort_request(scsi_cmd_to_rq(scp)); - goto out; + return; } scsi_done(scp); /* callback to mid level */ -out: - sdebug_free_queued_cmd(sqcp); } /* When high resolution timer goes off this function is called. */ @@ -5529,6 +6557,10 @@ static struct sdebug_dev_info *sdebug_device_create( } else { devip->zoned = false; } + if (sdebug_ptype == TYPE_TAPE) { + devip->tape_density = TAPE_DEF_DENSITY; + devip->tape_blksize = TAPE_DEF_BLKSIZE; + } devip->create_ts = ktime_get_boottime(); atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0)); spin_lock_init(&devip->list_lock); @@ -5573,23 +6605,24 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev) return open_devip; } -static int scsi_debug_slave_alloc(struct scsi_device *sdp) +static int scsi_debug_sdev_init(struct scsi_device *sdp) { if (sdebug_verbose) - pr_info("slave_alloc <%u %u %u %llu>\n", + pr_info("sdev_init <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); return 0; } -static int scsi_debug_slave_configure(struct scsi_device *sdp) +static int scsi_debug_sdev_configure(struct scsi_device *sdp, + struct queue_limits *lim) { struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; struct dentry *dentry; if (sdebug_verbose) - pr_info("slave_configure <%u %u %u %llu>\n", + pr_info("sdev_configure <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; @@ -5598,6 +6631,21 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp) if (devip == NULL) return 1; /* no resources, will be marked offline */ } + if (sdebug_ptype == TYPE_TAPE) { + if (!devip->tape_blocks[0]) { + devip->tape_blocks[0] = + kcalloc(TAPE_UNITS, sizeof(struct tape_block), + GFP_KERNEL); + if (!devip->tape_blocks[0]) + return 1; + } + devip->tape_pending_nbr_partitions = -1; + if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) { + kfree(devip->tape_blocks[0]); + devip->tape_blocks[0] = NULL; + return 1; + } + } sdp->hostdata = devip; if (sdebug_no_uld) sdp->no_uld_attach = 1; @@ -5621,14 +6669,14 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp) return 0; } -static void scsi_debug_slave_destroy(struct scsi_device *sdp) +static void scsi_debug_sdev_destroy(struct scsi_device *sdp) { struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; struct sdebug_err_inject *err; if (sdebug_verbose) - pr_info("slave_destroy <%u %u %u %llu>\n", + pr_info("sdev_destroy <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); if (!devip) @@ -5643,31 +6691,41 @@ static void 
scsi_debug_slave_destroy(struct scsi_device *sdp) debugfs_remove(devip->debugfs_entry); + if (sdp->type == TYPE_TAPE) { + kfree(devip->tape_blocks[0]); + devip->tape_blocks[0] = NULL; + } + /* make this slot available for re-use */ devip->used = false; sdp->hostdata = NULL; } -/* Returns true if we require the queued memory to be freed by the caller. */ -static bool stop_qc_helper(struct sdebug_defer *sd_dp, - enum sdeb_defer_type defer_t) +/* Returns true if cancelled or not running callback. */ +static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd) { + struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd); + struct sdebug_defer *sd_dp = &sdsc->sd_dp; + enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t); + + lockdep_assert_held(&sdsc->lock); + if (defer_t == SDEB_DEFER_HRT) { int res = hrtimer_try_to_cancel(&sd_dp->hrt); switch (res) { - case 0: /* Not active, it must have already run */ case -1: /* -1 It's executing the CB */ return false; + case 0: /* Not active, it must have already run */ case 1: /* Was active, we've now cancelled */ default: return true; } } else if (defer_t == SDEB_DEFER_WQ) { /* Cancel if pending */ - if (cancel_work_sync(&sd_dp->ew.work)) + if (cancel_work(&sd_dp->ew.work)) return true; - /* Was not pending, so it must have run */ + /* callback may be running, so return false */ return false; } else if (defer_t == SDEB_DEFER_POLL) { return true; @@ -5676,28 +6734,6 @@ static bool stop_qc_helper(struct sdebug_defer *sd_dp, return false; } - -static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd) -{ - enum sdeb_defer_type l_defer_t; - struct sdebug_defer *sd_dp; - struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd); - struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd); - - lockdep_assert_held(&sdsc->lock); - - if (!sqcp) - return false; - sd_dp = &sqcp->sd_dp; - l_defer_t = READ_ONCE(sd_dp->defer_t); - ASSIGN_QUEUED_CMD(cmnd, NULL); - - if (stop_qc_helper(sd_dp, l_defer_t)) - sdebug_free_queued_cmd(sqcp); - - return true; -} - /* * Called from scsi_debug_abort() only, which is for timed-out cmd. */ @@ -5769,7 +6805,7 @@ static int sdebug_fail_abort(struct scsi_cmnd *cmnd) static int scsi_debug_abort(struct scsi_cmnd *SCpnt) { - bool ok = scsi_debug_abort_cmnd(SCpnt); + bool aborted = scsi_debug_abort_cmnd(SCpnt); u8 *cmd = SCpnt->cmnd; u8 opcode = cmd[0]; @@ -5778,7 +6814,8 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt) if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) sdev_printk(KERN_INFO, SCpnt->device, "%s: command%s found\n", __func__, - ok ? "" : " not"); + aborted ? "" : " not"); + if (sdebug_fail_abort(SCpnt)) { scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n", @@ -5786,6 +6823,9 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt) return FAILED; } + if (aborted == false) + return FAILED; + return SUCCESS; } @@ -5837,6 +6877,20 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd) return 0; } +static void scsi_tape_reset_clear(struct sdebug_dev_info *devip) +{ + int i; + + devip->tape_blksize = TAPE_DEF_BLKSIZE; + devip->tape_density = TAPE_DEF_DENSITY; + devip->tape_partition = 0; + devip->tape_dce = 0; + for (i = 0; i < TAPE_MAX_PARTITIONS; i++) + devip->tape_location[i] = 0; + devip->tape_pending_nbr_partitions = -1; + /* Don't reset partitioning? 
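As implemented, the partition count and sizes survive a reset; only density, block size, DCE, the active partition, the per-partition positions and any pending partitioning request are cleared.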
*/ +} + static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) { struct scsi_device *sdp = SCpnt->device; @@ -5850,8 +6904,11 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) sdev_printk(KERN_INFO, sdp, "%s\n", __func__); scsi_debug_stop_all_queued(sdp); - if (devip) + if (devip) { set_bit(SDEBUG_UA_POR, devip->uas_bm); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); + } if (sdebug_fail_lun_reset(SCpnt)) { scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode); @@ -5889,6 +6946,8 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { if (devip->target == sdp->id) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } } @@ -5920,6 +6979,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } @@ -5943,6 +7004,8 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } } @@ -6059,33 +7122,6 @@ static bool inject_on_this_cmd(void) #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */ - -void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp) -{ - if (sqcp) - kmem_cache_free(queued_cmd_cache, sqcp); -} - -static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd) -{ - struct sdebug_queued_cmd *sqcp; - struct sdebug_defer *sd_dp; - - sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC); - if (!sqcp) - return NULL; - - sd_dp = &sqcp->sd_dp; - - hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); - sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; - INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); - - sqcp->scmd = scmd; - - return sqcp; -} - /* Complete the processing of the thread that queued a SCSI command to this * driver. It either completes the command by calling cmnd_done() or * schedules a hr timer or work queue then returns 0. 
Returns @@ -6102,7 +7138,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd); unsigned long flags; u64 ns_from_boot = 0; - struct sdebug_queued_cmd *sqcp; struct scsi_device *sdp; struct sdebug_defer *sd_dp; @@ -6134,14 +7169,9 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, } } - sqcp = sdebug_alloc_queued_cmd(cmnd); - if (!sqcp) { - pr_err("%s no alloc\n", __func__); - return SCSI_MLQUEUE_HOST_BUSY; - } - sd_dp = &sqcp->sd_dp; + sd_dp = &sdsc->sd_dp; - if (polled) + if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)) ns_from_boot = ktime_get_boottime_ns(); /* one of the resp_*() response functions is called here */ @@ -6187,7 +7217,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (kt <= d) { /* elapsed duration >= kt */ /* call scsi_done() from this thread */ - sdebug_free_queued_cmd(sqcp); scsi_done(cmnd); return 0; } @@ -6200,13 +7229,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (polled) { spin_lock_irqsave(&sdsc->lock, flags); sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); - ASSIGN_QUEUED_CMD(cmnd, sqcp); WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); spin_unlock_irqrestore(&sdsc->lock, flags); } else { /* schedule the invocation of scsi_done() for a later time */ spin_lock_irqsave(&sdsc->lock, flags); - ASSIGN_QUEUED_CMD(cmnd, sqcp); WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT); hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); /* @@ -6230,13 +7257,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, sd_dp->issuing_cpu = raw_smp_processor_id(); if (polled) { spin_lock_irqsave(&sdsc->lock, flags); - ASSIGN_QUEUED_CMD(cmnd, sqcp); sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); spin_unlock_irqrestore(&sdsc->lock, flags); } else { spin_lock_irqsave(&sdsc->lock, flags); - ASSIGN_QUEUED_CMD(cmnd, sqcp); WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ); schedule_work(&sd_dp->ew.work); spin_unlock_irqrestore(&sdsc->lock, flags); @@ -6284,6 +7309,7 @@ module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO); module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO); +module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO); module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO); module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR); module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR); @@ -6318,6 +7344,11 @@ module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO); module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO); module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO); module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO); +module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO); +module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO); +module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO); +module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO); +module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO); module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | 
S_IWUSR); module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, @@ -6361,6 +7392,7 @@ MODULE_PARM_DESC(lbprz, MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)"); MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); @@ -6392,6 +7424,11 @@ MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)" MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); +MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)"); +MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)"); +MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)"); +MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)"); +MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)"); MODULE_PARM_DESC(uuid_ctl, "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); @@ -6516,7 +7553,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter, &data); if (f >= 0) { - seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n", + seq_printf(m, " BUSY: %s: %d,%d\n", "first,last bits", f, l); } } @@ -7563,6 +8600,7 @@ static int __init scsi_debug_init(void) return -EINVAL; } } + xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); if (want_store) { idx = sdebug_add_store(); @@ -7590,12 +8628,6 @@ static int __init scsi_debug_init(void) hosts_to_add = sdebug_add_host; sdebug_add_host = 0; - queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN); - if (!queued_cmd_cache) { - ret = -ENOMEM; - goto driver_unreg; - } - sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL); if (IS_ERR_OR_NULL(sdebug_debugfs_root)) pr_info("%s: failed to create initial debugfs directory\n", __func__); @@ -7622,8 +8654,6 @@ static int __init scsi_debug_init(void) return 0; -driver_unreg: - driver_unregister(&sdebug_driverfs_driver); bus_unreg: bus_unregister(&pseudo_lld_bus); dev_unreg: @@ -7639,7 +8669,6 @@ static void __exit scsi_debug_exit(void) for (; k; k--) sdebug_do_remove_host(true); - kmem_cache_destroy(queued_cmd_cache); driver_unregister(&sdebug_driverfs_driver); bus_unregister(&pseudo_lld_bus); root_device_unregister(pseudo_primary); @@ -7770,7 +8799,9 @@ static int sdebug_add_store(void) map_region(sip, 0, 2); } - rwlock_init(&sip->macc_lck); + rwlock_init(&sip->macc_data_lck); + rwlock_init(&sip->macc_meta_lck); + rwlock_init(&sip->macc_sector_lck); return (int)n_idx; err: sdebug_erase_store((int)n_idx, sip); @@ -8021,7 +9052,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque) struct sdebug_defer *sd_dp; u32 
unique_tag = blk_mq_unique_tag(rq); u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag); - struct sdebug_queued_cmd *sqcp; unsigned long flags; int queue_num = data->queue_num; ktime_t time; @@ -8037,13 +9067,7 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque) time = ktime_get_boottime(); spin_lock_irqsave(&sdsc->lock, flags); - sqcp = TO_QUEUED_CMD(cmd); - if (!sqcp) { - spin_unlock_irqrestore(&sdsc->lock, flags); - return true; - } - - sd_dp = &sqcp->sd_dp; + sd_dp = &sdsc->sd_dp; if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) { spin_unlock_irqrestore(&sdsc->lock, flags); return true; @@ -8053,8 +9077,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque) spin_unlock_irqrestore(&sdsc->lock, flags); return true; } - - ASSIGN_QUEUED_CMD(cmd, NULL); spin_unlock_irqrestore(&sdsc->lock, flags); if (sdebug_statistics) { @@ -8063,8 +9085,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque) atomic_inc(&sdebug_miss_cpus); } - sdebug_free_queued_cmd(sqcp); - scsi_done(cmd); /* callback to mid level */ (*data->num_entries)++; return true; @@ -8201,6 +9221,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, u32 flags; u16 sa; u8 opcode = cmd[0]; + u32 devsel = sdebug_get_devsel(scp->device); bool has_wlun_rl; bool inject_now; int ret = 0; @@ -8280,12 +9301,14 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, else sa = get_unaligned_be16(cmd + 8); for (k = 0; k <= na; oip = r_oip->arrp + k++) { - if (opcode == oip->opcode && sa == oip->sa) + if (opcode == oip->opcode && sa == oip->sa && + (devsel & oip->devsel) != 0) break; } } else { /* since no service action only check opcode */ for (k = 0; k <= na; oip = r_oip->arrp + k++) { - if (opcode == oip->opcode) + if (opcode == oip->opcode && + (devsel & oip->devsel) != 0) break; } } @@ -8379,21 +9402,25 @@ err_out: static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd); + struct sdebug_defer *sd_dp = &sdsc->sd_dp; spin_lock_init(&sdsc->lock); + hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); return 0; } -static struct scsi_host_template sdebug_driver_template = { +static const struct scsi_host_template sdebug_driver_template = { .show_info = scsi_debug_show_info, .write_info = scsi_debug_write_info, .proc_name = sdebug_proc_name, .name = "SCSI DEBUG", .info = scsi_debug_info, - .slave_alloc = scsi_debug_slave_alloc, - .slave_configure = scsi_debug_slave_configure, - .slave_destroy = scsi_debug_slave_destroy, + .sdev_init = scsi_debug_sdev_init, + .sdev_configure = scsi_debug_sdev_configure, + .sdev_destroy = scsi_debug_sdev_destroy, .ioctl = scsi_debug_ioctl, .queuecommand = scsi_debug_queuecommand, .change_queue_depth = sdebug_change_qdepth, @@ -8411,6 +9438,7 @@ static struct scsi_host_template sdebug_driver_template = { .max_sectors = -1U, .max_segment_size = -1U, .module = THIS_MODULE, + .skip_settle_delay = 1, .track_queue_depth = 1, .cmd_size = sizeof(struct sdebug_scsi_cmd), .init_cmd_priv = sdebug_init_cmd_priv, @@ -8427,17 +9455,17 @@ static int sdebug_driver_probe(struct device *dev) sdbg_host = dev_to_sdebug_host(dev); - sdebug_driver_template.can_queue = sdebug_max_queue; - sdebug_driver_template.cmd_per_lun = sdebug_max_queue; - if (!sdebug_clustering) - sdebug_driver_template.dma_boundary = PAGE_SIZE - 1; - hpnt = scsi_host_alloc(&sdebug_driver_template, 0); if (NULL == 
hpnt) { pr_err("scsi_host_alloc failed\n"); error = -ENODEV; return error; } + hpnt->can_queue = sdebug_max_queue; + hpnt->cmd_per_lun = sdebug_max_queue; + if (!sdebug_clustering) + hpnt->dma_boundary = PAGE_SIZE - 1; + if (submit_queues > nr_cpu_ids) { pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n", my_name, submit_queues, nr_cpu_ids); diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index f795848b316c..eb52e39f37c9 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bitops.h> +#include <linux/cleanup.h> #include <linux/seq_file.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> @@ -32,38 +33,43 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags, return 0; } -void scsi_show_rq(struct seq_file *m, struct request *rq) +static const char *scsi_cmd_list_info(struct scsi_cmnd *cmd) { - struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2; struct Scsi_Host *shost = cmd->device->host; + struct scsi_cmnd *cmd2; + + guard(spinlock_irq)(shost->host_lock); + + list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) + if (cmd == cmd2) + return "on eh_abort_list"; + + list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) + if (cmd == cmd2) + return "on eh_cmd_q"; + + return NULL; +} + +void scsi_show_rq(struct seq_file *m, struct request *rq) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); int timeout_ms = jiffies_to_msecs(rq->timeout); - const char *list_info = NULL; char buf[80] = "(?)"; - spin_lock_irq(shost->host_lock); - list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) { - if (cmd == cmd2) { - list_info = "on eh_abort_list"; - goto unlock; - } - } - list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) { - if (cmd == cmd2) { - list_info = "on eh_cmd_q"; - goto unlock; - } - } -unlock: - spin_unlock_irq(shost->host_lock); + if (cmd->flags & SCMD_INITIALIZED) { + const char *list_info = scsi_cmd_list_info(cmd); - __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); - seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=", - buf, cmd->retries, cmd->allowed, cmd->result, - list_info ? : "", list_info ? ", " : ""); + __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); + seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x%s%s", + buf, cmd->retries, cmd->allowed, cmd->result, + list_info ? ", " : "", list_info ? : ""); + seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago", + timeout_ms / 1000, timeout_ms % 1000, + alloc_ms / 1000, alloc_ms % 1000); + } + seq_printf(m, ", .flags="); scsi_flags_show(m, cmd->flags, scsi_cmd_flags, ARRAY_SIZE(scsi_cmd_flags)); - seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago", - timeout_ms / 1000, timeout_ms % 1000, - alloc_ms / 1000, alloc_ms % 1000); } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index ba7237e83863..a348df895dca 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -39,13 +39,12 @@ static LIST_HEAD(scsi_dev_info_list); static char scsi_dev_flags[256]; /* - * scsi_static_device_list: deprecated list of devices that require - * settings that differ from the default, includes black-listed (broken) - * devices. The entries here are added to the tail of scsi_dev_info_list - * via scsi_dev_info_list_init. 
+ * scsi_static_device_list: list of devices that require settings that differ + * from the default, includes black-listed (broken) devices. The entries here + * are added to the tail of scsi_dev_info_list via scsi_dev_info_list_init. * - * Do not add to this list, use the command line or proc interface to add - * to the scsi_dev_info_list. This table will eventually go away. + * If possible, set the BLIST_* flags from inside a SCSI LLD rather than + * adding an entry to this list. */ static struct { char *vendor; @@ -293,14 +292,16 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, size_t from_length; from_length = strlen(from); - /* This zero-pads the destination */ - strncpy(to, from, to_length); - if (from_length < to_length && !compatible) { - /* - * space pad the string if it is short. - */ - memset(&to[from_length], ' ', to_length - from_length); - } + + /* + * null pad and null terminate if compatible + * otherwise space pad + */ + if (compatible) + strscpy_pad(to, from, to_length); + else + memcpy_and_pad(to, to_length, from, from_length, ' '); + if (from_length > to_length) printk(KERN_WARNING "%s: %s string '%s' is too long\n", __func__, name, from); @@ -485,33 +486,6 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, } /** - * scsi_dev_info_list_del_keyed - remove one dev_info list entry. - * @vendor: vendor string - * @model: model (product) string - * @key: specify list to use - * - * Description: - * Remove and destroy one dev_info entry for @vendor, @model - * in list specified by @key. - * - * Returns: 0 OK, -error on failure. - **/ -int scsi_dev_info_list_del_keyed(char *vendor, char *model, - enum scsi_devinfo_key key) -{ - struct scsi_dev_info_list *found; - - found = scsi_dev_info_list_find(vendor, model, key); - if (IS_ERR(found)) - return PTR_ERR(found); - - list_del(&found->dev_info_list); - kfree(found); - return 0; -} -EXPORT_SYMBOL(scsi_dev_info_list_del_keyed); - -/** * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. * @dev_list: string of device flags to add * diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 612489afe8d2..376b8897ab90 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -48,7 +48,7 @@ #include <trace/events/scsi.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> /* * These should *probably* be handled by the host itself. @@ -547,6 +547,18 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) scsi_report_sense(sdev, &sshdr); + if (sshdr.sense_key == UNIT_ATTENTION) { + /* + * Increment the counters for Power on/Reset or New Media so + * that all ULDs interested in these can see that those have + * happened, even if someone else gets the sense data. + */ + if (sshdr.asc == 0x28) + scmd->device->ua_new_media_ctr++; + else if (sshdr.asc == 0x29) + scmd->device->ua_por_ctr++; + } + if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; @@ -711,6 +723,13 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) return SUCCESS; case COMPLETED: + /* + * A command using command duration limits (CDL) with a + * descriptor set with policy 0xD may be completed with success + * and the sense data DATA CURRENTLY UNAVAILABLE, indicating + * that the command was in fact aborted because it exceeded its + * duration limit. Never retry these commands. 
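+ * Instead, such commands are marked with SCSIML_STAT_DL_TIMEOUT and + * failed fast, so the outcome is reported as a duration-limit timeout + * rather than retried.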
+ */ if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) { set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT); req->cmd_flags |= REQ_FAILFAST_DEV; @@ -2363,14 +2382,14 @@ int scsi_error_handler(void *data) return 0; } -/* - * Function: scsi_report_bus_reset() +/** + * scsi_report_bus_reset() - report bus reset observed * - * Purpose: Utility function used by low-level drivers to report that - * they have observed a bus reset on the bus being handled. + * Utility function used by low-level drivers to report that + * they have observed a bus reset on the bus being handled. * - * Arguments: shost - Host in question - * channel - channel on which reset was observed. + * @shost: Host in question + * @channel: channel on which reset was observed. * * Returns: Nothing * @@ -2395,15 +2414,15 @@ void scsi_report_bus_reset(struct Scsi_Host *shost, int channel) } EXPORT_SYMBOL(scsi_report_bus_reset); -/* - * Function: scsi_report_device_reset() +/** + * scsi_report_device_reset() - report device reset observed * - * Purpose: Utility function used by low-level drivers to report that - * they have observed a device reset on the device being handled. + * Utility function used by low-level drivers to report that + * they have observed a device reset on the device being handled. * - * Arguments: shost - Host in question - * channel - channel on which reset was observed - * target - target on which reset was observed + * @shost: Host in question + * @channel: channel on which reset was observed + * @target: target on which reset was observed * * Returns: Nothing * diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 6f6c5973c3ea..0ddc95bafc71 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -37,8 +37,10 @@ * @host: host to identify * @buffer: userspace buffer for identification * - * Return an identifying string at @buffer, if @buffer is non-NULL, filling - * to the length stored at * (int *) @buffer. + * Return: + * * if successful, %1 and an identifying string at @buffer, if @buffer + * is non-NULL, filling to the length stored at * (int *) @buffer. + * * <0 error code on failure. */ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer) { @@ -121,6 +123,16 @@ out: return result; } +/** + * scsi_set_medium_removal() - send command to allow or prevent medium removal + * @sdev: target scsi device + * @state: removal state to set (prevent or allow) + * + * Returns: + * * %0 if @sdev is not removable or not lockable or successful. + * * non-%0 is a SCSI result code if > 0 or kernel error code if < 0. + * * Sets @sdev->locked to the new state on success. + */ int scsi_set_medium_removal(struct scsi_device *sdev, char state) { char scsi_cmd[MAX_COMMAND_SIZE]; @@ -242,11 +254,15 @@ static int scsi_send_start_stop(struct scsi_device *sdev, int data) NORMAL_RETRIES); } -/* - * Check if the given command is allowed. +/** + * scsi_cmd_allowed() - Check if the given command is allowed. + * @cmd: SCSI command to check + * @open_for_write: is the file / block device opened for writing? * * Only a subset of commands are allowed for unprivileged users. Commands used * to format the media, update the firmware, etc. are not permitted. + * + * Return: %true if the cmd is allowed, otherwise @false. 
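The scsi_error.c hunks above rewrite the old Function:/Purpose:/Arguments: banners as kernel-doc, which scripts/kernel-doc and the Sphinx build can consume. The required shape, sketched on a made-up helper:

        /**
         * foo_report_event() - one-line summary on the first line
         * @shost: host the event was observed on
         * @channel: channel on which the event was observed
         *
         * Free-form description follows the parameter block.
         *
         * Returns: Nothing
         */
        static void foo_report_event(struct Scsi_Host *shost, int channel)
        {
        }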
*/ bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write) { @@ -585,7 +601,7 @@ static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write, } if (bytes) { - err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO); + err = blk_rq_map_kern(rq, buffer, bytes, GFP_NOIO); if (err) goto error; } @@ -859,6 +875,8 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, bool open_for_write, * Description: The scsi_ioctl() function differs from most ioctls in that it * does not take a major/minor number as the dev field. Rather, it takes * a pointer to a &struct scsi_device. + * + * Return: varies depending on the @cmd */ int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd, void __user *arg) @@ -941,8 +959,15 @@ int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd, } EXPORT_SYMBOL(scsi_ioctl); -/* +/** + * scsi_ioctl_block_when_processing_errors - prevent commands from being queued + * @sdev: target scsi device + * @cmd: which ioctl is it + * @ndelay: no delay (non-blocking) + * * We can process a reset even when a device isn't fully operable. + * + * Return: %0 on success, <0 error code. */ int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd, bool ndelay) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 5b3230ef51fe..144c72f0737a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -23,7 +23,7 @@ #include <linux/blk-mq.h> #include <linux/blk-integrity.h> #include <linux/ratelimit.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -32,7 +32,7 @@ #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> -#include <scsi/scsi_transport.h> /* __scsi_init_queue() */ +#include <scsi/scsi_transport.h> /* scsi_init_limits() */ #include <scsi/scsi_dh.h> #include <trace/events/scsi.h> @@ -184,6 +184,10 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) __scsi_queue_insert(cmd, reason, true); } +/** + * scsi_failures_reset_retries - reset all failures to zero + * @failures: &struct scsi_failures with specific failure modes set + */ void scsi_failures_reset_retries(struct scsi_failures *failures) { struct scsi_failure *failure; @@ -210,6 +214,9 @@ static int scsi_check_passthrough(struct scsi_cmnd *scmd, struct scsi_sense_hdr sshdr; enum sam_status status; + if (!scmd->result) + return 0; + if (!failures) return 0; @@ -306,8 +313,7 @@ retry: return PTR_ERR(req); if (bufflen) { - ret = blk_rq_map_kern(sdev->request_queue, req, - buffer, bufflen, GFP_NOIO); + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO); if (ret) goto out; } @@ -631,8 +637,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (blk_update_request(req, error, bytes)) return true; - // XXX: - if (blk_queue_add_random(q)) + if (q->limits.features & BLK_FEAT_ADD_RANDOM) add_disk_randomness(req->q->disk); WARN_ON_ONCE(!blk_rq_is_passthrough(req) && @@ -866,13 +871,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) case 0x1a: /* start stop unit in progress */ case 0x1b: /* sanitize in progress */ case 0x1d: /* configuration in progress */ - case 0x24: /* depopulation in progress */ - case 0x25: /* depopulation restore in progress */ action = ACTION_DELAYED_RETRY; break; case 0x0a: /* ALUA state transition */ action = ACTION_DELAYED_REPREP; break; + /* + * Depopulation might take many hours, + * thus it is not worthwhile to retry. 
+ */ + case 0x24: /* depopulation in progress */ + case 0x25: /* depopulation restore in progress */ + fallthrough; default: action = ACTION_FAIL; break; @@ -1138,11 +1148,11 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) * Next, walk the list, and fill in the addresses and sizes of * each segment. */ - count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); + count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg); - if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { + if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) { unsigned int pad_len = - (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1; last_sg->length += pad_len; cmd->extra_len += pad_len; @@ -1164,7 +1174,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) if (blk_integrity_rq(rq)) { struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; - int ivecs; if (WARN_ON_ONCE(!prot_sdb)) { /* @@ -1176,20 +1185,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) goto out_free_sgtables; } - ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); - - if (sg_alloc_table_chained(&prot_sdb->table, ivecs, + if (sg_alloc_table_chained(&prot_sdb->table, + rq->nr_integrity_segments, prot_sdb->table.sgl, SCSI_INLINE_PROT_SG_CNT)) { ret = BLK_STS_RESOURCE; goto out_free_sgtables; } - count = blk_rq_map_integrity_sg(rq->q, rq->bio, - prot_sdb->table.sgl); - BUG_ON(count > ivecs); - BUG_ON(count > queue_max_integrity_segments(rq->q)); - + count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl); cmd->prot_sdb = prot_sdb; cmd->prot_sdb->table.nents = count; } @@ -1221,6 +1225,15 @@ static void scsi_initialize_rq(struct request *rq) cmd->retries = 0; } +/** + * scsi_alloc_request - allocate a block request and partially + * initialize its &scsi_cmnd + * @q: the device's request queue + * @opf: the request operation code + * @flags: block layer allocation flags + * + * Return: &struct request pointer on success or %NULL on failure + */ struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, blk_mq_req_flags_t flags) { @@ -1239,8 +1252,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request); */ static void scsi_cleanup_rq(struct request *rq) { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + cmd->flags = 0; + if (rq->rq_flags & RQF_DONTPREP) { - scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + scsi_mq_uninit_cmd(cmd); rq->rq_flags &= ~RQF_DONTPREP; } } @@ -1655,13 +1672,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req) if (in_flight) __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); - /* - * Only clear the driver-private command data if the LLD does not supply - * a function to initialize that data. - */ - if (!shost->hostt->init_cmd_priv) - memset(cmd + 1, 0, shost->hostt->cmd_size); - cmd->prot_op = SCSI_PROT_NORMAL; if (blk_rq_bytes(req)) cmd->sc_data_direction = rq_dma_dir(req); @@ -1828,6 +1838,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, if (!scsi_host_queue_ready(q, shost, sdev, cmd)) goto out_dec_target_busy; + /* + * Only clear the driver-private command data if the LLD does not supply + * a function to initialize that data. 
+ */ + if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv) + memset(cmd + 1, 0, shost->hostt->cmd_size); + if (!(req->rq_flags & RQF_DONTPREP)) { ret = scsi_prepare_cmd(req); if (ret != BLK_STS_OK) @@ -1869,7 +1886,6 @@ out_put_budget: case BLK_STS_OK: break; case BLK_STS_RESOURCE: - case BLK_STS_ZONE_RESOURCE: if (scsi_device_blocked(sdev)) ret = BLK_STS_DEV_RESOURCE; break; @@ -1964,42 +1980,40 @@ static void scsi_map_queues(struct blk_mq_tag_set *set) blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); } -void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) +void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) { struct device *dev = shost->dma_dev; - /* - * this limit is imposed by hardware restrictions - */ - blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, - SG_MAX_SEGMENTS)); + memset(lim, 0, sizeof(*lim)); + lim->max_segments = + min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS); if (scsi_host_prot_dma(shost)) { shost->sg_prot_tablesize = min_not_zero(shost->sg_prot_tablesize, (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); - blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); + lim->max_integrity_segments = shost->sg_prot_tablesize; } - blk_queue_max_hw_sectors(q, shost->max_sectors); - blk_queue_segment_boundary(q, shost->dma_boundary); - dma_set_seg_boundary(dev, shost->dma_boundary); - - blk_queue_max_segment_size(q, shost->max_segment_size); - blk_queue_virt_boundary(q, shost->virt_boundary_mask); - dma_set_max_seg_size(dev, queue_max_segment_size(q)); + lim->max_hw_sectors = shost->max_sectors; + lim->seg_boundary_mask = shost->dma_boundary; + lim->max_segment_size = shost->max_segment_size; + lim->virt_boundary_mask = shost->virt_boundary_mask; + lim->dma_alignment = max_t(unsigned int, + shost->dma_alignment, dma_get_cache_alignment() - 1); /* - * Set a reasonable default alignment: The larger of 32-byte (dword), - * which is a common minimum for HBAs, and the minimum DMA alignment, - * which is set by the platform. - * - * Devices that require a bigger alignment can increase it later. + * Propagate the DMA formation properties to the dma-mapping layer as + * a courtesy service to the LLDDs. This needs to check that the buses + * actually support the DMA API first, though. 
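The two hunks above move zeroing of the driver-private command area from scsi_prepare_cmd() into scsi_queue_rq(), and skip it entirely for hosts with cmd_size == 0 or with their own init_cmd_priv(). For context, this is the per-command area an LLD reserves (a hypothetical driver):

        struct foo_cmd_priv {                   /* hypothetical per-cmd data */
                u32 hw_tag;
                bool fast_path;
        };

        static const struct scsi_host_template foo_template = {
                .cmd_size = sizeof(struct foo_cmd_priv),
                /* no .init_cmd_priv: the core zeroes the area each dispatch */
        };

        static struct foo_cmd_priv *foo_priv(struct scsi_cmnd *cmd)
        {
                return scsi_cmd_priv(cmd);      /* memory right after the cmd */
        }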
*/ - blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); + if (dev->dma_parms) { + dma_set_seg_boundary(dev, shost->dma_boundary); + dma_set_max_seg_size(dev, shost->max_segment_size); + } } -EXPORT_SYMBOL_GPL(__scsi_init_queue); +EXPORT_SYMBOL_GPL(scsi_init_limits); static const struct blk_mq_ops scsi_mq_ops_no_commit = { .get_budget = scsi_mq_get_budget, @@ -2072,9 +2086,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) tag_set->queue_depth = shost->can_queue; tag_set->cmd_size = cmd_size; tag_set->numa_node = dev_to_node(shost->dma_dev); - tag_set->flags = BLK_MQ_F_SHOULD_MERGE; - tag_set->flags |= - BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); + if (shost->hostt->tag_alloc_policy_rr) + tag_set->flags |= BLK_MQ_F_TAG_RR; if (shost->queuecommand_may_block) tag_set->flags |= BLK_MQ_F_BLOCKING; tag_set->driver_data = shost; @@ -2728,6 +2741,7 @@ int scsi_device_quiesce(struct scsi_device *sdev) { struct request_queue *q = sdev->request_queue; + unsigned int memflags; int err; /* @@ -2742,7 +2756,7 @@ scsi_device_quiesce(struct scsi_device *sdev) blk_set_pm_only(q); - blk_mq_freeze_queue(q); + memflags = blk_mq_freeze_queue(q); /* * Ensure that the effect of blk_set_pm_only() will be visible * for percpu_ref_tryget() callers that occur after the queue @@ -2750,7 +2764,7 @@ scsi_device_quiesce(struct scsi_device *sdev) * was called. See also https://lwn.net/Articles/573497/. */ synchronize_rcu(); - blk_mq_unfreeze_queue(q); + blk_mq_unfreeze_queue(q, memflags); mutex_lock(&sdev->state_mutex); err = scsi_device_set_state(sdev, SDEV_QUIESCE); @@ -3372,14 +3386,16 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) } EXPORT_SYMBOL(scsi_vpd_lun_id); -/* +/** * scsi_vpd_tpg_id - return a target port group identifier * @sdev: SCSI device + * @rel_id: pointer to return relative target port in if not %NULL * * Returns the Target Port Group identifier from the information - * froom VPD page 0x83 of the device. + * from VPD page 0x83 of the device. + * Optionally sets @rel_id to the relative target port on success. * - * Returns the identifier or error on failure. + * Return: the identifier or error on failure. 
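scsi_device_quiesce() above shows the updated freeze API: blk_mq_freeze_queue() now returns a memalloc flags cookie that must be handed back to blk_mq_unfreeze_queue(). The pattern in isolation:

        static void foo_sync_under_freeze(struct request_queue *q)
        {
                unsigned int memflags;

                memflags = blk_mq_freeze_queue(q);      /* save the cookie */
                /* ... no new requests can enter the queue here ... */
                blk_mq_unfreeze_queue(q, memflags);     /* restore it */
        }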
*/ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) { diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c index 99834426a100..ae8af0e0047a 100644 --- a/drivers/scsi/scsi_lib_test.c +++ b/drivers/scsi/scsi_lib_test.c @@ -67,6 +67,13 @@ static void scsi_lib_test_multiple_sense(struct kunit *test) }; int i; + /* Success */ + sc.result = 0; + KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures)); + KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL)); + /* Command failed but caller did not pass in a failures array */ + scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36); + KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL)); /* Match end of array */ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36); KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures)); diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 9fc397a9ce7a..5b2b19f5e8ec 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -79,8 +79,6 @@ extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, char *strflags, blist_flags_t flags, enum scsi_devinfo_key key); -extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, - enum scsi_devinfo_key key); extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name); extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key); diff --git a/drivers/scsi/scsi_proto_test.c b/drivers/scsi/scsi_proto_test.c index 7fa0a78a2ad1..c093389edabb 100644 --- a/drivers/scsi/scsi_proto_test.c +++ b/drivers/scsi/scsi_proto_test.c @@ -3,7 +3,7 @@ * Copyright 2023 Google LLC */ #include <kunit/test.h> -#include <asm-generic/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi_proto.h> static void test_scsi_proto(struct kunit *test) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index ffd7e7e72933..4833b8fe251b 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -35,7 +35,7 @@ #include <linux/spinlock.h> #include <linux/async.h> #include <linux/slab.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -151,8 +151,9 @@ int scsi_complete_async_scans(void) struct async_scan_data *data; do { - if (list_empty(&scanning_hosts)) - return 0; + scoped_guard(spinlock, &async_scan_lock) + if (list_empty(&scanning_hosts)) + return 0; /* If we can't get memory immediately, that's OK. Just * sleep a little. Even if we never get memory, the async * scans will finish eventually. 
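The new KUnit cases above cover the early-success path added to scsi_check_passthrough(). For reference, a sketch of how a failures table is typically built and passed to scsi_execute_cmd(); the field names (.failure_definitions, and .failures in scsi_exec_args) are recalled from the scsi_failures API and should be checked against scsi_cmnd.h and scsi_device.h:

        /* sketch: retry power-on/reset unit attentions up to three times */
        static int foo_read_page(struct scsi_device *sdev, void *buf, int len)
        {
                struct scsi_failure failure_defs[] = {
                        {
                                .sense = UNIT_ATTENTION,
                                .asc = 0x29,            /* power on / reset */
                                .ascq = 0x00,
                                .allowed = 3,
                                .result = SAM_STAT_CHECK_CONDITION,
                        },
                        {}                              /* terminator */
                };
                struct scsi_failures failures = {
                        .failure_definitions = failure_defs,
                };
                unsigned char cdb[16] = { INQUIRY };    /* illustrative CDB */

                return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, len,
                                        30 * HZ, 0,
                                        &(struct scsi_exec_args){
                                                .failures = &failures, });
        }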
@@ -220,6 +221,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, int new_shift = sbitmap_calculate_shift(depth); bool need_alloc = !sdev->budget_map.map; bool need_free = false; + unsigned int memflags; int ret; struct sbitmap sb_backup; @@ -227,7 +229,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, /* * realloc if new shift is calculated, which is caused by setting - * up one new default queue depth after calling ->slave_configure + * up one new default queue depth after calling ->sdev_configure */ if (!need_alloc && new_shift != sdev->budget_map.shift) need_alloc = need_free = true; @@ -240,12 +242,12 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, * and here disk isn't added yet, so freezing is pretty fast */ if (need_free) { - blk_mq_freeze_queue(sdev->request_queue); + memflags = blk_mq_freeze_queue(sdev->request_queue); sb_backup = sdev->budget_map; } ret = sbitmap_init_node(&sdev->budget_map, scsi_device_max_queue_depth(sdev), - new_shift, GFP_KERNEL, + new_shift, GFP_NOIO, sdev->request_queue->node, false, true); if (!ret) sbitmap_resize(&sdev->budget_map, depth); @@ -256,7 +258,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, else sbitmap_free(&sb_backup); ret = 0; - blk_mq_unfreeze_queue(sdev->request_queue); + blk_mq_unfreeze_queue(sdev->request_queue, memflags); } return ret; } @@ -265,7 +267,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, * scsi_alloc_sdev - allocate and setup a scsi_Device * @starget: which target to allocate a &scsi_device for * @lun: which lun - * @hostdata: usually NULL and set by ->slave_alloc instead + * @hostdata: usually NULL and set by ->sdev_init instead * * Description: * Allocate, initialize for io, and return a pointer to a scsi_Device. 
@@ -283,6 +285,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, struct request_queue *q; int display_failure_msg = 1, ret; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct queue_limits lim; sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, GFP_KERNEL); @@ -311,11 +314,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, sdev->sdev_gendev.parent = get_device(&starget->dev); sdev->sdev_target = starget; - /* usually NULL and set by ->slave_alloc instead */ + /* usually NULL and set by ->sdev_init instead */ sdev->hostdata = hostdata; /* if the device needs this changing, it may do so in the - * slave_configure function */ + * sdev_configure function */ sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; /* @@ -332,7 +335,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, sdev->sg_reserved_size = INT_MAX; - q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL); + scsi_init_limits(shost, &lim); + q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev); if (IS_ERR(q)) { /* release fn is set up in scsi_sysfs_device_initialise, so * have to free and put manually here */ @@ -342,8 +346,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, } kref_get(&sdev->host->tagset_refcnt); sdev->request_queue = q; - q->queuedata = sdev; - __scsi_init_queue(sdev->host, q); depth = sdev->host->cmd_per_lun ?: 1; @@ -363,8 +365,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, scsi_sysfs_device_initialize(sdev); - if (shost->hostt->slave_alloc) { - ret = shost->hostt->slave_alloc(sdev); + if (shost->hostt->sdev_init) { + ret = shost->hostt->sdev_init(sdev); if (ret) { /* * if LLDD reports slave not present, don't clutter @@ -873,6 +875,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, blist_flags_t *bflags, int async) { + const struct scsi_host_template *hostt = sdev->host->hostt; + struct queue_limits lim; int ret; /* @@ -1004,19 +1008,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->select_no_atn = 1; /* - * Maximum 512 sector transfer length - * broken RA4x00 Compaq Disk Array - */ - if (*bflags & BLIST_MAX_512) - blk_queue_max_hw_sectors(sdev->request_queue, 512); - /* - * Max 1024 sector transfer length for targets that report incorrect - * max/optimal lengths and relied on the old block layer safe default - */ - else if (*bflags & BLIST_MAX_1024) - blk_queue_max_hw_sectors(sdev->request_queue, 1024); - - /* * Some devices may not want to have a start command automatically * issued when a device is added. */ @@ -1076,28 +1067,44 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, transport_configure_device(&sdev->sdev_gendev); - if (sdev->host->hostt->slave_configure) { - ret = sdev->host->hostt->slave_configure(sdev); - if (ret) { - /* - * if LLDD reports slave not present, don't clutter - * console with alloc failure messages - */ - if (ret != -ENXIO) { - sdev_printk(KERN_ERR, sdev, - "failed to configure device\n"); - } - return SCSI_SCAN_NO_RESPONSE; - } + /* + * No need to freeze the queue as it isn't reachable to anyone else yet. 
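scsi_alloc_sdev() above now seeds the queue limits before allocation and passes the queuedata pointer in the same call, instead of fixing both up afterwards. The call pattern, reduced to its core (foo_alloc_queue() is illustrative):

        static struct request_queue *foo_alloc_queue(struct Scsi_Host *shost,
                                                     void *queuedata)
        {
                struct queue_limits lim;

                scsi_init_limits(shost, &lim);  /* fill from host parameters */
                return blk_mq_alloc_queue(&shost->tag_set, &lim, queuedata);
        }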
+ */ + lim = queue_limits_start_update(sdev->request_queue); + if (*bflags & BLIST_MAX_512) + lim.max_hw_sectors = 512; + else if (*bflags & BLIST_MAX_1024) + lim.max_hw_sectors = 1024; + if (hostt->sdev_configure) + ret = hostt->sdev_configure(sdev, &lim); + if (ret) { + queue_limits_cancel_update(sdev->request_queue); /* - * The queue_depth is often changed in ->slave_configure. - * Set up budget map again since memory consumption of - * the map depends on actual queue depth. + * If the LLDD reports device not present, don't clutter the + * console with failure messages. */ - scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); + if (ret != -ENXIO) + sdev_printk(KERN_ERR, sdev, + "failed to configure device\n"); + return SCSI_SCAN_NO_RESPONSE; + } + + ret = queue_limits_commit_update(sdev->request_queue, &lim); + if (ret) { + sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n"); + return SCSI_SCAN_NO_RESPONSE; } + /* + * The queue_depth is often changed in ->sdev_configure. + * + * Set up budget map again since memory consumption of the map depends + * on actual queue depth. + */ + if (hostt->sdev_configure) + scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); + if (sdev->scsi_level >= SCSI_3) scsi_attach_vpd(sdev); @@ -1629,6 +1636,24 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, } EXPORT_SYMBOL(__scsi_add_device); +/** + * scsi_add_device - creates a new SCSI (LU) instance + * @host: the &Scsi_Host instance where the device is located + * @channel: target channel number (rarely other than %0) + * @target: target id number + * @lun: LUN of target device + * + * Probe for a specific LUN and add it if found. + * + * Notes: This call is usually performed internally during a SCSI + * bus scan when an HBA is added (i.e. scsi_scan_host()). So it + * should only be called if the HBA becomes aware of a new SCSI + * device (LU) after scsi_scan_host() has completed. If successful + * this call can lead to sdev_init() and sdev_configure() callbacks + * into the LLD. 
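With the scsi_add_lun() rework above, an LLD adjusts limits through the queue_limits snapshot it is handed rather than calling blk_queue_* helpers on a live queue. A hypothetical callback under the new contract:

        static int foo_sdev_configure(struct scsi_device *sdev,
                                      struct queue_limits *lim)
        {
                /* hypothetical hardware limit: 1 MiB per request */
                lim->max_hw_sectors = 2048;

                /* depth changes still go through the core helper */
                scsi_change_queue_depth(sdev, 64);
                return 0;
        }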
+ * + * Return: %0 on success or negative error code on failure + */ int scsi_add_device(struct Scsi_Host *host, uint channel, uint target, u64 lun) { @@ -2020,6 +2045,8 @@ static void do_scan_async(void *_data, async_cookie_t c) /** * scsi_scan_host - scan the given adapter * @shost: adapter to scan + * + * Notes: Should be called after scsi_add_host() **/ void scsi_scan_host(struct Scsi_Host *shost) { diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c index 093774d77534..055a03a83ad6 100644 --- a/drivers/scsi/scsi_sysctl.c +++ b/drivers/scsi/scsi_sysctl.c @@ -12,12 +12,14 @@ #include "scsi_priv.h" -static struct ctl_table scsi_table[] = { +static const struct ctl_table scsi_table[] = { { .procname = "logging_level", .data = &scsi_logging_level, .maxlen = sizeof(scsi_logging_level), .mode = 0644, - .proc_handler = proc_dointvec }, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX }, }; static struct ctl_table_header *scsi_table_header; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 775df00021e4..d772258e29ad 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -528,7 +528,7 @@ static struct class sdev_class = { }; /* all probing is done in the individual ->probe routines */ -static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) +static int scsi_bus_match(struct device *dev, const struct device_driver *gendrv) { struct scsi_device *sdp; @@ -661,7 +661,7 @@ static int scsi_sdev_check_buf_bit(const char *buf) return 1; else if (buf[0] == '0') return 0; - else + else return -EINVAL; } else return -EINVAL; @@ -886,7 +886,7 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr, if (!sdev->tagged_supported) return -EINVAL; - + sdev_printk(KERN_INFO, sdev, "ignoring write to deprecated queue_type attribute"); return count; @@ -898,7 +898,7 @@ static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, #define sdev_vpd_pg_attr(_page) \ static ssize_t \ show_vpd_##_page(struct file *filp, struct kobject *kobj, \ - struct bin_attribute *bin_attr, \ + const struct bin_attribute *bin_attr, \ char *buf, loff_t off, size_t count) \ { \ struct device *dev = kobj_to_dev(kobj); \ @@ -914,10 +914,10 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \ rcu_read_unlock(); \ return ret; \ } \ -static struct bin_attribute dev_attr_vpd_##_page = { \ +static const struct bin_attribute dev_attr_vpd_##_page = { \ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ .size = 0, \ - .read = show_vpd_##_page, \ + .read_new = show_vpd_##_page, \ }; sdev_vpd_pg_attr(pg83); @@ -930,7 +930,7 @@ sdev_vpd_pg_attr(pgb7); sdev_vpd_pg_attr(pg0); static ssize_t show_inquiry(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -943,13 +943,13 @@ static ssize_t show_inquiry(struct file *filep, struct kobject *kobj, sdev->inquiry_len); } -static struct bin_attribute dev_attr_inquiry = { +static const struct bin_attribute dev_attr_inquiry = { .attr = { .name = "inquiry", .mode = S_IRUGO, }, .size = 0, - .read = show_inquiry, + .read_new = show_inquiry, }; static ssize_t @@ -1274,7 +1274,7 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, } static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, - struct bin_attribute *attr, int i) + const struct bin_attribute *attr, int i) { struct 
device *dev = kobj_to_dev(kobj); struct scsi_device *sdev = to_scsi_device(dev); @@ -1348,7 +1348,7 @@ static struct attribute *scsi_sdev_attrs[] = { NULL }; -static struct bin_attribute *scsi_sdev_bin_attrs[] = { +static const struct bin_attribute *const scsi_sdev_bin_attrs[] = { &dev_attr_vpd_pg0, &dev_attr_vpd_pg83, &dev_attr_vpd_pg80, @@ -1362,7 +1362,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = { }; static struct attribute_group scsi_sdev_attr_group = { .attrs = scsi_sdev_attrs, - .bin_attrs = scsi_sdev_bin_attrs, + .bin_attrs_new = scsi_sdev_bin_attrs, .is_visible = scsi_sdev_attr_is_visible, .is_bin_visible = scsi_sdev_bin_attr_is_visible, }; @@ -1513,8 +1513,8 @@ void __scsi_remove_device(struct scsi_device *sdev) kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); cancel_work_sync(&sdev->requeue_work); - if (sdev->host->hostt->slave_destroy) - sdev->host->hostt->slave_destroy(sdev); + if (sdev->host->hostt->sdev_destroy) + sdev->host->hostt->sdev_destroy(sdev); transport_destroy_device(dev); /* @@ -1609,13 +1609,14 @@ restart: } EXPORT_SYMBOL(scsi_remove_target); -int scsi_register_driver(struct device_driver *drv) +int __scsi_register_driver(struct device_driver *drv, struct module *owner) { drv->bus = &scsi_bus_type; + drv->owner = owner; return driver_register(drv); } -EXPORT_SYMBOL(scsi_register_driver); +EXPORT_SYMBOL(__scsi_register_driver); int scsi_register_interface(struct class_interface *intf) { diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 41a950075913..b3baae91e7a2 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -5,7 +5,7 @@ */ #include <linux/kernel.h> #include <linux/trace_seq.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <trace/events/scsi.h> #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) @@ -326,6 +326,26 @@ out: } static const char * +scsi_trace_atomic_write16_out(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + unsigned int boundary_size; + unsigned int nr_blocks; + sector_t lba; + + lba = get_unaligned_be64(&cdb[2]); + boundary_size = get_unaligned_be16(&cdb[10]); + nr_blocks = get_unaligned_be16(&cdb[12]); + + trace_seq_printf(p, "lba=%llu txlen=%u boundary_size=%u", + lba, nr_blocks, boundary_size); + + trace_seq_putc(p, 0); + + return ret; +} + +static const char * scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) { switch (SERVICE_ACTION32(cdb)) { @@ -385,6 +405,8 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) return scsi_trace_zbc_in(p, cdb, len); case ZBC_OUT: return scsi_trace_zbc_out(p, cdb, len); + case WRITE_ATOMIC_16: + return scsi_trace_atomic_write16_out(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index b04075f19445..6b165a3ec6de 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -441,18 +441,13 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, fc_host->next_vport_number = 0; fc_host->npiv_vports_inuse = 0; - snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), - "fc_wq_%d", shost->host_no); - fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name); + fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no); if (!fc_host->work_q) return -ENOMEM; fc_host->dev_loss_tmo = fc_dev_loss_tmo; - snprintf(fc_host->devloss_work_q_name, - sizeof(fc_host->devloss_work_q_name), - 
"fc_dl_%d", shost->host_no); - fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0, - fc_host->devloss_work_q_name); + fc_host->devloss_work_q = alloc_workqueue("fc_dl_%d", 0, 0, + shost->host_no); if (!fc_host->devloss_work_q) { destroy_workqueue(fc_host->work_q); fc_host->work_q = NULL; @@ -1255,7 +1250,7 @@ static ssize_t fc_rport_set_marginal_state(struct device *dev, */ if (rport->port_state == FC_PORTSTATE_ONLINE) rport->port_state = port_state; - else + else if (port_state != rport->port_state) return -EINVAL; } else if (port_state == FC_PORTSTATE_ONLINE) { /* @@ -1265,7 +1260,7 @@ static ssize_t fc_rport_set_marginal_state(struct device *dev, */ if (rport->port_state == FC_PORTSTATE_MARGINAL) rport->port_state = port_state; - else + else if (port_state != rport->port_state) return -EINVAL; } else return -EINVAL; @@ -3514,7 +3509,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) * state as the LLDD would not have had an rport * reference to pass us. * - * Take no action on the del_timer failure as the state + * Take no action on the timer_delete() failure as the state * machine state change will validate the * transaction. */ @@ -4276,6 +4271,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) { struct device *dev = &shost->shost_gendev; struct fc_internal *i = to_fc_internal(shost->transportt); + struct queue_limits lim; struct request_queue *q; char bsg_name[20]; @@ -4286,16 +4282,16 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) snprintf(bsg_name, sizeof(bsg_name), "fc_host%d", shost->host_no); - - q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout, - i->f->dd_bsg_size); + scsi_init_limits(shost, &lim); + lim.max_segments = min_not_zero(lim.max_segments, i->f->max_bsg_segments); + q = bsg_setup_queue(dev, bsg_name, &lim, fc_bsg_dispatch, + fc_bsg_job_timeout, i->f->dd_bsg_size); if (IS_ERR(q)) { dev_err(dev, "fc_host%d: bsg interface failed to initialize - setup queue\n", shost->host_no); return PTR_ERR(q); } - __scsi_init_queue(shost, q); blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); fc_host->rqst_q = q; return 0; @@ -4311,6 +4307,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) { struct device *dev = &rport->dev; struct fc_internal *i = to_fc_internal(shost->transportt); + struct queue_limits lim; struct request_queue *q; rport->rqst_q = NULL; @@ -4318,13 +4315,14 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) if (!i->f->bsg_request) return -ENOTSUPP; - q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep, + scsi_init_limits(shost, &lim); + lim.max_segments = min_not_zero(lim.max_segments, i->f->max_bsg_segments); + q = bsg_setup_queue(dev, dev_name(dev), &lim, fc_bsg_dispatch_prep, fc_bsg_job_timeout, i->f->dd_bsg_size); if (IS_ERR(q)) { dev_err(dev, "failed to setup bsg queue\n"); return PTR_ERR(q); } - __scsi_init_queue(shost, q); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); rport->rqst_q = q; return 0; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index af3ac6346796..0b8c91bf793f 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1204,7 +1204,7 @@ static const struct device_type iscsi_flashnode_conn_dev_type = { static const struct bus_type iscsi_flashnode_bus; int iscsi_flashnode_bus_match(struct device *dev, - struct device_driver *drv) + const struct device_driver *drv) { if (dev->bus == &iscsi_flashnode_bus) return 1; @@ -1324,7 +1324,7 @@ 
EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); * 1 on success * 0 on failure */ -static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) +static int iscsi_is_flashnode_conn_dev(struct device *dev, const void *data) { return dev->bus == &iscsi_flashnode_bus; } @@ -1335,7 +1335,7 @@ static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) return 0; } -static int flashnode_match_index(struct device *dev, void *data) +static int flashnode_match_index(struct device *dev, const void *data) { struct iscsi_bus_flash_session *fnode_sess = NULL; int ret = 0; @@ -1344,7 +1344,7 @@ static int flashnode_match_index(struct device *dev, void *data) goto exit_match_index; fnode_sess = iscsi_dev_to_flash_session(dev); - ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0; + ret = (fnode_sess->target_id == *((const int *)data)) ? 1 : 0; exit_match_index: return ret; @@ -1389,8 +1389,8 @@ iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) * %NULL on failure */ struct device * -iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data, + device_match_t fn) { return device_find_child(&shost->shost_gendev, data, fn); } @@ -1535,6 +1535,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) { struct device *dev = &shost->shost_gendev; struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + struct queue_limits lim; struct request_queue *q; char bsg_name[20]; @@ -1542,13 +1543,14 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) return -ENOTSUPP; snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); - q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0); + scsi_init_limits(shost, &lim); + q = bsg_setup_queue(dev, bsg_name, &lim, iscsi_bsg_host_dispatch, NULL, + 0); if (IS_ERR(q)) { shost_printk(KERN_ERR, shost, "bsg interface failed to " "initialize - no request queue\n"); return PTR_ERR(q); } - __scsi_init_queue(shost, q); ihost->bsg_q = q; return 0; @@ -1603,7 +1605,6 @@ static DEFINE_MUTEX(rx_queue_mutex); static LIST_HEAD(sesslist); static DEFINE_SPINLOCK(sesslock); static LIST_HEAD(connlist); -static LIST_HEAD(connlist_err); static DEFINE_SPINLOCK(connlock); static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn) @@ -2121,33 +2122,6 @@ destroy_wq: } EXPORT_SYMBOL_GPL(iscsi_add_session); -/** - * iscsi_create_session - create iscsi class session - * @shost: scsi host - * @transport: iscsi transport - * @dd_size: private driver data size - * @target_id: which target - * - * This can be called from a LLD or iscsi_transport. 
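The flashnode hunks above constify the match callbacks so they conform to device_match_t, the type device_find_child() now takes. The shape of such a callback (the foo_* names are illustrative):

        static int foo_match_index(struct device *dev, const void *data)
        {
                const int *idx = data;          /* note: const on the data side */

                return foo_dev_index(dev) == *idx;      /* hypothetical accessor */
        }

        /* usage: child = device_find_child(parent, &wanted, foo_match_index); */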
- */ -struct iscsi_cls_session * -iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport, - int dd_size, unsigned int target_id) -{ - struct iscsi_cls_session *session; - - session = iscsi_alloc_session(shost, transport, dd_size); - if (!session) - return NULL; - - if (iscsi_add_session(session, target_id)) { - iscsi_free_session(session); - return NULL; - } - return session; -} -EXPORT_SYMBOL_GPL(iscsi_create_session); - static void iscsi_conn_release(struct device *dev) { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); @@ -3208,11 +3182,14 @@ iscsi_set_host_param(struct iscsi_transport *transport, } /* see similar check in iscsi_if_set_param() */ - if (strlen(data) > ev->u.set_host_param.len) - return -EINVAL; + if (strlen(data) > ev->u.set_host_param.len) { + err = -EINVAL; + goto out; + } err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); +out: scsi_host_put(shost); return err; } @@ -4103,7 +4080,7 @@ iscsi_if_rx(struct sk_buff *skb) } do { /* - * special case for GET_STATS: + * special case for GET_STATS, GET_CHAP and GET_HOST_STATS: * on success - sending reply and stats from * inside of if_recv_msg(), * on error - fall through. @@ -4112,6 +4089,8 @@ iscsi_if_rx(struct sk_buff *skb) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; + if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err) + break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); if (err == -EAGAIN && --retries < 0) { diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index d704c484a251..351b028ef893 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -197,7 +197,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) } if (rphy) { - q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), + q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), NULL, sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); @@ -206,7 +206,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) char name[20]; snprintf(name, sizeof(name), "sas_host%d", shost->host_no); - q = bsg_setup_queue(&shost->shost_gendev, name, + q = bsg_setup_queue(&shost->shost_gendev, name, NULL, sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); @@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) } EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); +/** + * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support + * @sdev: SCSI device + * + * Check if an ATA device supports NCQ priority using VPD page 89h (ATA + * Information). Since this VPD page is implemented only for ATA devices, + * this function always returns false for SCSI devices. 
+ */ +bool sas_ata_ncq_prio_supported(struct scsi_device *sdev) +{ + struct scsi_vpd *vpd; + bool ncq_prio_supported = false; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (vpd && vpd->len >= 214) + ncq_prio_supported = (vpd->data[213] >> 4) & 1; + rcu_read_unlock(); + + return ncq_prio_supported; +} +EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported); + /* * SAS Phy attributes */ @@ -865,7 +888,8 @@ static void sas_port_delete_link(struct sas_port *port, sysfs_remove_link(&phy->dev.kobj, "port"); } -/** sas_port_alloc - allocate and initialize a SAS port structure +/** + * sas_port_alloc - allocate and initialize a SAS port structure * * @parent: parent device * @port_id: port number @@ -874,7 +898,7 @@ static void sas_port_delete_link(struct sas_port *port, * below the device specified by @parent which must be either a Scsi_Host * or a sas_expander_device. * - * Returns %NULL on error + * Returns: %NULL on error */ struct sas_port *sas_port_alloc(struct device *parent, int port_id) { @@ -909,7 +933,8 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id) } EXPORT_SYMBOL(sas_port_alloc); -/** sas_port_alloc_num - allocate and initialize a SAS port structure +/** + * sas_port_alloc_num - allocate and initialize a SAS port structure * * @parent: parent device * @@ -919,7 +944,7 @@ EXPORT_SYMBOL(sas_port_alloc); * the device tree below the device specified by @parent which must be * either a Scsi_Host or a sas_expander_device. * - * Returns %NULL on error + * Returns: %NULL on error */ struct sas_port *sas_port_alloc_num(struct device *parent) { diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 64852e6df3e3..fe47850a8258 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -985,7 +985,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) } -/** spi_dv_device - Do Domain Validation on the device +/** + * spi_dv_device - Do Domain Validation on the device * @sdev: scsi device to validate * * Performs the domain validation on the given device in the diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 64f6b22e8cc0..aeb58a9e6b7f 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -388,7 +388,7 @@ static void srp_reconnect_work(struct work_struct *work) "reconnect attempt %d failed (%d)\n", ++rport->failed_reconnects, res); delay = rport->reconnect_delay * - min(100, max(1, rport->failed_reconnects - 10)); + clamp(rport->failed_reconnects - 10, 1, 100); if (delay > 0) queue_delayed_work(system_long_wq, &rport->reconnect_work, delay * HZ); diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index e2c7d8ef205f..19e6c3852d50 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -18,7 +18,7 @@ #include <linux/blkdev.h> #include <linux/pagemap.h> #include <linux/msdos_partition.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsicam.h> @@ -32,7 +32,7 @@ */ unsigned char *scsi_bios_ptable(struct block_device *dev) { - struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping; + struct address_space *mapping = bdev_whole(dev)->bd_mapping; unsigned char *res = NULL; struct folio *folio; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 58fdf679341d..3f6e87705b62 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -33,11 +33,11 @@ * than the level indicated above to trigger output. 
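The srp_reconnect_work() change above folds a min()/max() chain into clamp(), which takes its bounds in low, high order. The two forms are equivalent:

        #include <linux/minmax.h>

        /* grows linearly after ten failed reconnects, capped at 100 */
        static int foo_delay_factor(int failed_reconnects)
        {
                return clamp(failed_reconnects - 10, 1, 100);
                /* == min(100, max(1, failed_reconnects - 10)) */
        }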
*/ +#include <linux/bio-integrity.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> -#include <linux/bio.h> #include <linux/hdreg.h> #include <linux/errno.h> #include <linux/idr.h> @@ -57,12 +57,13 @@ #include <linux/pr.h> #include <linux/t10-pi.h> #include <linux/uaccess.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> +#include <scsi/scsi_devinfo.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> @@ -101,12 +102,13 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); #define SD_MINORS 16 -static void sd_config_discard(struct scsi_disk *, unsigned int); -static void sd_config_write_same(struct scsi_disk *); +static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, + unsigned int mode); +static void sd_config_write_same(struct scsi_disk *sdkp, + struct queue_limits *lim); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static void sd_shutdown(struct device *); -static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); static DEFINE_IDA(sd_index_ida); @@ -119,17 +121,18 @@ static const char *sd_cache_types[] = { "write back, no read (daft)" }; -static void sd_set_flush_flag(struct scsi_disk *sdkp) +static void sd_set_flush_flag(struct scsi_disk *sdkp, + struct queue_limits *lim) { - bool wc = false, fua = false; - if (sdkp->WCE) { - wc = true; + lim->features |= BLK_FEAT_WRITE_CACHE; if (sdkp->DPOFUA) - fua = true; + lim->features |= BLK_FEAT_FUA; + else + lim->features &= ~BLK_FEAT_FUA; + } else { + lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); } - - blk_queue_write_cache(sdkp->disk->queue, wc, fua); } static ssize_t @@ -167,9 +170,17 @@ cache_type_store(struct device *dev, struct device_attribute *attr, wce = (ct & 0x02) && !sdkp->write_prot ? 
1 : 0; if (sdkp->cache_override) { + struct queue_limits lim; + sdkp->WCE = wce; sdkp->RCD = rcd; - sd_set_flush_flag(sdkp); + + lim = queue_limits_start_update(sdkp->disk->queue); + sd_set_flush_flag(sdkp, &lim); + ret = queue_limits_commit_update_frozen(sdkp->disk->queue, + &lim); + if (ret) + return ret; return count; } @@ -456,16 +467,12 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; - int mode; + struct queue_limits lim; + int mode, err; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (sd_is_zoned(sdkp)) { - sd_config_discard(sdkp, SD_LBP_DISABLE); - return count; - } - if (sdp->type != TYPE_DISK) return -EINVAL; @@ -473,8 +480,11 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr, if (mode < 0) return -EINVAL; - sd_config_discard(sdkp, mode); - + lim = queue_limits_start_update(sdkp->disk->queue); + sd_config_discard(sdkp, &lim, mode); + err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim); + if (err) + return err; return count; } static DEVICE_ATTR_RW(provisioning_mode); @@ -557,6 +567,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; + struct queue_limits lim; unsigned long max; int err; @@ -578,8 +589,11 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, sdkp->max_ws_blocks = max; } - sd_config_write_same(sdkp); - + lim = queue_limits_start_update(sdkp->disk->queue); + sd_config_write_same(sdkp, &lim); + err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim); + if (err) + return err; return count; } static DEVICE_ATTR_RW(max_write_same_blocks); @@ -795,14 +809,14 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM; - if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) + if (bio_integrity_flagged(bio, BIP_CHECK_GUARD)) scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; } if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */ scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; - if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) + if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG)) scmd->prot_flags |= SCSI_PROT_REF_CHECK; } @@ -822,25 +836,28 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, return protect; } -static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) +static void sd_disable_discard(struct scsi_disk *sdkp) +{ + sdkp->provisioning_mode = SD_LBP_DISABLE; + blk_queue_disable_discard(sdkp->disk->queue); +} + +static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, + unsigned int mode) { - struct request_queue *q = sdkp->disk->queue; unsigned int logical_block_size = sdkp->device->sector_size; unsigned int max_blocks = 0; - q->limits.discard_alignment = - sdkp->unmap_alignment * logical_block_size; - q->limits.discard_granularity = - max(sdkp->physical_block_size, - sdkp->unmap_granularity * logical_block_size); + lim->discard_alignment = sdkp->unmap_alignment * logical_block_size; + lim->discard_granularity = max(sdkp->physical_block_size, + sdkp->unmap_granularity * logical_block_size); sdkp->provisioning_mode = mode; switch (mode) { case SD_LBP_FULL: case SD_LBP_DISABLE: - blk_queue_max_discard_sectors(q, 0); - return; + break; case SD_LBP_UNMAP: max_blocks = 
min_not_zero(sdkp->max_unmap_blocks, @@ -871,7 +888,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) break; } - blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); + lim->max_hw_discard_sectors = max_blocks * + (logical_block_size >> SECTOR_SHIFT); } static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) @@ -917,6 +935,65 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) return scsi_alloc_sgtables(cmd); } +static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim) +{ + unsigned int logical_block_size = sdkp->device->sector_size, + physical_block_size_sectors, max_atomic, unit_min, unit_max; + + if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) || + sdkp->protection_type == T10_PI_TYPE2_PROTECTION) + return; + + physical_block_size_sectors = sdkp->physical_block_size / + sdkp->device->sector_size; + + unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ? + sdkp->atomic_granularity : + physical_block_size_sectors); + + /* + * Only use atomic boundary when we have the odd scenario of + * sdkp->max_atomic == 0, which the spec does permit. + */ + if (sdkp->max_atomic) { + max_atomic = sdkp->max_atomic; + unit_max = rounddown_pow_of_two(sdkp->max_atomic); + sdkp->use_atomic_write_boundary = 0; + } else { + max_atomic = sdkp->max_atomic_with_boundary; + unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary); + sdkp->use_atomic_write_boundary = 1; + } + + /* + * Ensure compliance with granularity and alignment. For now, keep it + * simple and just don't support atomic writes for values mismatched + * with max_{boundary}atomic, physical block size, and + * atomic_granularity itself. + * + * We're really being distrustful by checking unit_max also... 
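sd_config_atomic() above derives the atomic write unit bounds by rounding the reported VPD B0h values down to powers of two and rejecting mismatches against the granularity. A worked example with hypothetical device values and 512-byte logical blocks:

        /*
         * MAXIMUM ATOMIC TRANSFER LENGTH = 96 -> unit_max = rounddown_pow_of_two(96) = 64
         * ATOMIC GRANULARITY             = 8  -> unit_min = rounddown_pow_of_two(8)  = 8
         *
         * 64 % 8 == 0 and 8 % 8 == 0, so the limits are accepted:
         *   atomic_write_hw_unit_min = 8  * 512 = 4096  bytes
         *   atomic_write_hw_unit_max = 64 * 512 = 32768 bytes
         *   atomic_write_hw_max      = 96 * 512 = 49152 bytes
         */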
+ */ + if (sdkp->atomic_granularity > 1) { + if (unit_min > 1 && unit_min % sdkp->atomic_granularity) + return; + if (unit_max > 1 && unit_max % sdkp->atomic_granularity) + return; + } + + if (sdkp->atomic_alignment > 1) { + if (unit_min > 1 && unit_min % sdkp->atomic_alignment) + return; + if (unit_max > 1 && unit_max % sdkp->atomic_alignment) + return; + } + + lim->atomic_write_hw_max = max_atomic * logical_block_size; + lim->atomic_write_hw_boundary = 0; + lim->atomic_write_hw_unit_min = unit_min * logical_block_size; + lim->atomic_write_hw_unit_max = unit_max * logical_block_size; + lim->features |= BLK_FEAT_ATOMIC_WRITES; +} + static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) { @@ -999,9 +1076,16 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) return sd_setup_write_same10_cmnd(cmd, false); } -static void sd_config_write_same(struct scsi_disk *sdkp) +static void sd_disable_write_same(struct scsi_disk *sdkp) +{ + sdkp->device->no_write_same = 1; + sdkp->max_ws_blocks = 0; + blk_queue_disable_write_zeroes(sdkp->disk->queue); +} + +static void sd_config_write_same(struct scsi_disk *sdkp, + struct queue_limits *lim) { - struct request_queue *q = sdkp->disk->queue; unsigned int logical_block_size = sdkp->device->sector_size; if (sdkp->device->no_write_same) { @@ -1055,8 +1139,8 @@ static void sd_config_write_same(struct scsi_disk *sdkp) } out: - blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * - (logical_block_size >> 9)); + lim->max_write_zeroes_sectors = + sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT); } static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) @@ -1102,8 +1186,8 @@ static u8 sd_group_number(struct scsi_cmnd *cmd) if (!sdkp->rscs) return 0; - return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count, - 0x3fu); + return min3((u32)rq->bio->bi_write_hint, + (u32)sdkp->permanent_stream_count, 0x3fu); } static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, @@ -1208,6 +1292,26 @@ static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1; } +static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd, + sector_t lba, unsigned int nr_blocks, + bool boundary, unsigned char flags) +{ + cmd->cmd_len = 16; + cmd->cmnd[0] = WRITE_ATOMIC_16; + cmd->cmnd[1] = flags; + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[12]); + if (boundary) + put_unaligned_be16(nr_blocks, &cmd->cmnd[10]); + else + put_unaligned_be16(0, &cmd->cmnd[10]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[12]); + cmd->cmnd[14] = 0; + cmd->cmnd[15] = 0; + + return BLK_STS_OK; +} + static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); @@ -1260,12 +1364,6 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) } } - if (req_op(rq) == REQ_OP_ZONE_APPEND) { - ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks); - if (ret) - goto fail; - } - fua = rq->cmd_flags & REQ_FUA ? 
0x8 : 0; dix = scsi_prot_sg_count(cmd); dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); @@ -1279,11 +1377,15 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); + } else if (rq->cmd_flags & REQ_ATOMIC) { + ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks, + sdkp->use_atomic_write_boundary, + protect | fua); } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || - sdp->use_10_for_rw || protect || rq->write_hint) { + sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) { ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, protect | fua); } else { @@ -1348,7 +1450,6 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd) return sd_setup_flush_cmnd(cmd); case REQ_OP_READ: case REQ_OP_WRITE: - case REQ_OP_ZONE_APPEND: return sd_setup_read_write_cmnd(cmd); case REQ_OP_ZONE_RESET: return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, @@ -1717,13 +1818,15 @@ static int sd_sync_cache(struct scsi_disk *sdkp) (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */ /* this is no error here */ return 0; + /* - * This drive doesn't support sync and there's not much - * we can do because this is called during shutdown - * or suspend so just return success so those operations - * can proceed. + * If a format is in progress or if the drive does not + * support sync, there is not much we can do because + * this is called during shutdown or suspend so just + * return success so those operations can proceed. */ - if (sshdr.sense_key == ILLEGAL_REQUEST) + if ((sshdr.asc == 0x04 && sshdr.ascq == 0x04) || + sshdr.sense_key == ILLEGAL_REQUEST) return 0; } @@ -2253,15 +2356,14 @@ static int sd_done(struct scsi_cmnd *SCpnt) case 0x24: /* INVALID FIELD IN CDB */ switch (SCpnt->cmnd[0]) { case UNMAP: - sd_config_discard(sdkp, SD_LBP_DISABLE); + sd_disable_discard(sdkp); break; case WRITE_SAME_16: case WRITE_SAME: if (SCpnt->cmnd[1] & 8) { /* UNMAP */ - sd_config_discard(sdkp, SD_LBP_DISABLE); + sd_disable_discard(sdkp); } else { - sdkp->device->no_write_same = 1; - sd_config_write_same(sdkp); + sd_disable_write_same(sdkp); req->rq_flags |= RQF_QUIET; } break; @@ -2273,7 +2375,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) } out: - if (sd_is_zoned(sdkp)) + if (sdkp->device->type == TYPE_ZBC) good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, @@ -2467,11 +2569,13 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer return 0; } -static void sd_config_protection(struct scsi_disk *sdkp) +static void sd_config_protection(struct scsi_disk *sdkp, + struct queue_limits *lim) { struct scsi_device *sdp = sdkp->device; - sd_dif_config_host(sdkp); + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) + sd_dif_config_host(sdkp, lim); if (!sdkp->protection_type) return; @@ -2520,7 +2624,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, #define READ_CAPACITY_RETRIES_ON_RESET 10 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, - unsigned char *buffer) + struct queue_limits *lim, unsigned char *buffer) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; @@ -2594,7 +2698,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device 
*sdp, /* Lowest aligned logical block */ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; - blk_queue_alignment_offset(sdp->request_queue, alignment); + lim->alignment_offset = alignment; if (alignment && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "physical block alignment offset: %u\n", alignment); @@ -2604,8 +2708,6 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, if (buffer[14] & 0x40) /* LBPRZ */ sdkp->lbprz = 1; - - sd_config_discard(sdkp, SD_LBP_WS16); } sdkp->capacity = lba + 1; @@ -2708,13 +2810,14 @@ static int sd_try_rc16_first(struct scsi_device *sdp) * read disk capacity */ static void -sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) +sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim, + unsigned char *buffer) { int sector_size; struct scsi_device *sdp = sdkp->device; if (sd_try_rc16_first(sdp)) { - sector_size = read_capacity_16(sdkp, sdp, buffer); + sector_size = read_capacity_16(sdkp, sdp, lim, buffer); if (sector_size == -EOVERFLOW) goto got_data; if (sector_size == -ENODEV) @@ -2734,7 +2837,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) int old_sector_size = sector_size; sd_printk(KERN_NOTICE, sdkp, "Very big device. " "Trying to use READ CAPACITY(16).\n"); - sector_size = read_capacity_16(sdkp, sdp, buffer); + sector_size = read_capacity_16(sdkp, sdp, lim, buffer); if (sector_size < 0) { sd_printk(KERN_NOTICE, sdkp, "Using 0xffffffff as device size\n"); @@ -2793,9 +2896,8 @@ got_data: */ sector_size = 512; } - blk_queue_logical_block_size(sdp->request_queue, sector_size); - blk_queue_physical_block_size(sdp->request_queue, - sdkp->physical_block_size); + lim->logical_block_size = sector_size; + lim->physical_block_size = sdkp->physical_block_size; sdkp->device->sector_size = sector_size; if (sdkp->capacity > 0xffffffff) @@ -3113,17 +3215,21 @@ static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id) return false; if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status)) return false; - return buf.h.stream_status[0].perm; + return buf.s.perm; } static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer) { struct scsi_device *sdp = sdkp->device; const struct scsi_io_group_descriptor *desc, *start, *end; + u16 permanent_stream_count_old; struct scsi_sense_hdr sshdr; struct scsi_mode_data data; int res; + if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS) + return; + res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a, /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT, sdkp->max_retries, &data, &sshdr); @@ -3140,12 +3246,13 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer) for (desc = start; desc < end; desc++) if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start)) break; + permanent_stream_count_old = sdkp->permanent_stream_count; sdkp->permanent_stream_count = desc - start; if (sdkp->rscs && sdkp->permanent_stream_count < 2) sd_printk(KERN_INFO, sdkp, "Unexpected: RSCS has been set and the permanent stream count is %u\n", sdkp->permanent_stream_count); - else if (sdkp->permanent_stream_count) + else if (sdkp->permanent_stream_count != permanent_stream_count_old) sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n", sdkp->permanent_stream_count); } @@ -3196,11 +3303,33 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) return; } -/** - * sd_read_block_limits - Query disk device for preferred I/O sizes. 
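For readers new to atomic writes, the sd_setup_atomic_cmnd() hunk above packs the WRITE ATOMIC(16) CDB at fixed big-endian offsets: opcode 0x9C in byte 0, flags in byte 1, the LBA in bytes 2-9, the ATOMIC BOUNDARY field in bytes 10-11 (zero when no boundary is requested) and the transfer length in bytes 12-13 (as shown, the length ends up stored twice, which is redundant but harmless since both writes carry the same value). A minimal standalone sketch of that layout; the put_be*() helpers stand in for the kernel's put_unaligned_be*() and every value below is invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the kernel's put_unaligned_be64()/put_unaligned_be16(). */
    static void put_be64(uint64_t v, uint8_t *p)
    {
        for (int i = 0; i < 8; i++)
            p[i] = (uint8_t)(v >> (8 * (7 - i)));
    }

    static void put_be16(uint16_t v, uint8_t *p)
    {
        p[0] = (uint8_t)(v >> 8);
        p[1] = (uint8_t)v;
    }

    int main(void)
    {
        uint8_t cdb[16];
        uint64_t lba = 0x123456;    /* invented starting LBA */
        uint16_t nr_blocks = 8;     /* invented transfer length */
        int boundary = 0;

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = 0x9c;                                /* WRITE ATOMIC(16) */
        cdb[1] = 0;                                   /* FUA/protection flags */
        put_be64(lba, &cdb[2]);                       /* bytes 2..9 */
        put_be16(boundary ? nr_blocks : 0, &cdb[10]); /* bytes 10..11 */
        put_be16(nr_blocks, &cdb[12]);                /* bytes 12..13 */

        for (size_t i = 0; i < sizeof(cdb); i++)
            printf("%02x%c", cdb[i], i == 15 ? '\n' : ' ');
        return 0;
    }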
- * @sdkp: disk to query +static unsigned int sd_discard_mode(struct scsi_disk *sdkp) +{ + if (!sdkp->lbpme) + return SD_LBP_FULL; + + if (!sdkp->lbpvpd) { + /* LBP VPD page not provided */ + if (sdkp->max_unmap_blocks) + return SD_LBP_UNMAP; + return SD_LBP_WS16; + } + + /* LBP VPD page tells us what to use */ + if (sdkp->lbpu && sdkp->max_unmap_blocks) + return SD_LBP_UNMAP; + if (sdkp->lbpws) + return SD_LBP_WS16; + if (sdkp->lbpws10) + return SD_LBP_WS10; + return SD_LBP_DISABLE; +} + +/* + * Query disk device for preferred I/O sizes. */ -static void sd_read_block_limits(struct scsi_disk *sdkp) +static void sd_read_block_limits(struct scsi_disk *sdkp, + struct queue_limits *lim) { struct scsi_vpd *vpd; @@ -3220,7 +3349,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); if (!sdkp->lbpme) - goto out; + goto config_atomic; lba_count = get_unaligned_be32(&vpd->data[20]); desc_count = get_unaligned_be32(&vpd->data[24]); @@ -3234,23 +3363,14 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sdkp->unmap_alignment = get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); - if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ - - if (sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); - else - sd_config_discard(sdkp, SD_LBP_WS16); - - } else { /* LBP VPD page tells us what to use */ - if (sdkp->lbpu && sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); - else if (sdkp->lbpws) - sd_config_discard(sdkp, SD_LBP_WS16); - else if (sdkp->lbpws10) - sd_config_discard(sdkp, SD_LBP_WS10); - else - sd_config_discard(sdkp, SD_LBP_DISABLE); - } +config_atomic: + sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]); + sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]); + sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]); + sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]); + sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]); + + sd_config_atomic(sdkp, lim); } out: @@ -3269,20 +3389,17 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp) rcu_read_unlock(); } -/** - * sd_read_block_characteristics - Query block dev. characteristics - * @sdkp: disk to query - */ -static void sd_read_block_characteristics(struct scsi_disk *sdkp) +/* Query block device characteristics */ +static void sd_read_block_characteristics(struct scsi_disk *sdkp, + struct queue_limits *lim) { - struct request_queue *q = sdkp->disk->queue; struct scsi_vpd *vpd; u16 rot; rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb1); - if (!vpd || vpd->len < 8) { + if (!vpd || vpd->len <= 8) { rcu_read_unlock(); return; } @@ -3291,37 +3408,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) sdkp->zoned = (vpd->data[8] >> 4) & 3; rcu_read_unlock(); - if (rot == 1) { - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - } - - -#ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */ - if (sdkp->device->type == TYPE_ZBC) { - /* - * Host-managed. - */ - disk_set_zoned(sdkp->disk); - - /* - * Per ZBC and ZAC specifications, writes in sequential write - * required zones of host-managed devices must be aligned to - * the device physical block size. - */ - blk_queue_zone_write_granularity(q, sdkp->physical_block_size); - } else { - /* - * Host-aware devices are treated as conventional. 
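sd_discard_mode() above centralizes what used to be an open-coded if/else chain in sd_read_block_limits(): it is now a pure function of the LBP flags read from the VPD pages. The same decision table in isolation — the enum values are local stand-ins for the driver's SD_LBP_* constants:

    #include <stdio.h>

    enum lbp_mode { LBP_FULL, LBP_UNMAP, LBP_WS16, LBP_WS10, LBP_DISABLE };

    struct lbp_flags {
        int lbpme, lbpvpd, lbpu, lbpws, lbpws10;
        unsigned int max_unmap_blocks;
    };

    static enum lbp_mode discard_mode(const struct lbp_flags *d)
    {
        if (!d->lbpme)        /* logical block provisioning disabled */
            return LBP_FULL;
        if (!d->lbpvpd)       /* no LBP VPD page: infer from the limits */
            return d->max_unmap_blocks ? LBP_UNMAP : LBP_WS16;
        if (d->lbpu && d->max_unmap_blocks)
            return LBP_UNMAP;
        if (d->lbpws)
            return LBP_WS16;
        if (d->lbpws10)
            return LBP_WS10;
        return LBP_DISABLE;
    }

    int main(void)
    {
        struct lbp_flags d = { .lbpme = 1, .lbpvpd = 1, .lbpws = 1 };
        printf("mode=%d\n", discard_mode(&d));  /* 2 == LBP_WS16 */
        return 0;
    }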
- */ - WARN_ON_ONCE(blk_queue_is_zoned(q)); - } -#endif /* CONFIG_BLK_DEV_ZONED */ + if (rot == 1) + lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (!sdkp->first_scan) return; - if (blk_queue_is_zoned(q)) + if (sdkp->device->type == TYPE_ZBC) sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n"); else if (sdkp->zoned == 1) sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n"); @@ -3570,16 +3663,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, static void sd_read_block_zero(struct scsi_disk *sdkp) { - unsigned int buf_len = sdkp->device->sector_size; - char *buffer, cmd[10] = { }; + struct scsi_device *sdev = sdkp->device; + unsigned int buf_len = sdev->sector_size; + u8 *buffer, cmd[16] = { }; buffer = kmalloc(buf_len, GFP_KERNEL); if (!buffer) return; - cmd[0] = READ_10; - put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ - put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + if (sdev->use_16_for_rw) { + cmd[0] = READ_16; + put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */ + } else { + cmd[0] = READ_10; + put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + } scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len, SD_TIMEOUT, sdkp->max_retries, NULL); @@ -3595,10 +3695,11 @@ static int sd_revalidate_disk(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; - struct request_queue *q = sdkp->disk->queue; sector_t old_capacity = sdkp->capacity; + struct queue_limits lim; unsigned char *buffer; - unsigned int dev_max, rw_max; + unsigned int dev_max; + int err; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_revalidate_disk\n")); @@ -3619,12 +3720,14 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_spinup_disk(sdkp); + lim = queue_limits_start_update(sdkp->disk->queue); + /* * Without media there is no reason to ask; moreover, some devices * react badly if we do. */ if (sdkp->media_present) { - sd_read_capacity(sdkp, buffer); + sd_read_capacity(sdkp, &lim, buffer); /* * Some USB/UAS devices return generic values for mode pages * until the media has been accessed. Trigger a READ operation @@ -3638,18 +3741,18 @@ static int sd_revalidate_disk(struct gendisk *disk) * cause this to be updated correctly and any device which * doesn't support it should be treated as rotational. */ - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); - sd_read_block_limits(sdkp); + sd_read_block_limits(sdkp, &lim); sd_read_block_limits_ext(sdkp); - sd_read_block_characteristics(sdkp); - sd_zbc_read_zones(sdkp, buffer); - sd_read_cpr(sdkp); + sd_read_block_characteristics(sdkp, &lim); + sd_zbc_read_zones(sdkp, &lim, buffer); } + sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp)); + sd_print_capacity(sdkp, old_capacity); sd_read_write_protect_flag(sdkp, buffer); @@ -3658,62 +3761,56 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_app_tag_own(sdkp, buffer); sd_read_write_same(sdkp, buffer); sd_read_security(sdkp, buffer); - sd_config_protection(sdkp); + sd_config_protection(sdkp, &lim); } /* * We now have all cache related info, determine how we deal * with flush requests. 
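sd_read_block_characteristics() above decodes two fields from VPD page B1h: MEDIUM ROTATION RATE (big-endian 16 bits at bytes 4-5, where the value 1 means non-rotating) and the ZONED field (bits 5:4 of byte 8) — which is also why the length guard tightens from < 8 to <= 8: byte 8 is now the highest offset dereferenced, so the page must be at least 9 bytes long. A self-contained decode of an invented page:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t data[12] = { 0 };   /* invented VPD page B1h payload */

        data[4] = 0x00;             /* MEDIUM ROTATION RATE, big-endian: */
        data[5] = 0x01;             /* 0001h == non-rotating medium (SSD) */
        data[8] = 0x10;             /* ZONED field lives in bits 5:4 */

        uint16_t rot = (uint16_t)((data[4] << 8) | data[5]);
        unsigned int zoned = (data[8] >> 4) & 3;

        if (rot == 1)
            printf("non-rotational: clear ROTATIONAL/ADD_RANDOM features\n");
        printf("zoned=%u (1 == host-aware, 2 == drive-managed)\n", zoned);
        return 0;
    }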
*/ - sd_set_flush_flag(sdkp); + sd_set_flush_flag(sdkp, &lim); /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; /* Some devices report a maximum block count for READ/WRITE requests. */ dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); - q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); + lim.max_dev_sectors = logical_to_sectors(sdp, dev_max); if (sd_validate_min_xfer_size(sdkp)) - blk_queue_io_min(sdkp->disk->queue, - logical_to_bytes(sdp, sdkp->min_xfer_blocks)); + lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks); else - blk_queue_io_min(sdkp->disk->queue, 0); - - if (sd_validate_opt_xfer_size(sdkp, dev_max)) { - q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); - rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); - } else { - q->limits.io_opt = 0; - rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), - (sector_t)BLK_DEF_MAX_SECTORS_CAP); - } + lim.io_min = 0; /* * Limit default to SCSI host optimal sector limit if set. There may be * an impact on performance for when the size of a request exceeds this * host limit. */ - rw_max = min_not_zero(rw_max, sdp->host->opt_sectors); - - /* Do not exceed controller limit */ - rw_max = min(rw_max, queue_max_hw_sectors(q)); - - /* - * Only update max_sectors if previously unset or if the current value - * exceeds the capabilities of the hardware. - */ - if (sdkp->first_scan || - q->limits.max_sectors > q->limits.max_dev_sectors || - q->limits.max_sectors > q->limits.max_hw_sectors) - q->limits.max_sectors = rw_max; + lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT; + if (sd_validate_opt_xfer_size(sdkp, dev_max)) { + lim.io_opt = min_not_zero(lim.io_opt, + logical_to_bytes(sdp, sdkp->opt_xfer_blocks)); + } sdkp->first_scan = 0; set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); - sd_config_write_same(sdkp); + sd_config_write_same(sdkp, &lim); kfree(buffer); + err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim); + if (err) + return err; + + /* + * Query concurrent positioning ranges after + * queue_limits_commit_update() unlocked q->limits_lock to avoid + * deadlock with q->sysfs_dir_lock and q->sysfs_lock. + */ + if (sdkp->media_present && scsi_device_supports_vpd(sdp)) + sd_read_cpr(sdkp); + /* * For a zoned drive, revalidating the zones can be done only once * the gendisk capacity is set. 
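The rewritten limits computation above leans on min_not_zero() semantics throughout: a zero limit means "unset", so combining the host's opt_sectors cap with the disk's reported OPTIMAL TRANSFER LENGTH has to skip zeros instead of treating them as a minimum. A local reimplementation to show the combining rule (all values invented):

    #include <stdio.h>

    /* Combine two limits where 0 means "no limit", like the kernel macro. */
    static unsigned long min_not_zero(unsigned long a, unsigned long b)
    {
        if (a == 0)
            return b;
        if (b == 0)
            return a;
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long host_opt = 0;             /* host imposes no cap */
        unsigned long disk_opt = 1024 * 1024;   /* 1 MiB optimal transfer */

        printf("io_opt=%lu\n", min_not_zero(host_opt, disk_opt));
        printf("io_opt=%lu\n", min_not_zero(512UL * 1024, disk_opt));
        return 0;
    }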
So if this fails, set back the gendisk @@ -3979,7 +4076,6 @@ static void scsi_disk_release(struct device *dev) struct scsi_disk *sdkp = to_scsi_disk(dev); ida_free(&sd_index_ida, sdkp->index); - sd_zbc_free_zone_info(sdkp); put_device(&sdkp->device->sdev_gendev); free_opal_dev(sdkp->opal_dev); @@ -3990,9 +4086,38 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) { unsigned char cmd[6] = { START_STOP }; /* START_VALID */ struct scsi_sense_hdr sshdr; + struct scsi_failure failure_defs[] = { + { + /* Power on, reset, or bus device reset occurred */ + .sense = UNIT_ATTENTION, + .asc = 0x29, + .ascq = 0, + .result = SAM_STAT_CHECK_CONDITION, + }, + { + /* Power on occurred */ + .sense = UNIT_ATTENTION, + .asc = 0x29, + .ascq = 1, + .result = SAM_STAT_CHECK_CONDITION, + }, + { + /* SCSI bus reset */ + .sense = UNIT_ATTENTION, + .asc = 0x29, + .ascq = 2, + .result = SAM_STAT_CHECK_CONDITION, + }, + {} + }; + struct scsi_failures failures = { + .total_allowed = 3, + .failure_definitions = failure_defs, + }; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, .req_flags = BLK_MQ_REQ_PM, + .failures = &failures, }; struct scsi_device *sdp = sdkp->device; int res; @@ -4199,7 +4324,6 @@ static const struct dev_pm_ops sd_pm_ops = { static struct scsi_driver sd_template = { .gendrv = { .name = "sd", - .owner = THIS_MODULE, .probe = sd_probe, .probe_type = PROBE_PREFER_ASYNCHRONOUS, .remove = sd_remove, diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 5c4285a582b2..36382eca941c 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -104,12 +104,6 @@ struct scsi_disk { * between zone starting LBAs is constant. */ u32 zone_starting_lba_gran; - u32 *zones_wp_offset; - spinlock_t zones_wp_offset_lock; - u32 *rev_wp_offset; - struct mutex rev_mutex; - struct work_struct zone_wp_offset_work; - char *zone_wp_update_buf; #endif atomic_t openers; sector_t capacity; /* size in logical blocks */ @@ -121,6 +115,13 @@ struct scsi_disk { u32 max_unmap_blocks; u32 unmap_granularity; u32 unmap_alignment; + + u32 max_atomic; + u32 atomic_alignment; + u32 atomic_granularity; + u32 max_atomic_with_boundary; + u32 max_atomic_boundary; + u32 index; unsigned int physical_block_size; unsigned int max_medium_access_timeouts; @@ -154,6 +155,7 @@ struct scsi_disk { unsigned security : 1; unsigned ignore_medium_access_errors : 1; unsigned rscs : 1; /* reduced stream control support */ + unsigned use_atomic_write_boundary : 1; }; #define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev) @@ -226,27 +228,12 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec return sector >> (ilog2(sdev->sector_size) - 9); } -#ifdef CONFIG_BLK_DEV_INTEGRITY - -extern void sd_dif_config_host(struct scsi_disk *); - -#else /* CONFIG_BLK_DEV_INTEGRITY */ - -static inline void sd_dif_config_host(struct scsi_disk *disk) -{ -} - -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - -static inline int sd_is_zoned(struct scsi_disk *sdkp) -{ - return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC; -} +void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim); #ifdef CONFIG_BLK_DEV_ZONED -void sd_zbc_free_zone_info(struct scsi_disk *sdkp); -int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]); +int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, + u8 buf[SD_BUF_SIZE]); int sd_zbc_revalidate_zones(struct scsi_disk *sdkp); blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, bool all); @@ -255,14 +242,10 @@ 
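The scsi_failures table handed to START STOP UNIT above is a declarative retry policy: each entry names one (sense key, ASC, ASCQ) triple — here the three power-on/reset UNIT ATTENTION variants, 29h/00h through 29h/02h — and total_allowed caps the combined retries. A userspace sketch of the same table-driven matching; the struct and function names are local stand-ins, not the kernel API:

    #include <stdio.h>

    struct failure_rule {
        unsigned char sense, asc, ascq;  /* triple that may be retried */
    };

    /* UNIT ATTENTION (key 06h), ASC 29h: power on / reset variants. */
    static const struct failure_rule retryable[] = {
        { 0x06, 0x29, 0x00 },
        { 0x06, 0x29, 0x01 },
        { 0x06, 0x29, 0x02 },
    };

    static int should_retry(unsigned char sense, unsigned char asc,
                            unsigned char ascq, int *attempts, int max)
    {
        for (size_t i = 0; i < sizeof(retryable) / sizeof(retryable[0]); i++) {
            const struct failure_rule *r = &retryable[i];
            if (r->sense == sense && r->asc == asc && r->ascq == ascq)
                return ++(*attempts) <= max;
        }
        return 0;  /* not a listed failure: surface the error */
    }

    int main(void)
    {
        int attempts = 0;
        printf("%d\n", should_retry(0x06, 0x29, 0x00, &attempts, 3)); /* 1 */
        printf("%d\n", should_retry(0x05, 0x24, 0x00, &attempts, 3)); /* 0 */
        return 0;
    }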
unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); -blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, - unsigned int nr_blocks); - #else /* CONFIG_BLK_DEV_ZONED */ -static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {} - -static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) +static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, + struct queue_limits *lim, u8 buf[SD_BUF_SIZE]) { return 0; } @@ -285,13 +268,6 @@ static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, return good_bytes; } -static inline blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, - sector_t *lba, - unsigned int nr_blocks) -{ - return BLK_STS_TARGET; -} - #define sd_zbc_report_zones NULL #endif /* CONFIG_BLK_DEV_ZONED */ diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 1df847b5f747..ae6ce6f5d622 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -24,14 +24,15 @@ /* * Configure exchange of protection information between OS and HBA. */ -void sd_dif_config_host(struct scsi_disk *sdkp) +void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim) { struct scsi_device *sdp = sdkp->device; - struct gendisk *disk = sdkp->disk; u8 type = sdkp->protection_type; - struct blk_integrity bi; + struct blk_integrity *bi = &lim->integrity; int dif, dix; + memset(bi, 0, sizeof(*bi)); + dif = scsi_host_dif_capable(sdp->host, type); dix = scsi_host_dix_capable(sdp->host, type); @@ -39,45 +40,33 @@ void sd_dif_config_host(struct scsi_disk *sdkp) dif = 0; dix = 1; } - if (!dix) { - blk_integrity_unregister(disk); + if (!dix) return; - } - - memset(&bi, 0, sizeof(bi)); /* Enable DMA of protection information */ - if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) { - if (type == T10_PI_TYPE3_PROTECTION) - bi.profile = &t10_pi_type3_ip; - else - bi.profile = &t10_pi_type1_ip; + if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) + bi->csum_type = BLK_INTEGRITY_CSUM_IP; + else + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; - bi.flags |= BLK_INTEGRITY_IP_CHECKSUM; - } else - if (type == T10_PI_TYPE3_PROTECTION) - bi.profile = &t10_pi_type3_crc; - else - bi.profile = &t10_pi_type1_crc; + if (type != T10_PI_TYPE3_PROTECTION) + bi->flags |= BLK_INTEGRITY_REF_TAG; - bi.tuple_size = sizeof(struct t10_pi_tuple); + bi->tuple_size = sizeof(struct t10_pi_tuple); if (dif && type) { - bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; if (!sdkp->ATO) - goto out; + return; if (type == T10_PI_TYPE3_PROTECTION) - bi.tag_size = sizeof(u16) + sizeof(u32); + bi->tag_size = sizeof(u16) + sizeof(u32); else - bi.tag_size = sizeof(u16); + bi->tag_size = sizeof(u16); } sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIX %s, application tag size %u bytes\n", - bi.profile->name, bi.tag_size); -out: - blk_integrity_register(disk, &bi); + blk_integrity_profile_name(bi), bi->tag_size); } - diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 26af5ab7d7c1..a8db66428f80 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -13,7 +13,7 @@ #include <linux/sched/mm.h> #include <linux/mutex.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -23,36 +23,6 @@ #define CREATE_TRACE_POINTS #include "sd_trace.h" -/** - * sd_zbc_get_zone_wp_offset - Get zone 
write pointer offset. - * @zone: Zone for which to return the write pointer offset. - * - * Return: offset of the write pointer from the start of the zone. - */ -static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone) -{ - if (zone->type == ZBC_ZONE_TYPE_CONV) - return 0; - - switch (zone->cond) { - case BLK_ZONE_COND_IMP_OPEN: - case BLK_ZONE_COND_EXP_OPEN: - case BLK_ZONE_COND_CLOSED: - return zone->wp - zone->start; - case BLK_ZONE_COND_FULL: - return zone->len; - case BLK_ZONE_COND_EMPTY: - case BLK_ZONE_COND_OFFLINE: - case BLK_ZONE_COND_READONLY: - default: - /* - * Offline and read-only zones do not have a valid - * write pointer. Use 0 as for an empty zone. - */ - return 0; - } -} - /* Whether or not a SCSI zone descriptor describes a gap zone. */ static bool sd_zbc_is_gap_zone(const u8 buf[64]) { @@ -121,9 +91,6 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64], if (ret) return ret; - if (sdkp->rev_wp_offset) - sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone); - return 0; } @@ -202,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, unsigned int nr_zones, size_t *buflen) { struct request_queue *q = sdkp->disk->queue; + unsigned int max_segments; size_t bufsize; void *buf; @@ -213,16 +181,18 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, * Furthermore, since the report zone command cannot be split, make * sure that the allocated buffer can always be mapped by limiting the * number of pages allocated to the HBA max segments limit. + * Since max segments can be larger than the max inline bio vectors, + * further limit the allocated buffer to BIO_MAX_INLINE_VECS. */ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); bufsize = min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); - bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); + max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q)); + bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { - buf = __vmalloc(bufsize, - GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY); + buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); if (buf) { *buflen = bufsize; return buf; @@ -265,7 +235,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, int zone_idx = 0; int ret; - if (!sd_is_zoned(sdkp)) + if (sdkp->device->type != TYPE_ZBC) /* Not a zoned device */ return -EOPNOTSUPP; @@ -333,7 +303,7 @@ static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd) struct scsi_disk *sdkp = scsi_disk(rq->q->disk); sector_t sector = blk_rq_pos(rq); - if (!sd_is_zoned(sdkp)) + if (sdkp->device->type != TYPE_ZBC) /* Not a zoned device */ return BLK_STS_IOERR; @@ -347,123 +317,6 @@ static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd) return BLK_STS_OK; } -#define SD_ZBC_INVALID_WP_OFST (~0u) -#define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1) - -static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx, - void *data) -{ - struct scsi_disk *sdkp = data; - - lockdep_assert_held(&sdkp->zones_wp_offset_lock); - - sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone); - - return 0; -} - -/* - * An attempt to append a zone triggered an invalid write pointer error. - * Reread the write pointer of the zone(s) in which the append failed. 
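sd_zbc_alloc_report_buffer() above first caps the ideal REPORT ZONES buffer by the HBA's transfer and segment limits (now also by BIO_MAX_INLINE_VECS), then halves the size on allocation failure rather than giving up — __GFP_NORETRY keeps the large first attempts cheap. The same shrink-on-failure shape in plain C, with calloc standing in for kvzalloc():

    #include <stdio.h>
    #include <stdlib.h>

    /* Try the capped ideal size first, halving until allocation succeeds. */
    static void *alloc_report_buffer(size_t want, size_t floor, size_t *got)
    {
        for (size_t sz = want; sz >= floor; sz /= 2) {
            void *buf = calloc(1, sz);  /* zeroed, like kvzalloc() */
            if (buf) {
                *got = sz;
                return buf;
            }
        }
        return NULL;
    }

    int main(void)
    {
        size_t got = 0;
        void *buf = alloc_report_buffer(1 << 20, 512, &got);

        if (buf)
            printf("allocated %zu bytes\n", got);
        free(buf);
        return 0;
    }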
- */ -static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) -{ - struct scsi_disk *sdkp; - unsigned long flags; - sector_t zno; - int ret; - - sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); - - spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); - for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) { - if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) - continue; - - spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); - ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf, - SD_BUF_SIZE, - zno * sdkp->zone_info.zone_blocks, true); - spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); - if (!ret) - sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64, - zno, sd_zbc_update_wp_offset_cb, - sdkp); - } - spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); - - scsi_device_put(sdkp->device); -} - -/** - * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command. - * @cmd: the command to setup - * @lba: the LBA to patch - * @nr_blocks: the number of LBAs to be written - * - * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND. - * @sd_zbc_prepare_zone_append() handles the necessary zone wrote locking and - * patching of the lba for an emulated ZONE_APPEND command. - * - * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will - * schedule a REPORT ZONES command and return BLK_STS_IOERR. - */ -blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, - unsigned int nr_blocks) -{ - struct request *rq = scsi_cmd_to_rq(cmd); - struct scsi_disk *sdkp = scsi_disk(rq->q->disk); - unsigned int wp_offset, zno = blk_rq_zone_no(rq); - unsigned long flags; - blk_status_t ret; - - ret = sd_zbc_cmnd_checks(cmd); - if (ret != BLK_STS_OK) - return ret; - - if (!blk_rq_zone_is_seq(rq)) - return BLK_STS_IOERR; - - /* Unlock of the write lock will happen in sd_zbc_complete() */ - if (!blk_req_zone_write_trylock(rq)) - return BLK_STS_ZONE_RESOURCE; - - spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); - wp_offset = sdkp->zones_wp_offset[zno]; - switch (wp_offset) { - case SD_ZBC_INVALID_WP_OFST: - /* - * We are about to schedule work to update a zone write pointer - * offset, which will cause the zone append command to be - * requeued. So make sure that the scsi device does not go away - * while the work is being processed. - */ - if (scsi_device_get(sdkp->device)) { - ret = BLK_STS_IOERR; - break; - } - sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST; - schedule_work(&sdkp->zone_wp_offset_work); - fallthrough; - case SD_ZBC_UPDATING_WP_OFST: - ret = BLK_STS_DEV_RESOURCE; - break; - default: - wp_offset = sectors_to_logical(sdkp->device, wp_offset); - if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) { - ret = BLK_STS_IOERR; - break; - } - - trace_scsi_prepare_zone_append(cmd, *lba, wp_offset); - *lba += wp_offset; - } - spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); - if (ret) - blk_req_zone_write_unlock(rq); - return ret; -} - /** * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH. 
@@ -504,96 +357,6 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, return BLK_STS_OK; } -static bool sd_zbc_need_zone_wp_update(struct request *rq) -{ - switch (req_op(rq)) { - case REQ_OP_ZONE_APPEND: - case REQ_OP_ZONE_FINISH: - case REQ_OP_ZONE_RESET: - case REQ_OP_ZONE_RESET_ALL: - return true; - case REQ_OP_WRITE: - case REQ_OP_WRITE_ZEROES: - return blk_rq_zone_is_seq(rq); - default: - return false; - } -} - -/** - * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion - * @cmd: Completed command - * @good_bytes: Command reply bytes - * - * Called from sd_zbc_complete() to handle the update of the cached zone write - * pointer value in case an update is needed. - */ -static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, - unsigned int good_bytes) -{ - int result = cmd->result; - struct request *rq = scsi_cmd_to_rq(cmd); - struct scsi_disk *sdkp = scsi_disk(rq->q->disk); - unsigned int zno = blk_rq_zone_no(rq); - enum req_op op = req_op(rq); - unsigned long flags; - - /* - * If we got an error for a command that needs updating the write - * pointer offset cache, we must mark the zone wp offset entry as - * invalid to force an update from disk the next time a zone append - * command is issued. - */ - spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); - - if (result && op != REQ_OP_ZONE_RESET_ALL) { - if (op == REQ_OP_ZONE_APPEND) { - /* Force complete completion (no retry) */ - good_bytes = 0; - scsi_set_resid(cmd, blk_rq_bytes(rq)); - } - - /* - * Force an update of the zone write pointer offset on - * the next zone append access. - */ - if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) - sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST; - goto unlock_wp_offset; - } - - switch (op) { - case REQ_OP_ZONE_APPEND: - trace_scsi_zone_wp_update(cmd, rq->__sector, - sdkp->zones_wp_offset[zno], good_bytes); - rq->__sector += sdkp->zones_wp_offset[zno]; - fallthrough; - case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE: - if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp)) - sdkp->zones_wp_offset[zno] += - good_bytes >> SECTOR_SHIFT; - break; - case REQ_OP_ZONE_RESET: - sdkp->zones_wp_offset[zno] = 0; - break; - case REQ_OP_ZONE_FINISH: - sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp); - break; - case REQ_OP_ZONE_RESET_ALL: - memset(sdkp->zones_wp_offset, 0, - sdkp->zone_info.nr_zones * sizeof(unsigned int)); - break; - default: - break; - } - -unlock_wp_offset: - spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); - - return good_bytes; -} - /** * sd_zbc_complete - ZBC command post processing. * @cmd: Completed command @@ -619,11 +382,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, * so be quiet about the error. 
*/ rq->rq_flags |= RQF_QUIET; - } else if (sd_zbc_need_zone_wp_update(rq)) - good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes); - - if (req_op(rq) == REQ_OP_ZONE_APPEND) - blk_req_zone_write_unlock(rq); + } return good_bytes; } @@ -765,7 +524,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf, static void sd_zbc_print_zones(struct scsi_disk *sdkp) { - if (!sd_is_zoned(sdkp) || !sdkp->capacity) + if (sdkp->device->type != TYPE_ZBC || !sdkp->capacity) return; if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1)) @@ -780,46 +539,6 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp) sdkp->zone_info.zone_blocks); } -static int sd_zbc_init_disk(struct scsi_disk *sdkp) -{ - sdkp->zones_wp_offset = NULL; - spin_lock_init(&sdkp->zones_wp_offset_lock); - sdkp->rev_wp_offset = NULL; - mutex_init(&sdkp->rev_mutex); - INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn); - sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL); - if (!sdkp->zone_wp_update_buf) - return -ENOMEM; - - return 0; -} - -void sd_zbc_free_zone_info(struct scsi_disk *sdkp) -{ - if (!sdkp->zone_wp_update_buf) - return; - - /* Serialize against revalidate zones */ - mutex_lock(&sdkp->rev_mutex); - - kvfree(sdkp->zones_wp_offset); - sdkp->zones_wp_offset = NULL; - kfree(sdkp->zone_wp_update_buf); - sdkp->zone_wp_update_buf = NULL; - - sdkp->early_zone_info = (struct zoned_disk_info){ }; - sdkp->zone_info = (struct zoned_disk_info){ }; - - mutex_unlock(&sdkp->rev_mutex); -} - -static void sd_zbc_revalidate_zones_cb(struct gendisk *disk) -{ - struct scsi_disk *sdkp = scsi_disk(disk); - - swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset); -} - /* * Call blk_revalidate_disk_zones() if any of the zoned disk properties have * changed that make it necessary to call that function. Called by @@ -831,18 +550,8 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) struct request_queue *q = disk->queue; u32 zone_blocks = sdkp->early_zone_info.zone_blocks; unsigned int nr_zones = sdkp->early_zone_info.nr_zones; - int ret = 0; unsigned int flags; - - /* - * For all zoned disks, initialize zone append emulation data if not - * already done. - */ - if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) { - ret = sd_zbc_init_disk(sdkp); - if (ret) - return ret; - } + int ret; /* * There is nothing to do for regular disks, including host-aware disks @@ -851,78 +560,56 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) if (!blk_queue_is_zoned(q)) return 0; - /* - * Make sure revalidate zones are serialized to ensure exclusive - * updates of the scsi disk data. 
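sd_zbc_print_zones() above detects a runt last zone with capacity & (zone_blocks - 1), the usual power-of-two remainder trick (only power-of-two zone sizes are accepted by the driver). Quick check of the identity:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long zone_blocks = 65536;  /* must be a power of two */

        /* x & (n - 1) equals x % n when n is a power of two. */
        printf("%llu\n", 1000000ULL & (zone_blocks - 1));  /* 16960 */
        printf("%llu\n", 1048576ULL & (zone_blocks - 1));  /* 0 */
        return 0;
    }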
- */ - mutex_lock(&sdkp->rev_mutex); - if (sdkp->zone_info.zone_blocks == zone_blocks && sdkp->zone_info.nr_zones == nr_zones && disk->nr_zones == nr_zones) - goto unlock; + return 0; - flags = memalloc_noio_save(); sdkp->zone_info.zone_blocks = zone_blocks; sdkp->zone_info.nr_zones = nr_zones; - sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL); - if (!sdkp->rev_wp_offset) { - ret = -ENOMEM; - memalloc_noio_restore(flags); - goto unlock; - } - - blk_queue_chunk_sectors(q, - logical_to_sectors(sdkp->device, zone_blocks)); - blk_queue_max_zone_append_sectors(q, - q->limits.max_segments << PAGE_SECTORS_SHIFT); - - ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb); + flags = memalloc_noio_save(); + ret = blk_revalidate_disk_zones(disk); memalloc_noio_restore(flags); - kvfree(sdkp->rev_wp_offset); - sdkp->rev_wp_offset = NULL; - if (ret) { sdkp->zone_info = (struct zoned_disk_info){ }; sdkp->capacity = 0; - goto unlock; + return ret; } sd_zbc_print_zones(sdkp); -unlock: - mutex_unlock(&sdkp->rev_mutex); - - return ret; + return 0; } /** * sd_zbc_read_zones - Read zone information and update the request queue * @sdkp: SCSI disk pointer. + * @lim: queue limits to read into * @buf: 512 byte buffer used for storing SCSI command output. * * Read zone information and update the request queue zone characteristics and * also the zoned device information in *sdkp. Called by sd_revalidate_disk() * before the gendisk capacity has been set. */ -int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) +int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, + u8 buf[SD_BUF_SIZE]) { - struct gendisk *disk = sdkp->disk; - struct request_queue *q = disk->queue; unsigned int nr_zones; u32 zone_blocks = 0; int ret; - if (!sd_is_zoned(sdkp)) { - /* - * Device managed or normal SCSI disk, no special handling - * required. Nevertheless, free the disk zone information in - * case the device type changed. - */ - sd_zbc_free_zone_info(sdkp); + if (sdkp->device->type != TYPE_ZBC) return 0; - } + + lim->features |= BLK_FEAT_ZONED; + + /* + * Per ZBC and ZAC specifications, writes in sequential write required + * zones of host-managed devices must be aligned to the device physical + * block size. 
+ */ + lim->zone_write_granularity = sdkp->physical_block_size; /* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */ sdkp->device->use_16_for_rw = 1; @@ -939,19 +626,18 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) if (ret != 0) goto err; - /* The drive satisfies the kernel restrictions: set it up */ - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); - blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); - if (sdkp->zones_max_open == U32_MAX) - disk_set_max_open_zones(disk, 0); - else - disk_set_max_open_zones(disk, sdkp->zones_max_open); - disk_set_max_active_zones(disk, 0); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); - sdkp->early_zone_info.nr_zones = nr_zones; sdkp->early_zone_info.zone_blocks = zone_blocks; + /* The drive satisfies the kernel restrictions: set it up */ + if (sdkp->zones_max_open == U32_MAX) + lim->max_open_zones = 0; + else + lim->max_open_zones = sdkp->zones_max_open; + lim->max_active_zones = 0; + lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks); + return 0; err: diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 0f2c87cc95e6..2c61624cb4b0 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/enclosure.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -908,7 +908,6 @@ static struct class_interface ses_interface = { static struct scsi_driver ses_template = { .gendrv = { .name = "ses", - .owner = THIS_MODULE, .probe = ses_probe, .remove = ses_remove, }, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index baf870a03ecf..3c02a5f7b5f3 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -307,10 +307,6 @@ sg_open(struct inode *inode, struct file *filp) if (retval) goto sg_put; - retval = scsi_autopm_get_device(device); - if (retval) - goto sdp_put; - /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. 
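sd_zbc_read_zones() above now writes the zone geometry straight into the queue_limits: the zone count rounds the capacity up to whole zones and divides by the power-of-two zone size (the kernel shifts by ilog2(zone_blocks)), and chunk_sectors is the zone size converted to 512-byte sectors via logical_to_sectors(). The same arithmetic standalone; the 4096-byte logical block size is an assumed example:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long capacity = 1000000;  /* logical blocks (invented) */
        unsigned long long zone_blocks = 65536; /* blocks per zone, power of 2 */
        unsigned int block_size = 4096;         /* assumed logical block size */

        /* round_up(capacity, zone_blocks) >> ilog2(zone_blocks) */
        unsigned long long nr_zones = (capacity + zone_blocks - 1) / zone_blocks;

        /* logical_to_sectors(): shift left by ilog2(block_size) - 9 */
        unsigned int shift = 0;
        while ((1ull << (shift + 1)) <= block_size)
            shift++;
        unsigned long long chunk_sectors = zone_blocks << (shift - 9);

        printf("nr_zones=%llu chunk_sectors=%llu\n", nr_zones, chunk_sectors);
        return 0;
    }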
*/ @@ -318,7 +314,7 @@ sg_open(struct inode *inode, struct file *filp) scsi_block_when_processing_errors(device))) { retval = -ENXIO; /* we are in error recovery for this device */ - goto error_out; + goto sdp_put; } mutex_lock(&sdp->open_rel_lock); @@ -371,8 +367,6 @@ out_undo: } error_mutex_locked: mutex_unlock(&sdp->open_rel_lock); -error_out: - scsi_autopm_put_device(device); sdp_put: kref_put(&sdp->d_ref, sg_device_destroy); scsi_device_put(device); @@ -392,8 +386,6 @@ sg_release(struct inode *inode, struct file *filp) SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); mutex_lock(&sdp->open_rel_lock); - scsi_autopm_put_device(sdp->device); - kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; /* possibly many open()s waiting on exlude clearing, start many; @@ -405,6 +397,7 @@ sg_release(struct inode *inode, struct file *filp) wake_up_interruptible(&sdp->open_wait); } mutex_unlock(&sdp->open_rel_lock); + kref_put(&sfp->f_ref, sg_remove_sfp); return 0; } @@ -1424,7 +1417,6 @@ static const struct file_operations sg_fops = { .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, - .llseek = no_llseek, }; static const struct class sg_sysfs_class = { @@ -1647,7 +1639,7 @@ MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> -static struct ctl_table sg_sysctls[] = { +static const struct ctl_table sg_sysctls[] = { { .procname = "sg-big-buff", .data = &sg_big_buff, @@ -1666,8 +1658,7 @@ static void register_sg_sysctls(void) static void unregister_sg_sysctls(void) { - if (hdr) - unregister_sysctl_table(hdr); + unregister_sysctl_table(hdr); } #else #define register_sg_sysctls() do { } while (0) diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index b0bef83db7b6..6594661db5f4 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c @@ -306,7 +306,7 @@ static void sgiwd93_remove(struct platform_device *pdev) static struct platform_driver sgiwd93_driver = { .probe = sgiwd93_probe, - .remove_new = sgiwd93_remove, + .remove = sgiwd93_remove, .driver = { .name = "sgiwd93", } diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index cdedc271857a..fae6db20a6e9 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -505,7 +505,7 @@ struct pqi_vendor_general_request { __le64 buffer_address; __le32 buffer_length; u8 reserved[40]; - } ofa_memory_allocation; + } host_memory_allocation; } data; }; @@ -517,21 +517,30 @@ struct pqi_vendor_general_response { u8 reserved[2]; }; -#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 -#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1 +#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 +#define PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE 1 +#define PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE 2 #define PQI_OFA_VERSION 1 #define PQI_OFA_SIGNATURE "OFA_QRM" -#define PQI_OFA_MAX_SG_DESCRIPTORS 64 +#define PQI_CTRL_LOG_VERSION 1 +#define PQI_CTRL_LOG_SIGNATURE "FW_DATA" +#define PQI_HOST_MAX_SG_DESCRIPTORS 64 -struct pqi_ofa_memory { - __le64 signature; /* "OFA_QRM" */ +struct pqi_host_memory { + __le64 signature; /* "OFA_QRM", "FW_DATA", etc. 
*/ __le16 version; /* version of this struct (1 = 1st version) */ u8 reserved[62]; __le32 bytes_allocated; /* total allocated memory in bytes */ __le16 num_memory_descriptors; u8 reserved1[2]; - struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS]; + struct pqi_sg_descriptor sg_descriptor[PQI_HOST_MAX_SG_DESCRIPTORS]; +}; + +struct pqi_host_memory_descriptor { + struct pqi_host_memory *host_memory; + dma_addr_t host_memory_dma_handle; + void **host_chunk_virt_address; }; struct pqi_aio_error_info { @@ -867,7 +876,8 @@ struct pqi_config_table_firmware_features { #define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17 #define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18 #define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21 -#define PQI_FIRMWARE_FEATURE_MAXIMUM 21 +#define PQI_FIRMWARE_FEATURE_CTRL_LOGGING 22 +#define PQI_FIRMWARE_FEATURE_MAXIMUM 22 struct pqi_config_table_debug { struct pqi_config_table_section_header header; @@ -1096,6 +1106,11 @@ struct pqi_tmf_work { u8 scsi_opcode; }; +struct pqi_raid_io_stats { + u64 raid_bypass_cnt; + u64 write_stream_cnt; +}; + struct pqi_scsi_dev { int devtype; /* as reported by INQUIRY command */ u8 device_type; /* as reported by */ @@ -1158,7 +1173,7 @@ struct pqi_scsi_dev { struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN]; atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE]; - unsigned int raid_bypass_cnt; + struct pqi_raid_io_stats __percpu *raid_io_stats; struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE]; }; @@ -1357,6 +1372,7 @@ struct pqi_ctrl_info { u8 firmware_triage_supported : 1; u8 rpl_extended_format_4_5_supported : 1; u8 multi_lun_device_supported : 1; + u8 ctrl_logging_supported : 1; u8 enable_r1_writes : 1; u8 enable_r5_writes : 1; u8 enable_r6_writes : 1; @@ -1398,13 +1414,12 @@ struct pqi_ctrl_info { wait_queue_head_t block_requests_wait; struct mutex ofa_mutex; - struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; - dma_addr_t pqi_ofa_mem_dma_handle; - void **pqi_ofa_chunk_virt_addr; struct work_struct ofa_memory_alloc_work; struct work_struct ofa_quiesce_work; u32 ofa_bytes_requested; u16 ofa_cancel_reason; + struct pqi_host_memory_descriptor ofa_memory; + struct pqi_host_memory_descriptor ctrl_log_memory; enum pqi_ctrl_removal_state ctrl_removal_state; }; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 385180c98be4..1d784ee7671c 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -19,13 +19,13 @@ #include <linux/bcd.h> #include <linux/reboot.h> #include <linux/cciss_ioctl.h> -#include <linux/blk-mq-pci.h> +#include <linux/crash_dump.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_transport_sas.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "smartpqi.h" #include "smartpqi_sis.h" @@ -33,11 +33,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.26-030" +#define DRIVER_VERSION "2.1.34-035" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 26 -#define DRIVER_REVISION 30 +#define DRIVER_RELEASE 34 +#define DRIVER_REVISION 35 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -68,6 +68,7 @@ static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd) static void pqi_verify_structures(void); static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); +static void 
pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info); static void pqi_ctrl_offline_worker(struct work_struct *work); static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); static void pqi_scan_start(struct Scsi_Host *shost); @@ -92,9 +93,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); -static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); -static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); -static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); +static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size); +static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor); +static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code); static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); @@ -1041,9 +1042,8 @@ static int pqi_write_driver_version_to_host_wellness( buffer->driver_version_tag[1] = 'V'; put_unaligned_le16(sizeof(buffer->driver_version), &buffer->driver_version_length); - strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, - sizeof(buffer->driver_version) - 1); - buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; + strscpy(buffer->driver_version, "Linux " DRIVER_VERSION, + sizeof(buffer->driver_version)); buffer->dont_write_tag[0] = 'D'; buffer->dont_write_tag[1] = 'W'; buffer->end_tag[0] = 'Z'; @@ -1509,6 +1509,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, if (rc) goto error; + device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); + if (!device->raid_io_stats) { + rc = -ENOMEM; + goto error; + } + device->raid_map = raid_map; return 0; @@ -2006,18 +2012,31 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, PQI_DEV_INFO_BUFFER_LENGTH - count, "-:-"); - if (pqi_is_logical_device(device)) + if (pqi_is_logical_device(device)) { count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %08x%08x", *((u32 *)&device->scsi3addr), *((u32 *)&device->scsi3addr[4])); - else + } else if (ctrl_info->rpl_extended_format_4_5_supported) { + if (device->device_type == SA_DEVICE_TYPE_NVME) + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx%016llx", + get_unaligned_be64(&device->wwid[0]), + get_unaligned_be64(&device->wwid[8])); + else + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx", + get_unaligned_be64(&device->wwid[0])); + } else { count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, - " %016llx%016llx", - get_unaligned_be64(&device->wwid[0]), - get_unaligned_be64(&device->wwid[8])); + " %016llx", + get_unaligned_be64(&device->wwid[0])); + } + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %s %.8s %.16s ", @@ -2100,6 +2119,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, /* To prevent this from being freed later. 
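The strncpy()-to-strscpy() conversion in pqi_write_driver_version_to_host_wellness() above drops the manual termination dance: strscpy() always NUL-terminates and signals truncation through its return value. A userspace approximation of that contract (the real kernel function returns the copied length or -E2BIG; -7 stands in for -E2BIG here):

    #include <stdio.h>
    #include <string.h>

    /* Rough userspace model of the kernel's strscpy() semantics. */
    static long my_strscpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size == 0)
            return -7;              /* nothing fits; kernel: -E2BIG */
        if (len >= size) {
            memcpy(dst, src, size - 1);
            dst[size - 1] = '\0';   /* always terminated */
            return -7;              /* truncated; kernel: -E2BIG */
        }
        memcpy(dst, src, len + 1);  /* copies the NUL too */
        return (long)len;
    }

    int main(void)
    {
        char buf[8];

        printf("%ld '%s'\n", my_strscpy(buf, "Linux", sizeof(buf)), buf);
        printf("%ld '%s'\n", my_strscpy(buf, "Linux 2.1.34-035", sizeof(buf)), buf);
        return 0;
    }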
*/ new_device->raid_map = NULL; } + if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { + existing_device->raid_io_stats = new_device->raid_io_stats; + new_device->raid_io_stats = NULL; + } existing_device->raid_bypass_configured = new_device->raid_bypass_configured; existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; } @@ -2122,6 +2145,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, static inline void pqi_free_device(struct pqi_scsi_dev *device) { if (device) { + free_percpu(device->raid_io_stats); kfree(device->raid_map); kfree(device); } @@ -2293,17 +2317,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, * queue depth, device size. */ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + /* + * Check for queue depth change. + */ if (device->sdev && device->queue_depth != device->advertised_queue_depth) { device->advertised_queue_depth = device->queue_depth; scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); - spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); - if (pqi_volume_rescan_needed(device)) { - device->rescan = false; - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - scsi_rescan_device(device->sdev); - } else { - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - } + } + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + /* + * Check for changes in the device, such as size. + */ + if (pqi_volume_rescan_needed(device)) { + device->rescan = false; + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + scsi_rescan_device(device->sdev); + } else { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); } } @@ -2355,14 +2385,6 @@ static inline void pqi_mask_device(u8 *scsi3addr) scsi3addr[3] |= 0xc0; } -static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) -{ - if (pqi_is_logical_device(device)) - return false; - - return (device->path_map & (device->path_map - 1)) != 0; -} - static inline bool pqi_expose_device(struct pqi_scsi_dev *device) { return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); @@ -3245,6 +3267,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request) sense_data_length); } + if (pqi_cmd_priv(scmd)->this_residual && + !pqi_is_logical_device(scmd->device->hostdata) && + scsi_status == SAM_STAT_CHECK_CONDITION && + host_byte == DID_OK && + sense_data_length && + scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + sshdr.asc == 0x26 && + sshdr.ascq == 0x0) { + host_byte = DID_NO_CONNECT; + pqi_take_device_offline(scmd->device, "AIO"); + scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); + } + scmd->result = scsi_status; set_host_byte(scmd, host_byte); } @@ -3259,14 +3295,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) int residual_count; int xfer_count; bool device_offline; - struct pqi_scsi_dev *device; scmd = io_request->scmd; error_info = io_request->error_info; host_byte = DID_OK; sense_data_length = 0; device_offline = false; - device = scmd->device->hostdata; switch (error_info->service_response) { case PQI_AIO_SERV_RESPONSE_COMPLETE: @@ -3291,14 +3325,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: pqi_aio_path_disabled(io_request); - if (pqi_is_multipath_device(device)) { - 
pqi_device_remove_start(device); - host_byte = DID_NO_CONNECT; - scsi_status = SAM_STAT_CHECK_CONDITION; - } else { - scsi_status = SAM_STAT_GOOD; - io_request->status = -EAGAIN; - } + scsi_status = SAM_STAT_GOOD; + io_request->status = -EAGAIN; break; case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: @@ -3626,7 +3654,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) ctrl_info->pqi_mode_enabled = false; pqi_save_ctrl_mode(ctrl_info, SIS_MODE); rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); dev_info(&ctrl_info->pci_dev->dev, "Online Firmware Activation: %s\n", @@ -3637,7 +3665,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) "Online Firmware Activation ABORTED\n"); if (ctrl_info->soft_reset_handshake_supported) pqi_clear_soft_reset_status(ctrl_info); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); pqi_ofa_ctrl_unquiesce(ctrl_info); break; @@ -3647,7 +3675,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) dev_err(&ctrl_info->pci_dev->dev, "unexpected Online Firmware Activation reset status: 0x%x\n", reset_status); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); pqi_ofa_ctrl_unquiesce(ctrl_info); pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); @@ -3662,8 +3690,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work) ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); pqi_ctrl_ofa_start(ctrl_info); - pqi_ofa_setup_host_buffer(ctrl_info); - pqi_ofa_host_memory_update(ctrl_info); + pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested); + pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); } static void pqi_ofa_quiesce_worker(struct work_struct *work) @@ -3703,7 +3731,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, dev_info(&ctrl_info->pci_dev->dev, "received Online Firmware Activation cancel request: reason: %u\n", ctrl_info->ofa_cancel_reason); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); break; default: @@ -3840,7 +3868,7 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) { - del_timer_sync(&ctrl_info->heartbeat_timer); + timer_delete_sync(&ctrl_info->heartbeat_timer); } static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, @@ -5233,7 +5261,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) ctrl_info->error_buffer_length = ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; - if (reset_devices) + if (is_kdump_kernel()) max_transfer_size = min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE_KDUMP); else @@ -5262,7 +5290,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) u16 num_elements_per_iq; u16 num_elements_per_oq; - if (reset_devices) { + if (is_kdump_kernel()) { num_queue_groups = 1; } else { int num_cpus; @@ -5934,7 +5962,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, int rc; struct pqi_scsi_dev *device; struct pqi_stream_data 
*pqi_stream_data; - struct pqi_scsi_dev_raid_map_data rmd; + struct pqi_scsi_dev_raid_map_data rmd = { 0 }; if (!ctrl_info->enable_stream_detection) return false; @@ -5976,6 +6004,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; pqi_stream_data->last_accessed = jiffies; + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++; return true; } @@ -6026,7 +6055,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm ctrl_info = shost_to_hba(shost); - if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { + if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) { set_host_byte(scmd, DID_NO_CONNECT); pqi_scsi_done(scmd); return 0; @@ -6054,7 +6083,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { raid_bypassed = true; - device->raid_bypass_cnt++; + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++; } } if (!raid_bypassed) @@ -6191,14 +6220,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, continue; scsi_device = scmd->device->hostdata; - if (scsi_device != device) - continue; - - if ((u8)scmd->device->lun != lun) - continue; list_del(&io_request->request_list_entry); - set_host_byte(scmd, DID_RESET); + if (scsi_device == device && (u8)scmd->device->lun == lun) + set_host_byte(scmd, DID_RESET); + else + set_host_byte(scmd, DID_REQUEUE); pqi_free_io_request(io_request); scsi_dma_unmap(scmd); pqi_scsi_done(scmd); @@ -6477,7 +6504,7 @@ out: return SUCCESS; } -static int pqi_slave_alloc(struct scsi_device *sdev) +static int pqi_sdev_init(struct scsi_device *sdev) { struct pqi_scsi_dev *device; unsigned long flags; @@ -6534,10 +6561,10 @@ static void pqi_map_queues(struct Scsi_Host *shost) struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); if (!ctrl_info->disable_managed_interrupts) - return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - ctrl_info->pci_dev, 0); + blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], + &ctrl_info->pci_dev->dev, 0); else - return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); + blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); } static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) @@ -6545,7 +6572,8 @@ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; } -static int pqi_slave_configure(struct scsi_device *sdev) +static int pqi_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { int rc = 0; struct pqi_scsi_dev *device; @@ -6561,7 +6589,7 @@ static int pqi_slave_configure(struct scsi_device *sdev) return rc; } -static void pqi_slave_destroy(struct scsi_device *sdev) +static void pqi_sdev_destroy(struct scsi_device *sdev) { struct pqi_ctrl_info *ctrl_info; struct pqi_scsi_dev *device; @@ -6892,12 +6920,6 @@ static ssize_t pqi_firmware_version_show(struct device *dev, return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); } -static ssize_t pqi_driver_version_show(struct device *dev, - struct device_attribute *attr, char *buffer) -{ - return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP); -} - static ssize_t pqi_serial_number_show(struct 
device *dev, struct device_attribute *attr, char *buffer) { @@ -7066,7 +7088,8 @@ static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, return count; } -static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); +static DEVICE_STRING_ATTR_RO(driver_version, 0444, + DRIVER_VERSION BUILD_TIMESTAMP); static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); @@ -7083,7 +7106,7 @@ static DEVICE_ATTR(enable_r6_writes, 0644, pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); static struct attribute *pqi_shost_attrs[] = { - &dev_attr_driver_version.attr, + &dev_attr_driver_version.attr.attr, &dev_attr_firmware_version.attr, &dev_attr_model.attr, &dev_attr_serial_number.attr, @@ -7356,7 +7379,8 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, struct scsi_device *sdev; struct pqi_scsi_dev *device; unsigned long flags; - unsigned int raid_bypass_cnt; + u64 raid_bypass_cnt; + int cpu; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -7372,11 +7396,17 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, return -ENODEV; } - raid_bypass_cnt = device->raid_bypass_cnt; + raid_bypass_cnt = 0; + + if (device->raid_io_stats) { + for_each_online_cpu(cpu) { + raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; + } + } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); + return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt); } static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, @@ -7458,6 +7488,43 @@ static ssize_t pqi_numa_node_show(struct device *dev, return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); } +static ssize_t pqi_write_stream_cnt_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + u64 write_stream_cnt; + int cpu; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + write_stream_cnt = 0; + + if (device->raid_io_stats) { + for_each_online_cpu(cpu) { + write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; + } + } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt); +} + static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); @@ -7468,6 +7535,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); static DEVICE_ATTR(sas_ncq_prio_enable, 0644, pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); +static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL); static struct attribute *pqi_sdev_attrs[] = { &dev_attr_lunid.attr, @@ -7479,6 +7547,7 @@ static struct attribute *pqi_sdev_attrs[] = { &dev_attr_raid_bypass_cnt.attr, &dev_attr_sas_ncq_prio_enable.attr, 
&dev_attr_numa_node.attr, + &dev_attr_write_stream_cnt.attr, NULL }; @@ -7495,9 +7564,9 @@ static const struct scsi_host_template pqi_driver_template = { .eh_device_reset_handler = pqi_eh_device_reset_handler, .eh_abort_handler = pqi_eh_abort_handler, .ioctl = pqi_ioctl, - .slave_alloc = pqi_slave_alloc, - .slave_configure = pqi_slave_configure, - .slave_destroy = pqi_slave_destroy, + .sdev_init = pqi_sdev_init, + .sdev_configure = pqi_sdev_configure, + .sdev_destroy = pqi_sdev_destroy, .map_queues = pqi_map_queues, .sdev_groups = pqi_sdev_groups, .shost_groups = pqi_shost_groups, @@ -7869,6 +7938,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: ctrl_info->multi_lun_device_supported = firmware_feature->enabled; break; + case PQI_FIRMWARE_FEATURE_CTRL_LOGGING: + ctrl_info->ctrl_logging_supported = firmware_feature->enabled; + break; } pqi_firmware_feature_status(ctrl_info, firmware_feature); @@ -7974,6 +8046,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = { .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, .feature_status = pqi_ctrl_update_feature_flags, }, + { + .feature_name = "Controller Data Logging", + .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING, + .feature_status = pqi_ctrl_update_feature_flags, + }, }; static void pqi_process_firmware_features( @@ -8076,6 +8153,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) ctrl_info->firmware_triage_supported = false; ctrl_info->rpl_extended_format_4_5_supported = false; ctrl_info->multi_lun_device_supported = false; + ctrl_info->ctrl_logging_supported = false; } static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) @@ -8216,17 +8294,26 @@ static void pqi_perform_lockup_action(void) } } +#define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024) +#define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS) + static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; u32 product_id; if (reset_devices) { - if (pqi_is_fw_triage_supported(ctrl_info)) { + if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) { rc = sis_wait_for_fw_triage_completion(ctrl_info); if (rc) return rc; } + if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) { + sis_notify_kdump(ctrl_info); + rc = sis_wait_for_ctrl_logging_completion(ctrl_info); + if (rc) + return rc; + } sis_soft_reset(ctrl_info); ssleep(PQI_POST_RESET_DELAY_SECS); } else { @@ -8272,7 +8359,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->product_id = (u8)product_id; ctrl_info->product_revision = (u8)(product_id >> 8); - if (reset_devices) { + if (is_kdump_kernel()) { if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) ctrl_info->max_outstanding_requests = @@ -8408,6 +8495,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; + if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) { + pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); + pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); + } + rc = pqi_get_ctrl_product_details(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -8592,8 +8684,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return rc; } - if (pqi_ofa_in_progress(ctrl_info)) + if (pqi_ofa_in_progress(ctrl_info)) { pqi_ctrl_unblock_scan(ctrl_info); + if 
(ctrl_info->ctrl_logging_supported) { + if (!ctrl_info->ctrl_log_memory.host_memory) + pqi_host_setup_buffer(ctrl_info, + &ctrl_info->ctrl_log_memory, + PQI_CTRL_LOG_TOTAL_SIZE, + PQI_CTRL_LOG_MIN_SIZE); + pqi_host_memory_update(ctrl_info, + &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); + } else { + if (ctrl_info->ctrl_log_memory.host_memory) + pqi_host_free_buffer(ctrl_info, + &ctrl_info->ctrl_log_memory); + } + } pqi_scan_scsi_devices(ctrl_info); @@ -8783,6 +8889,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) pqi_fail_all_outstanding_requests(ctrl_info); ctrl_info->pqi_mode_enabled = false; } + pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); pqi_unregister_scsi(ctrl_info); if (ctrl_info->pqi_mode_enabled) pqi_revert_to_sis_mode(ctrl_info); @@ -8808,177 +8915,187 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) pqi_ctrl_unblock_scan(ctrl_info); } -static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) +{ + ssleep(delay_secs); + + return pqi_ctrl_init_resume(ctrl_info); +} + +static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_size, u32 chunk_size) { int i; u32 sg_count; struct device *dev; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; struct pqi_sg_descriptor *mem_descriptor; dma_addr_t dma_handle; - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - sg_count = DIV_ROUND_UP(total_size, chunk_size); - if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) + if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) goto out; - ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); - if (!ctrl_info->pqi_ofa_chunk_virt_addr) + host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); + if (!host_memory_descriptor->host_chunk_virt_address) goto out; dev = &ctrl_info->pci_dev->dev; + host_memory = host_memory_descriptor->host_memory; for (i = 0; i < sg_count; i++) { - ctrl_info->pqi_ofa_chunk_virt_addr[i] = - dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); - if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) + host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); + if (!host_memory_descriptor->host_chunk_virt_address[i]) goto out_free_chunks; - mem_descriptor = &ofap->sg_descriptor[i]; + mem_descriptor = &host_memory->sg_descriptor[i]; put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); put_unaligned_le32(chunk_size, &mem_descriptor->length); } put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); - put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); - put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); + put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); + put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); return 0; out_free_chunks: while (--i >= 0) { - mem_descriptor = &ofap->sg_descriptor[i]; + mem_descriptor = &host_memory->sg_descriptor[i]; dma_free_coherent(dev, chunk_size, - ctrl_info->pqi_ofa_chunk_virt_addr[i], + host_memory_descriptor->host_chunk_virt_address[i], get_unaligned_le64(&mem_descriptor->address)); } - kfree(ctrl_info->pqi_ofa_chunk_virt_addr); - + kfree(host_memory_descriptor->host_chunk_virt_address); out: return -ENOMEM; } -static int 
pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) +static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_required_size, u32 min_required_size) { - u32 total_size; u32 chunk_size; u32 min_chunk_size; - if (ctrl_info->ofa_bytes_requested == 0) + if (total_required_size == 0 || min_required_size == 0) return 0; - total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); - min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); + total_required_size = PAGE_ALIGN(total_required_size); + min_required_size = PAGE_ALIGN(min_required_size); + min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS); min_chunk_size = PAGE_ALIGN(min_chunk_size); - for (chunk_size = total_size; chunk_size >= min_chunk_size;) { - if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) - return 0; - chunk_size /= 2; - chunk_size = PAGE_ALIGN(chunk_size); + while (total_required_size >= min_required_size) { + for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) { + if (pqi_host_alloc_mem(ctrl_info, + host_memory_descriptor, total_required_size, + chunk_size) == 0) + return 0; + chunk_size /= 2; + chunk_size = PAGE_ALIGN(chunk_size); + } + total_required_size /= 2; + total_required_size = PAGE_ALIGN(total_required_size); } return -ENOMEM; } -static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) +static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_size, u32 min_size) { struct device *dev; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; dev = &ctrl_info->pci_dev->dev; - ofap = dma_alloc_coherent(dev, sizeof(*ofap), - &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); - if (!ofap) + host_memory = dma_alloc_coherent(dev, sizeof(*host_memory), + &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL); + if (!host_memory) return; - ctrl_info->pqi_ofa_mem_virt_addr = ofap; + host_memory_descriptor->host_memory = host_memory; - if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { - dev_err(dev, - "failed to allocate host buffer for Online Firmware Activation\n"); - dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); - ctrl_info->pqi_ofa_mem_virt_addr = NULL; + if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor, + total_size, min_size) < 0) { + dev_err(dev, "failed to allocate firmware usable host buffer\n"); + dma_free_coherent(dev, sizeof(*host_memory), host_memory, + host_memory_descriptor->host_memory_dma_handle); + host_memory_descriptor->host_memory = NULL; return; } - - put_unaligned_le16(PQI_OFA_VERSION, &ofap->version); - memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); } -static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) +static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor) { unsigned int i; struct device *dev; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; struct pqi_sg_descriptor *mem_descriptor; unsigned int num_memory_descriptors; - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - if (!ofap) + host_memory = host_memory_descriptor->host_memory; + if (!host_memory) return; dev = &ctrl_info->pci_dev->dev; - if (get_unaligned_le32(&ofap->bytes_allocated) == 0) + if (get_unaligned_le32(&host_memory->bytes_allocated) == 0) goto out; - mem_descriptor = ofap->sg_descriptor; - 
num_memory_descriptors = - get_unaligned_le16(&ofap->num_memory_descriptors); + mem_descriptor = host_memory->sg_descriptor; + num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors); for (i = 0; i < num_memory_descriptors; i++) { dma_free_coherent(dev, get_unaligned_le32(&mem_descriptor[i].length), - ctrl_info->pqi_ofa_chunk_virt_addr[i], + host_memory_descriptor->host_chunk_virt_address[i], get_unaligned_le64(&mem_descriptor[i].address)); } - kfree(ctrl_info->pqi_ofa_chunk_virt_addr); + kfree(host_memory_descriptor->host_chunk_virt_address); out: - dma_free_coherent(dev, sizeof(*ofap), ofap, - ctrl_info->pqi_ofa_mem_dma_handle); - ctrl_info->pqi_ofa_mem_virt_addr = NULL; + dma_free_coherent(dev, sizeof(*host_memory), host_memory, + host_memory_descriptor->host_memory_dma_handle); + host_memory_descriptor->host_memory = NULL; } -static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) +static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u16 function_code) { u32 buffer_length; struct pqi_vendor_general_request request; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; memset(&request, 0, sizeof(request)); request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; - put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, - &request.header.iu_length); - put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, - &request.function_code); - - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - - if (ofap) { - buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) + - get_unaligned_le16(&ofap->num_memory_descriptors) * - sizeof(struct pqi_sg_descriptor); - - put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, - &request.data.ofa_memory_allocation.buffer_address); - put_unaligned_le32(buffer_length, - &request.data.ofa_memory_allocation.buffer_length); + put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); + put_unaligned_le16(function_code, &request.function_code); + + host_memory = host_memory_descriptor->host_memory; + + if (host_memory) { + buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); + put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address); + put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length); + + if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) { + put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version); + memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature)); + } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) { + put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version); + memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature)); + } } return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); } -static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) -{ - ssleep(delay_secs); - - return pqi_ctrl_init_resume(ctrl_info); -} - static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, .status = SAM_STAT_CHECK_CONDITION, @@ -9026,6 +9143,7 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) pqi_ctrl_wait_until_quiesced(ctrl_info); 
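/*
 * For reference, a minimal sketch of the descriptor-table pattern the
 * reworked helpers above share: the table sits in coherent DMA memory,
 * every field is stored little-endian via put_unaligned_le*() so the
 * layout is stable regardless of host byte order, and the final entry
 * carries an end-of-list flag.  'example_sg' only mirrors the shape of
 * struct pqi_sg_descriptor; the names and the EXAMPLE_SG_LAST value
 * are illustrative, not driver API.
 */
#include <linux/types.h>
#include <linux/unaligned.h>

#define EXAMPLE_SG_LAST 0x40000000	/* assumed flag value, cf. CISS_SG_LAST */

struct example_sg {
	u8 address[8];	/* little-endian 64-bit DMA address */
	u8 length[4];	/* little-endian 32-bit chunk length */
	u8 flags[4];	/* little-endian 32-bit flags */
};

static void example_fill_sg_table(struct example_sg *sg,
				  const dma_addr_t *dma_handles,
				  u32 chunk_size, unsigned int count)
{
	unsigned int i;

	if (!count)
		return;

	for (i = 0; i < count; i++) {
		put_unaligned_le64((u64)dma_handles[i], sg[i].address);
		put_unaligned_le32(chunk_size, sg[i].length);
		put_unaligned_le32(0, sg[i].flags);
	}
	/* the consumer stops walking the table at the flagged entry */
	put_unaligned_le32(EXAMPLE_SG_LAST, sg[count - 1].flags);
}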
pqi_fail_all_outstanding_requests(ctrl_info); pqi_ctrl_unblock_requests(ctrl_info); + pqi_take_ctrl_devices_offline(ctrl_info); } static void pqi_ctrl_offline_worker(struct work_struct *work) @@ -9100,6 +9218,27 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, schedule_work(&ctrl_info->ctrl_offline_work); } +static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + unsigned long flags; + struct pqi_scsi_dev *device; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list); + if (rc) + continue; + + /* + * Is the sdev pointer NULL? + */ + if (device->sdev) + scsi_device_set_state(device->sdev, SDEV_OFFLINE); + } + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); +} + static void pqi_print_ctrl_info(struct pci_dev *pci_dev, const struct pci_device_id *id) { @@ -9452,6 +9591,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x0462) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x1104) }, { @@ -9480,6 +9623,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1110) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8460) }, { @@ -9488,6 +9635,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x8462) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xc460) }, { @@ -9596,6 +9747,18 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x00a3) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x00a1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f3a, 0x0104) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd227) }, { @@ -9924,6 +10087,30 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4044) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4054) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4084) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4094) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4140) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4240) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) }, { @@ -10140,6 +10327,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1018, 0x8238) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f3f, 0x0610) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0220) }, { @@ -10148,10 +10343,30 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0222) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0223) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0224) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0225) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0520) 
}, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0521) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0522) }, { @@ -10172,6 +10387,26 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0624) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0625) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0626) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0627) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0628) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1014, 0x0718) }, { @@ -10188,6 +10423,114 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1137, 0x02fe) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1137, 0x02ff) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1137, 0x0300) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ded, 0x3301) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0045) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0046) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0047) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0048) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0051) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0052) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0053) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0054) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x006b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x006c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x006d) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x006f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0070) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0071) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0072) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0086) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0087) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0088) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0089) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1e93, 0x1000) }, { @@ -10244,6 +10587,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x100b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1f51, 0x100e) }, { @@ -10272,6 +10619,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x00a3) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) }, { 0 } diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c 
b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index a981d0377948..93e96705754e 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -14,7 +14,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_transport_sas.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "smartpqi.h" static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port) diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 673437c7152b..ae5a264d062d 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -14,7 +14,7 @@ #include <linux/delay.h> #include <linux/pci.h> #include <scsi/scsi_device.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "smartpqi.h" #include "smartpqi_sis.h" @@ -29,6 +29,7 @@ #define SIS_ENABLE_INTX 0x80 #define SIS_SOFT_RESET 0x100 #define SIS_CMD_READY 0x200 +#define SIS_NOTIFY_KDUMP 0x400 #define SIS_TRIGGER_SHUTDOWN 0x800000 #define SIS_PQI_RESET_QUIESCE 0x1000000 @@ -52,6 +53,8 @@ #define SIS_BASE_STRUCT_ALIGNMENT 16 #define SIS_CTRL_KERNEL_FW_TRIAGE 0x3 +#define SIS_CTRL_KERNEL_CTRL_LOGGING 0x4 +#define SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS 0x18 #define SIS_CTRL_KERNEL_UP 0x80 #define SIS_CTRL_KERNEL_PANIC 0x100 #define SIS_CTRL_READY_TIMEOUT_SECS 180 @@ -65,6 +68,13 @@ enum sis_fw_triage_status { FW_TRIAGE_COMPLETED }; +enum sis_ctrl_logging_status { + CTRL_LOGGING_NOT_STARTED = 0, + CTRL_LOGGING_STARTED, + CTRL_LOGGING_COND_INVALID, + CTRL_LOGGING_COMPLETED +}; + #pragma pack(1) /* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */ @@ -442,6 +452,21 @@ static inline enum sis_fw_triage_status SIS_CTRL_KERNEL_FW_TRIAGE)); } +bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info) +{ + return readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING; +} + +void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info) +{ + sis_set_doorbell_bit(ctrl_info, SIS_NOTIFY_KDUMP); +} + +static inline enum sis_ctrl_logging_status sis_read_ctrl_logging_status(struct pqi_ctrl_info *ctrl_info) +{ + return ((enum sis_ctrl_logging_status)((readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS) >> 3)); +} + void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) { writel(SIS_SOFT_RESET, @@ -484,6 +509,41 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info) return rc; } +#define SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS 180 +#define SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS 1 + +int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + enum sis_ctrl_logging_status status; + unsigned long timeout; + + timeout = (SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS * HZ) + jiffies; + while (1) { + status = sis_read_ctrl_logging_status(ctrl_info); + if (status == CTRL_LOGGING_COND_INVALID) { + dev_err(&ctrl_info->pci_dev->dev, + "controller data logging condition invalid\n"); + rc = -EINVAL; + break; + } else if (status == CTRL_LOGGING_COMPLETED) { + rc = 0; + break; + } + + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "timed out waiting for controller data logging status\n"); + rc = -ETIMEDOUT; + break; + } + + ssleep(SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS); + } + + return rc; +} + void sis_verify_structures(void) { BUILD_BUG_ON(offsetof(struct sis_base_struct, diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 
0c97626d87d4..7e0eac3d07de 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -31,6 +31,9 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info); int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info); +bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info); +void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info); +int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info); extern unsigned int sis_ctrl_ready_timeout_secs; diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index 9df1c90a24c1..d1d2556c8fc4 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -119,7 +119,7 @@ static void snirm710_driver_remove(struct platform_device *dev) static struct platform_driver snirm710_driver = { .probe = snirm710_probe, - .remove_new = snirm710_driver_remove, + .remove = snirm710_driver_remove, .driver = { .name = "snirm_53c710", }, diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c index 3ddbdbc3ded1..48bf82d042b4 100644 --- a/drivers/scsi/snic/snic_attrs.c +++ b/drivers/scsi/snic/snic_attrs.c @@ -13,7 +13,7 @@ snic_show_sym_name(struct device *dev, { struct snic *snic = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%s\n", snic->name); + return sysfs_emit(buf, "%s\n", snic->name); } static ssize_t @@ -23,8 +23,7 @@ snic_show_state(struct device *dev, { struct snic *snic = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%s\n", - snic_state_str[snic_get_state(snic)]); + return sysfs_emit(buf, "%s\n", snic_state_str[snic_get_state(snic)]); } static ssize_t @@ -32,7 +31,7 @@ snic_show_drv_version(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION); + return sysfs_emit(buf, "%s\n", SNIC_DRV_VERSION); } static ssize_t @@ -45,8 +44,8 @@ snic_show_link_state(struct device *dev, if (snic->config.xpt_type == SNIC_DAS) snic->link_status = svnic_dev_link_status(snic->vdev); - return snprintf(buf, PAGE_SIZE, "%s\n", - (snic->link_status) ? "Link Up" : "Link Down"); + return sysfs_emit(buf, "%s\n", + (snic->link_status) ? "Link Up" : "Link Down"); } static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL); diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c index cc824dcfe7da..1c24517e4e65 100644 --- a/drivers/scsi/snic/snic_main.c +++ b/drivers/scsi/snic/snic_main.c @@ -21,7 +21,7 @@ #define PCI_DEVICE_ID_CISCO_SNIC 0x0046 /* Supported devices by snic module */ -static struct pci_device_id snic_id_table[] = { +static const struct pci_device_id snic_id_table[] = { {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) }, { 0, } /* end of table */ }; @@ -42,11 +42,11 @@ module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN"); /* - * snic_slave_alloc : callback function to SCSI Mid Layer, called on + * snic_sdev_init : callback function to SCSI Mid Layer, called on * scsi device initialization. 
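/*
 * The snprintf() -> sysfs_emit() conversions in snic_attrs.c above are
 * the standard recipe for sysfs show() callbacks: sysfs_emit() already
 * knows the buffer is a full page, so callers stop spelling out
 * PAGE_SIZE, and it warns if the buffer is misaligned or the output
 * would overflow.  A self-contained sketch with illustrative names:
 */
#include <scsi/scsi_host.h>

static ssize_t example_name_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	/* single line of output, bounded to PAGE_SIZE by sysfs_emit() */
	return sysfs_emit(buf, "%s\n", shost->hostt->name);
}
static DEVICE_ATTR_RO(example_name);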
*/ static int -snic_slave_alloc(struct scsi_device *sdev) +snic_sdev_init(struct scsi_device *sdev) { struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); @@ -57,11 +57,11 @@ snic_slave_alloc(struct scsi_device *sdev) } /* - * snic_slave_configure : callback function to SCSI Mid Layer, called on + * snic_sdev_configure : callback function to SCSI Mid Layer, called on * scsi device initialization. */ static int -snic_slave_configure(struct scsi_device *sdev) +snic_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { struct snic *snic = shost_priv(sdev->host); u32 qdepth = 0, max_ios = 0; @@ -107,8 +107,8 @@ static const struct scsi_host_template snic_host_template = { .eh_abort_handler = snic_abort_cmd, .eh_device_reset_handler = snic_device_reset, .eh_host_reset_handler = snic_host_reset, - .slave_alloc = snic_slave_alloc, - .slave_configure = snic_slave_configure, + .sdev_init = snic_sdev_init, + .sdev_configure = snic_sdev_configure, .change_queue_depth = snic_change_queue_depth, .this_id = -1, .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH, @@ -300,9 +300,8 @@ snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) } SNIC_BUG_ON(shost->work_q != NULL); - snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", - shost->host_no); - shost->work_q = create_singlethread_workqueue(shost->work_q_name); + shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM, + shost->host_no); if (!shost->work_q) { SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); @@ -873,7 +872,7 @@ snic_global_data_init(void) snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep; len = sizeof(struct snic_host_req); - cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, + cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN, SLAB_HWCACHE_ALIGN, NULL); if (!cachep) { SNIC_ERR("Failed to create snic tm req slab\n"); @@ -884,7 +883,8 @@ snic_global_data_init(void) snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep; /* snic_event queue */ - snic_glob->event_q = create_singlethread_workqueue("snic_event_wq"); + snic_glob->event_q = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq"); if (!snic_glob->event_q) { SNIC_ERR("snic event queue create failed\n"); ret = -ENOMEM; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 268b3a40891e..b17796d5ee66 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -52,7 +52,7 @@ #include <linux/pm_runtime.h> #include <linux/uaccess.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> @@ -95,7 +95,6 @@ static const struct dev_pm_ops sr_pm_ops = { static struct scsi_driver sr_template = { .gendrv = { .name = "sr", - .owner = THIS_MODULE, .probe = sr_probe, .remove = sr_remove, .pm = &sr_pm_ops, @@ -112,7 +111,7 @@ static struct lock_class_key sr_bio_compl_lkclass; static int sr_open(struct cdrom_device_info *, int); static void sr_release(struct cdrom_device_info *); -static void get_sectorsize(struct scsi_cd *); +static int get_sectorsize(struct scsi_cd *); static int get_capabilities(struct scsi_cd *); static unsigned int sr_check_events(struct cdrom_device_info *cdi, @@ -474,15 +473,15 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) return BLK_STS_IOERR; } -static void sr_revalidate_disk(struct scsi_cd *cd) +static int sr_revalidate_disk(struct scsi_cd *cd) { struct scsi_sense_hdr sshdr; /* if the unit is not ready, nothing more to do */ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) - return; + 
return 0; sr_cd_check(&cd->cdi); - get_sectorsize(cd); + return get_sectorsize(cd); } static int sr_block_open(struct gendisk *disk, blk_mode_t mode) @@ -495,13 +494,16 @@ static int sr_block_open(struct gendisk *disk, blk_mode_t mode) return -ENXIO; scsi_autopm_get_device(sdev); - if (disk_check_media_change(disk)) - sr_revalidate_disk(cd); + if (disk_check_media_change(disk)) { + ret = sr_revalidate_disk(cd); + if (ret) + goto out; + } mutex_lock(&cd->lock); ret = cdrom_open(&cd->cdi, mode); mutex_unlock(&cd->lock); - +out: scsi_autopm_put_device(sdev); if (ret) scsi_device_put(cd->device); @@ -686,7 +688,9 @@ static int sr_probe(struct device *dev) blk_pm_runtime_init(sdev->request_queue, dev); dev_set_drvdata(dev, cd); - sr_revalidate_disk(cd); + error = sr_revalidate_disk(cd); + if (error) + goto unregister_cdrom; error = device_add_disk(&sdev->sdev_gendev, disk, NULL); if (error) @@ -715,13 +719,14 @@ fail: } -static void get_sectorsize(struct scsi_cd *cd) +static int get_sectorsize(struct scsi_cd *cd) { + struct request_queue *q = cd->device->request_queue; static const u8 cmd[10] = { READ_CAPACITY }; unsigned char buffer[8] = { }; - int the_result; + struct queue_limits lim; + int err; int sector_size; - struct request_queue *queue; struct scsi_failure failure_defs[] = { { .result = SCMD_FAILURE_RESULT_ANY, @@ -737,10 +742,10 @@ static void get_sectorsize(struct scsi_cd *cd) }; /* Do the command and wait.. */ - the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer, + err = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer, sizeof(buffer), SR_TIMEOUT, MAX_RETRIES, &exec_args); - if (the_result) { + if (err) { cd->capacity = 0x1fffff; sector_size = 2048; /* A guess, just in case */ } else { @@ -790,10 +795,9 @@ static void get_sectorsize(struct scsi_cd *cd) set_capacity(cd->disk, cd->capacity); } - queue = cd->device->request_queue; - blk_queue_logical_block_size(queue, sector_size); - - return; + lim = queue_limits_start_update(q); + lim.logical_block_size = sector_size; + return queue_limits_commit_update_frozen(q, &lim); } static int get_capabilities(struct scsi_cd *cd) diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 1175f2e213b5..dc899277b3a4 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *); int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *); int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *); int sr_reset(struct cdrom_device_info *); -int sr_select_speed(struct cdrom_device_info *cdi, int speed); +int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed); int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); int sr_is_xa(Scsi_CD *); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 5b0b35e60e61..089653018d32 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi) return 0; } -int sr_select_speed(struct cdrom_device_info *cdi, int speed) +int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; + /* avoid exceeding the max speed or overflowing integer bounds */ + speed = clamp(speed, 0, 0xffff / 177); + if (speed == 0) speed = 0xffff; /* set to max */ else diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 5a9bcf8e0792..74a6830b7ed8 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -46,7 +46,7 @@ static const char *verstr = 
"20160209"; #include <linux/uaccess.h> #include <asm/dma.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> @@ -163,9 +163,11 @@ static const char *st_formats[] = { static int debugging = DEBUG; +/* Setting these non-zero may risk recognizing resets */ #define MAX_RETRIES 0 #define MAX_WRITE_RETRIES 0 #define MAX_READY_RETRIES 0 + #define NO_TAPE NOT_READY #define ST_TIMEOUT (900 * HZ) @@ -206,7 +208,6 @@ static int st_remove(struct device *); static struct scsi_driver st_template = { .gendrv = { .name = "st", - .owner = THIS_MODULE, .probe = st_probe, .remove = st_remove, .groups = st_drv_groups, @@ -358,10 +359,18 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) { int result = SRpnt->result; u8 scode; + unsigned int ctr; DEB(const char *stp;) char *name = STp->name; struct st_cmdstatus *cmdstatp; + ctr = scsi_get_ua_por_ctr(STp->device); + if (ctr != STp->por_ctr) { + STp->por_ctr = ctr; + STp->pos_unknown = 1; /* ASC => power on / reset */ + st_printk(KERN_WARNING, STp, "Power on/reset recognized."); + } + if (!result) return 0; @@ -414,10 +423,11 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) if (cmdstatp->have_sense && cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17) STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */ - if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29) + if (cmdstatp->have_sense && scode == UNIT_ATTENTION && + cmdstatp->sense_hdr.asc == 0x29 && !STp->pos_unknown) { STp->pos_unknown = 1; /* ASC => power on / reset */ - - STp->pos_unknown |= STp->device->was_reset; + st_printk(KERN_WARNING, STp, "Power on/reset recognized."); + } if (cmdstatp->have_sense && scode == RECOVERED_ERROR @@ -835,6 +845,9 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next) int backspace, result; struct st_partstat *STps; + if (STp->ready != ST_READY) + return 0; + /* * If there was a bus reset, block further access * to this device. @@ -842,8 +855,6 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next) if (STp->pos_unknown) return (-EIO); - if (STp->ready != ST_READY) - return 0; STps = &(STp->ps[STp->partition]); if (STps->rw == ST_WRITING) /* Writing */ return st_flush_write_buffer(STp); @@ -952,7 +963,6 @@ static void reset_state(struct scsi_tape *STp) STp->partition = find_partition(STp); if (STp->partition < 0) STp->partition = 0; - STp->new_partition = STp->partition; } } @@ -969,6 +979,7 @@ static int test_ready(struct scsi_tape *STp, int do_wait) { int attentions, waits, max_wait, scode; int retval = CHKRES_READY, new_session = 0; + unsigned int ctr; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt = NULL; struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; @@ -991,7 +1002,10 @@ static int test_ready(struct scsi_tape *STp, int do_wait) scode = cmdstatp->sense_hdr.sense_key; if (scode == UNIT_ATTENTION) { /* New media? */ - new_session = 1; + if (cmdstatp->sense_hdr.asc == 0x28) { /* New media */ + new_session = 1; + DEBC_printk(STp, "New tape session."); + } if (attentions < MAX_ATTENTIONS) { attentions++; continue; @@ -1022,11 +1036,23 @@ static int test_ready(struct scsi_tape *STp, int do_wait) } } + ctr = scsi_get_ua_new_media_ctr(STp->device); + if (ctr != STp->new_media_ctr) { + STp->new_media_ctr = ctr; + new_session = 1; + DEBC_printk(STp, "New tape session."); + } + retval = (STp->buffer)->syscall_result; if (!retval) retval = new_session ? 
CHKRES_NEW_SESSION : CHKRES_READY; break; } + if (STp->first_tur) { + /* Don't set pos_unknown right after device recognition */ + STp->pos_unknown = 0; + STp->first_tur = 0; + } if (SRpnt != NULL) st_release_request(SRpnt); @@ -2889,7 +2915,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon timeout = STp->long_timeout * 8; DEBC_printk(STp, "Erasing tape.\n"); - fileno = blkno = at_sm = 0; break; case MTSETBLK: /* Set block length */ case MTSETDENSITY: /* Set tape density */ @@ -2922,14 +2947,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon if (cmd_in == MTSETDENSITY) { (STp->buffer)->b_data[4] = arg; STp->density_changed = 1; /* At least we tried ;-) */ + STp->changed_density = arg; } else if (cmd_in == SET_DENS_AND_BLK) (STp->buffer)->b_data[4] = arg >> 24; else (STp->buffer)->b_data[4] = STp->density; if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { ltmp = arg & MT_ST_BLKSIZE_MASK; - if (cmd_in == MTSETBLK) + if (cmd_in == MTSETBLK) { STp->blksize_changed = 1; /* At least we tried ;-) */ + STp->changed_blksize = arg; + } } else ltmp = STp->block_size; (STp->buffer)->b_data[9] = (ltmp >> 16); @@ -3076,7 +3104,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon cmd_in == MTSETDRVBUFFER || cmd_in == SET_DENS_AND_BLK) { if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST && - !(STp->use_pf & PF_TESTED)) { + cmdstatp->sense_hdr.asc == 0x24 && + (STp->device)->scsi_level <= SCSI_2 && + !(STp->use_pf & PF_TESTED)) { /* Try the other possible state of Page Format if not already tried */ STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; @@ -3506,6 +3536,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) int i, cmd_nr, cmd_type, bt; int retval = 0; unsigned int blk; + bool cmd_mtiocget; struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; @@ -3619,6 +3650,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) */ if (mtc.mt_op != MTREW && mtc.mt_op != MTOFFL && + mtc.mt_op != MTLOAD && mtc.mt_op != MTRETEN && mtc.mt_op != MTERASE && mtc.mt_op != MTSEEK && @@ -3626,9 +3658,23 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) retval = (-EIO); goto out; } - reset_state(STp); - /* remove this when the midlevel properly clears was_reset */ - STp->device->was_reset = 0; + reset_state(STp); /* Clears pos_unknown */ + + /* Fix the device settings after reset, ignore errors */ + if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK || + mtc.mt_op == MTEOM) { + if (STp->can_partitions) { + /* STp->new_partition contains the + * latest partition set + */ + STp->partition = 0; + switch_partition(STp); + } + if (STp->density_changed) + st_int_ioctl(STp, MTSETDENSITY, STp->changed_density); + if (STp->blksize_changed) + st_int_ioctl(STp, MTSETBLK, STp->changed_blksize); + } } if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK && @@ -3732,17 +3778,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) goto out; } + cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET); + if ((i = flush_buffer(STp, 0)) < 0) { - retval = i; - goto out; - } - if (STp->can_partitions && - (i = switch_partition(STp)) < 0) { - retval = i; - goto out; + if (cmd_mtiocget && STp->pos_unknown) { + /* flush fails -> modify status accordingly */ + reset_state(STp); + STp->pos_unknown = 1; + } else { /* return error */ + retval = i; + goto out; + } + 
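/*
 * The st changes above build on the midlayer's UNIT ATTENTION
 * counters: the driver caches the counter value at probe time and
 * compares it on each status check, so a power-on/reset (or new-media)
 * UA is noticed even when the midlayer consumed the sense data before
 * the driver could see it.  Minimal sketch of the pattern, with
 * illustrative names:
 */
struct example_tape {
	struct scsi_device *sdev;
	unsigned int por_ctr;		/* last observed power-on/reset count */
	bool pos_unknown;
};

static void example_check_reset(struct example_tape *t)
{
	unsigned int ctr = scsi_get_ua_por_ctr(t->sdev);

	if (ctr != t->por_ctr) {
		t->por_ctr = ctr;	/* consume the event exactly once */
		t->pos_unknown = true;	/* tape position can no longer be trusted */
	}
}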
} else { /* flush_buffer succeeds */ + if (STp->can_partitions) { + i = switch_partition(STp); + if (i < 0) { + retval = i; + goto out; + } + } } - if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) { + if (cmd_mtiocget) { struct mtget mt_status; if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) { @@ -3756,7 +3813,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK); mt_status.mt_blkno = STps->drv_block; mt_status.mt_fileno = STps->drv_file; - if (STp->block_size != 0) { + if (STp->block_size != 0 && mt_status.mt_blkno >= 0) { if (STps->rw == ST_WRITING) mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size; @@ -4101,7 +4158,7 @@ static void validate_options(void) */ static int __init st_setup(char *str) { - int i, len, ints[5]; + int i, len, ints[ARRAY_SIZE(parms) + 1]; char *stp; stp = get_options(str, ARRAY_SIZE(ints), ints); @@ -4312,6 +4369,7 @@ static int st_probe(struct device *dev) blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT); tpnt->long_timeout = ST_LONG_TIMEOUT; tpnt->try_dio = try_direct_io; + tpnt->first_tur = 1; for (i = 0; i < ST_NBR_MODES; i++) { STm = &(tpnt->modes[i]); @@ -4362,6 +4420,9 @@ static int st_probe(struct device *dev) goto out_idr_remove; } + tpnt->new_media_ctr = scsi_get_ua_new_media_ctr(SDp); + tpnt->por_ctr = scsi_get_ua_por_ctr(SDp); + dev_set_drvdata(dev, tpnt); @@ -4643,6 +4704,24 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR_RO(options); +/** + * position_lost_in_reset_show - Value 1 indicates that reads, writes, etc. + * are blocked because a device reset has occurred and no operation positioning + * the tape has been issued. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t position_lost_in_reset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + struct scsi_tape *STp = STm->tape; + + return sprintf(buf, "%d", STp->pos_unknown); +} +static DEVICE_ATTR_RO(position_lost_in_reset); + /* Support for tape stats */ /** @@ -4827,6 +4906,7 @@ static struct attribute *st_dev_attrs[] = { &dev_attr_default_density.attr, &dev_attr_default_compression.attr, &dev_attr_options.attr, + &dev_attr_position_lost_in_reset.attr, NULL, }; diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index 7a68eaba7e81..0d7c4b8c2c8a 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h @@ -165,17 +165,24 @@ struct scsi_tape { unsigned char compression_changed; unsigned char drv_buffer; unsigned char density; + unsigned char changed_density; unsigned char door_locked; unsigned char autorew_dev; /* auto-rewind device */ unsigned char rew_at_close; /* rewind necessary at close */ unsigned char inited; unsigned char cleaning_req; /* cleaning requested? 
*/ + unsigned char first_tur; /* first TEST UNIT READY */ int block_size; + int changed_blksize; int min_block; int max_block; int recover_count; /* From tape opening */ int recover_reg; /* From last status call */ + /* The saved values of midlevel counters */ + unsigned int new_media_ctr; + unsigned int por_ctr; + #if DEBUG unsigned char write_pending; int nbr_finished; diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 8ffb75be99bc..63ed7f9aaa93 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -334,7 +334,6 @@ struct st_hba { struct st_ccb *wait_ccb; __le32 *scratch; - char work_q_name[20]; struct workqueue_struct *work_q; struct work_struct reset_work; wait_queue_head_t reset_waitq; @@ -585,7 +584,7 @@ static void return_abnormal_state(struct st_hba *hba, int status) spin_unlock_irqrestore(hba->host->host_lock, flags); } static int -stex_slave_config(struct scsi_device *sdev) +stex_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; @@ -1482,14 +1481,14 @@ static const struct scsi_host_template driver_template = { .proc_name = DRV_NAME, .bios_param = stex_biosparam, .queuecommand = stex_queuecommand, - .slave_configure = stex_slave_config, + .sdev_configure = stex_sdev_configure, .eh_abort_handler = stex_abort, .eh_host_reset_handler = stex_reset, .this_id = -1, .dma_boundary = PAGE_SIZE - 1, }; -static struct pci_device_id stex_pci_tbl[] = { +static const struct pci_device_id stex_pci_tbl[] = { /* st_shasta */ { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */ @@ -1795,9 +1794,8 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) hba->pdev = pdev; init_waitqueue_head(&hba->reset_waitq); - snprintf(hba->work_q_name, sizeof(hba->work_q_name), - "stex_wq_%d", host->host_no); - hba->work_q = create_singlethread_workqueue(hba->work_q_name); + hba->work_q = alloc_ordered_workqueue("stex_wq_%d", WQ_MEM_RECLAIM, + host->host_no); if (!hba->work_q) { printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", pci_name(pdev)); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 7ceb982040a5..2e6b2412d2c9 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -149,6 +149,8 @@ struct hv_fc_wwn_packet { */ static int vmstor_proto_version; +static bool hv_dev_is_fc(struct hv_device *hv_dev); + #define STORVSC_LOGGING_NONE 0 #define STORVSC_LOGGING_ERROR 1 #define STORVSC_LOGGING_WARN 2 @@ -169,6 +171,12 @@ do { \ dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \ } while (0) +#define storvsc_log_ratelimited(dev, level, fmt, ...) \ +do { \ + if (do_logging(level)) \ + dev_warn_ratelimited(&(dev)->device, fmt, ##__VA_ARGS__); \ +} while (0) + struct vmscsi_request { u16 length; u8 srb_status; @@ -768,7 +776,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || vstor_packet->status != 0) { - dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n", + dev_err(dev, "Failed to create sub-channel: op=%d, host=0x%x\n", vstor_packet->operation, vstor_packet->status); return; } @@ -915,14 +923,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc) /* * Allocate state to manage the sub-channels. - * We allocate an array based on the numbers of possible CPUs - * (Hyper-V does not support cpu online/offline). 
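/*
 * The workqueue conversions in this series (snic, stex, vmw_pvscsi)
 * all take the same shape: create_singlethread_workqueue() is on its
 * way out, and alloc_ordered_workqueue() both preserves the
 * one-work-item-at-a-time ordering and formats the queue name in
 * place, so drivers can drop their snprintf()-into-a-name-buffer
 * dance.  WQ_MEM_RECLAIM stays because these queues service the I/O
 * path.  Sketch:
 */
static struct workqueue_struct *example_create_wq(unsigned int host_no)
{
	/*
	 * replaces: snprintf(name, ..., "example_wq_%u", host_no);
	 *           create_singlethread_workqueue(name);
	 */
	return alloc_ordered_workqueue("example_wq_%u", WQ_MEM_RECLAIM, host_no);
}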
- * This Array will be sparseley populated with unique - * channels - primary + sub-channels. - * We will however populate all the slots to evenly distribute - * the load. + * We allocate an array based on the number of CPU ids. This array + * is initially sparsely populated for the CPUs assigned to channels: + * primary + sub-channels. As I/Os are initiated by different CPUs, + * the slots for all online CPUs are populated to evenly distribute + * the load across all channels. */ - stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *), + stor_device->stor_chns = kcalloc(nr_cpu_ids, sizeof(void *), GFP_KERNEL); if (stor_device->stor_chns == NULL) return -ENOMEM; @@ -1138,6 +1145,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, * not correctly handle: * INQUIRY command with page code parameter set to 0x80 * MODE_SENSE command with cmd[2] == 0x1c + * MAINTENANCE_IN is not supported by HyperV FC passthrough * * Setup srb and scsi status so this won't be fatal. * We do this so we can distinguish truly fatal failues @@ -1145,7 +1153,9 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, */ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || - (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { + (stor_pkt->vm_srb.cdb[0] == MODE_SENSE) || + (stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN && + hv_dev_is_fc(device))) { vstor_packet->vm_srb.scsi_status = 0; vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; } @@ -1172,8 +1182,8 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ? STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR; - storvsc_log(device, loglevel, - "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", + storvsc_log_ratelimited(device, loglevel, + "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x host 0x%x\n", scsi_cmd_to_rq(request->cmd)->tag, stor_pkt->vm_srb.cdb[0], vstor_packet->vm_srb.scsi_status, @@ -1574,7 +1584,8 @@ static int storvsc_device_alloc(struct scsi_device *sdevice) return 0; } -static int storvsc_device_configure(struct scsi_device *sdevice) +static int storvsc_sdev_configure(struct scsi_device *sdevice, + struct queue_limits *lim) { blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); @@ -1789,6 +1800,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) length = scsi_bufflen(scmnd); payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; + payload->range.len = 0; payload_sz = 0; if (scsi_sg_count(scmnd)) { @@ -1807,6 +1819,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) return SCSI_MLQUEUE_DEVICE_BUSY; } + payload->rangecount = 1; payload->range.len = length; payload->range.offset = offset_in_hvpg; @@ -1875,8 +1888,8 @@ static struct scsi_host_template scsi_driver = { .eh_host_reset_handler = storvsc_host_reset_handler, .proc_name = "storvsc_host", .eh_timed_out = storvsc_eh_timed_out, - .slave_alloc = storvsc_device_alloc, - .slave_configure = storvsc_device_configure, + .sdev_init = storvsc_device_alloc, + .sdev_configure = storvsc_sdev_configure, .cmd_per_lun = 2048, .this_id = -1, /* Ensure there are no gaps in presented sgls */ diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 4a8cc2e8238e..ca9cd691cc32 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -304,7 +304,7 @@ static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata, sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); #endif - return count; + return 
count; } @@ -656,8 +656,14 @@ static void __exit sun3_scsi_remove(struct platform_device *pdev) iounmap(ioaddr); } -static struct platform_driver sun3_scsi_driver = { - .remove_new = __exit_p(sun3_scsi_remove), +/* + * sun3_scsi_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. + */ +static struct platform_driver sun3_scsi_driver __refdata = { + .remove = __exit_p(sun3_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, @@ -666,4 +672,5 @@ static struct platform_driver sun3_scsi_driver = { module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe); MODULE_ALIAS("platform:" DRV_MODULE_NAME); +MODULE_DESCRIPTION("Sun3 NCR5380 SCSI controller driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index e20f314cf3e7..365406885b8e 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -265,7 +265,7 @@ static void esp_sun3x_remove(struct platform_device *dev) static struct platform_driver esp_sun3x_driver = { .probe = esp_sun3x_probe, - .remove_new = esp_sun3x_remove, + .remove = esp_sun3x_remove, .driver = { .name = "sun3x_esp", }, diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 5ce6c9d19d1e..aa430501f0c7 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -603,7 +603,7 @@ static struct platform_driver esp_sbus_driver = { .of_match_table = esp_match, }, .probe = esp_sbus_probe, - .remove_new = esp_sbus_remove, + .remove = esp_sbus_remove, }; module_platform_driver(esp_sbus_driver); diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index a2560cc807d3..1a6eb72ca281 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -765,7 +765,7 @@ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) } } -static int sym53c8xx_slave_alloc(struct scsi_device *sdev) +static int sym53c8xx_sdev_init(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; @@ -825,7 +825,8 @@ out: /* * Linux entry point for device queue sizing. */ -static int sym53c8xx_slave_configure(struct scsi_device *sdev) +static int sym53c8xx_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; @@ -861,14 +862,14 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev) return 0; } -static void sym53c8xx_slave_destroy(struct scsi_device *sdev) +static void sym53c8xx_sdev_destroy(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; struct sym_lcb *lp = sym_lp(tp, sdev->lun); unsigned long flags; - /* if slave_alloc returned before allocating a sym_lcb, return */ + /* if sdev_init returned before allocating a sym_lcb, return */ if (!lp) return; @@ -1656,7 +1657,7 @@ static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev) struct sym_hcb *np = sym_get_hcb(shost); printk("%s: detaching ...\n", sym_name(np)); - del_timer_sync(&np->s.timer); + timer_delete_sync(&np->s.timer); /* * Reset NCR chip. 
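/*
 * The slave_*() -> sdev_*() renames repeated throughout this series
 * (3w-9xxx, smartpqi, snic, stex, storvsc, sym53c8xx, xen-scsifront)
 * are one mechanical conversion: the midlayer renamed the
 * scsi_host_template callbacks, and sdev_configure() now also receives
 * the queue_limits being built so drivers can adjust limits there
 * instead of poking the request_queue afterwards.  Hedged sketch for a
 * hypothetical driver 'foo':
 */
static int foo_sdev_init(struct scsi_device *sdev)
{
	return 0;			/* was foo_slave_alloc() */
}

static int foo_sdev_configure(struct scsi_device *sdev,
			      struct queue_limits *lim)
{
	lim->max_hw_sectors = 1024;	/* illustrative limit adjustment */
	return 0;			/* was foo_slave_configure() */
}

static void foo_sdev_destroy(struct scsi_device *sdev)
{
	/* was foo_slave_destroy() */
}

static const struct scsi_host_template foo_template = {
	.sdev_init	= foo_sdev_init,	/* was .slave_alloc     */
	.sdev_configure	= foo_sdev_configure,	/* was .slave_configure */
	.sdev_destroy	= foo_sdev_destroy,	/* was .slave_destroy   */
};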
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 617eb892f4ad..21ce3e940192 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,7 +29,6 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_devinfo.h>
 #include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>

 #include "sd.h"
@@ -746,7 +745,7 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
 		if (i == HCTX_TYPE_POLL)
 			blk_mq_map_queues(map);
 		else
-			blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+			blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
 	}
 }
@@ -801,7 +800,7 @@ static const struct scsi_host_template virtscsi_host_template = {
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
 	.eh_timed_out = virtscsi_eh_timed_out,
-	.slave_alloc = virtscsi_device_alloc,
+	.sdev_init = virtscsi_device_alloc,

 	.dma_boundary = UINT_MAX,
 	.map_queues = virtscsi_map_queues,
@@ -841,19 +840,16 @@ static int virtscsi_init(struct virtio_device *vdev,
 	int err;
 	u32 i;
 	u32 num_vqs, num_poll_vqs, num_req_vqs;
-	vq_callback_t **callbacks;
-	const char **names;
+	struct virtqueue_info *vqs_info;
 	struct virtqueue **vqs;
 	struct irq_affinity desc = { .pre_vectors = 2 };

 	num_req_vqs = vscsi->num_queues;
 	num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
 	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
-	callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
-				  GFP_KERNEL);
-	names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);
+	vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);

-	if (!callbacks || !vqs || !names) {
+	if (!vqs || !vqs_info) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -869,22 +865,20 @@ static int virtscsi_init(struct virtio_device *vdev,
 		 vscsi->io_queues[HCTX_TYPE_READ],
 		 vscsi->io_queues[HCTX_TYPE_POLL]);

-	callbacks[0] = virtscsi_ctrl_done;
-	callbacks[1] = virtscsi_event_done;
-	names[0] = "control";
-	names[1] = "event";
+	vqs_info[0].callback = virtscsi_ctrl_done;
+	vqs_info[0].name = "control";
+	vqs_info[1].callback = virtscsi_event_done;
+	vqs_info[1].name = "event";
 	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
-		callbacks[i] = virtscsi_req_done;
-		names[i] = "request";
+		vqs_info[i].callback = virtscsi_req_done;
+		vqs_info[i].name = "request";
 	}

-	for (; i < num_vqs; i++) {
-		callbacks[i] = NULL;
-		names[i] = "request_poll";
-	}
+	for (; i < num_vqs; i++)
+		vqs_info[i].name = "request_poll";

 	/* Discover virtqueues and write information to configuration.
 	 */
-	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
+	err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
 	if (err)
 		goto out;
@@ -900,8 +894,7 @@ static int virtscsi_init(struct virtio_device *vdev,
 	err = 0;

 out:
-	kfree(names);
-	kfree(callbacks);
+	kfree(vqs_info);
 	kfree(vqs);
 	if (err)
 		virtscsi_remove_vqs(vdev);
@@ -1052,7 +1045,6 @@ static struct virtio_driver virtio_scsi_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name = KBUILD_MODNAME,
-	.driver.owner = THIS_MODULE,
 	.id_table = id_table,
 	.probe = virtscsi_probe,
 #ifdef CONFIG_PM_SLEEP
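The virtio_scsi conversion above folds the parallel callbacks[] and names[] arrays into a single array of struct virtqueue_info, which virtio_find_vqs() now consumes directly. A sketch of the pattern for a hypothetical device with one control queue and N request queues, where kcalloc()'s zero-fill stands in for the removed NULL-callback loop:

	struct virtqueue_info *vqs_info;
	unsigned int i;
	int err;

	vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);
	if (!vqs_info)
		return -ENOMEM;

	vqs_info[0].callback = ctrl_done;	/* hypothetical handler */
	vqs_info[0].name = "control";
	for (i = 1; i < num_vqs; i++) {
		vqs_info[i].callback = req_done;	/* hypothetical handler */
		vqs_info[i].name = "request";
	}
	/* entries left with a NULL .callback describe poll-only queues */
	err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);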
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index f88ecdb93a8a..32242d86cf5b 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1137,7 +1137,8 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
 	snprintf(name, sizeof(name),
 		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

-	adapter->workqueue = create_singlethread_workqueue(name);
+	adapter->workqueue =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
 	if (!adapter->workqueue) {
 		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
 		return 0;
@@ -1346,7 +1347,7 @@ exit:

 static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-	unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY;
+	unsigned int irq_flag = PCI_IRQ_ALL_TYPES;
 	struct pvscsi_adapter *adapter;
 	struct pvscsi_adapter adapter_temp;
 	struct Scsi_Host *host = NULL;
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index e4fafc77bd20..dd1fef9226f2 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -831,7 +831,7 @@ wd33c93_intr(struct Scsi_Host *instance)
 		/* construct an IDENTIFY message with correct disconnect bit */

 		hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun);
-		if (scsi_pointer->phase)
+		if (WD33C93_scsi_pointer(cmd)->phase)
 			hostdata->outgoing_msg[0] |= 0x40;

 		if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
@@ -1721,9 +1721,7 @@ wd33c93_setup(char *str)
 	p1 = setup_buffer;
 	*p1 = '\0';
 	if (str)
-		strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer));
-	setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
-	p1 = setup_buffer;
+		strscpy(p1, str, SETUP_BUFFER_SIZE);
 	i = 0;
 	while (*p1 && (i < MAX_SETUP_ARGS)) {
 		p2 = strchr(p1, ',');
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9ec55ddc1204..924025305753 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -735,7 +735,8 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
 	return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
 }

-static int scsifront_sdev_configure(struct scsi_device *sdev)
+static int scsifront_sdev_configure(struct scsi_device *sdev,
+				    struct queue_limits *lim)
 {
 	struct vscsifrnt_info *info = shost_priv(sdev->host);
 	int err;
@@ -776,8 +777,8 @@ static const struct scsi_host_template scsifront_sht = {
 	.queuecommand		= scsifront_queuecommand,
 	.eh_abort_handler	= scsifront_eh_abort_handler,
 	.eh_device_reset_handler = scsifront_dev_reset_handler,
-	.slave_configure	= scsifront_sdev_configure,
-	.slave_destroy		= scsifront_sdev_destroy,
+	.sdev_configure		= scsifront_sdev_configure,
+	.sdev_destroy		= scsifront_sdev_destroy,
 	.cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
 	.can_queue		= VSCSIIF_MAX_REQS,
 	.this_id		= -1,
@@ -1074,8 +1075,8 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
 			continue;

 		/*
-		 * Front device state path, used in slave_configure called
-		 * on successfull scsi_add_device, and in slave_destroy called
+		 * Front device state path, used in sdev_configure called
+		 * on successfull scsi_add_device, and in sdev_destroy called
 		 * on remove of a device.
 		 */
 		snprintf(info->dev_state_path, sizeof(info->dev_state_path),
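The wd33c93 hunk above is the stock strncpy() to strscpy() conversion: strscpy() always NUL-terminates the destination, so the manual terminator write and the odd length computation (subtracting strlen() of a buffer that was just zeroed) both go away. A minimal illustration, with a hypothetical caller:

	char buf[SETUP_BUFFER_SIZE];

	/* strscpy() copies at most sizeof(buf) - 1 characters, always
	 * terminates, and returns -E2BIG if the source was truncated */
	if (strscpy(buf, str, sizeof(buf)) < 0)
		pr_warn("setup string truncated\n");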
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 22d412cab91d..15602ec862e3 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -139,7 +139,7 @@ zalon_probe(struct parisc_device *dev)
 		return -ENODEV;

 	if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
-		dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
+		dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n",
 			   dev->irq);
 		goto fail;
 	}
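The zalon change above only deletes a stray space, but it sat after the '\n' inside the format string; since the newline ends a log record, the space would spill onto the start of the following record. As a general sketch of the pattern, with a hypothetical device pointer:

	/* end the format exactly at the newline; any trailing characters
	 * would begin the next log record */
	dev_printk(KERN_ERR, &pdev->dev, "irq problem with %d, detaching\n",
		   pdev->irq);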