Diffstat (limited to 'drivers/scsi/smartpqi/smartpqi_init.c')
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 1339
1 file changed, 987 insertions(+), 352 deletions(-)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index d0446d4d4465..fe549e2b7c94 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * driver for Microchip PQI-based storage controllers - * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * @@ -19,13 +19,14 @@ #include <linux/bcd.h> #include <linux/reboot.h> #include <linux/cciss_ioctl.h> -#include <linux/blk-mq-pci.h> +#include <linux/crash_dump.h> +#include <linux/string.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_transport_sas.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "smartpqi.h" #include "smartpqi_sis.h" @@ -33,11 +34,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.20-035" +#define DRIVER_VERSION "2.1.36-026" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 20 -#define DRIVER_REVISION 35 +#define DRIVER_RELEASE 36 +#define DRIVER_REVISION 26 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -48,6 +49,8 @@ #define PQI_POST_RESET_DELAY_SECS 5 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 +#define PQI_NO_COMPLETION ((void *)-1) + MODULE_AUTHOR("Microchip"); MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " DRIVER_VERSION); @@ -66,6 +69,7 @@ static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd) static void pqi_verify_structures(void); static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); +static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info); static void pqi_ctrl_offline_worker(struct work_struct *work); static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); static void pqi_scan_start(struct Scsi_Host *shost); @@ -90,12 +94,13 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); -static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); -static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); -static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); +static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size); +static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor); +static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code); static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); +static void pqi_tmf_worker(struct work_struct *work); /* for flags argument to pqi_submit_raid_request_synchronous() */ #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 @@ -455,6 +460,21 @@ static inline bool 
pqi_device_in_remove(struct pqi_scsi_dev *device) return device->in_remove; } +static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun) +{ + device->in_reset[lun] = true; +} + +static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun) +{ + device->in_reset[lun] = false; +} + +static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun) +{ + return device->in_reset[lun]; +} + static inline int pqi_event_type_to_event_index(unsigned int event_type) { int index; @@ -519,6 +539,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) writeb(status, ctrl_info->soft_reset_status); } +static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) +{ + bool io_high_prio; + int priority_class; + + io_high_prio = false; + + if (device->ncq_prio_enable) { + priority_class = + IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); + if (priority_class == IOPRIO_CLASS_RT) { + /* Set NCQ priority for read/write commands. */ + switch (scmd->cmnd[0]) { + case WRITE_16: + case READ_16: + case WRITE_12: + case READ_12: + case WRITE_10: + case READ_10: + case WRITE_6: + case READ_6: + io_high_prio = true; + break; + } + } + } + + return io_high_prio; +} + static int pqi_map_single(struct pci_dev *pci_dev, struct pqi_sg_descriptor *sg_descriptor, void *buffer, size_t buffer_length, enum dma_data_direction data_direction) @@ -578,10 +628,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, cdb = request->cdb; switch (cmd) { - case TEST_UNIT_READY: - request->data_direction = SOP_READ_FLAG; - cdb[0] = TEST_UNIT_READY; - break; case INQUIRY: request->data_direction = SOP_READ_FLAG; cdb[0] = INQUIRY; @@ -708,7 +754,8 @@ static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info * } } - pqi_reinit_io_request(io_request); + if (io_request) + pqi_reinit_io_request(io_request); return io_request; } @@ -996,9 +1043,8 @@ static int pqi_write_driver_version_to_host_wellness( buffer->driver_version_tag[1] = 'V'; put_unaligned_le16(sizeof(buffer->driver_version), &buffer->driver_version_length); - strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, - sizeof(buffer->driver_version) - 1); - buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; + strscpy(buffer->driver_version, "Linux " DRIVER_VERSION, + sizeof(buffer->driver_version)); buffer->dont_write_tag[0] = 'D'; buffer->dont_write_tag[1] = 'W'; buffer->end_tag[0] = 'Z'; @@ -1176,7 +1222,6 @@ static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **b unsigned int i; u8 rpl_response_format; u32 num_physicals; - size_t rpl_16byte_wwid_list_length; void *rpl_list; struct report_lun_header *rpl_header; struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; @@ -1205,9 +1250,9 @@ static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **b rpl_8byte_wwid_list = rpl_list; num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); - rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid)); - rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL); + rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, + num_physicals), GFP_KERNEL); if (!rpl_16byte_wwid_list) return -ENOMEM; @@ -1259,7 +1304,8 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, "report logical 
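[Note: the report-LUN hunk above replaces the open-coded sizeof(header) + n * sizeof(entry) arithmetic with struct_size(). A minimal sketch of the idiom against a hypothetical flexible-array struct (the real driver types differ):]

    struct wwid_list {
            u32 count;
            u64 wwids[];    /* flexible array member */
    };
    struct wwid_list *list;

    /*
     * struct_size() computes sizeof(*list) + n * sizeof(list->wwids[0])
     * with overflow checking: on overflow it saturates to SIZE_MAX, so
     * kmalloc() fails cleanly instead of returning a short buffer.
     */
    list = kmalloc(struct_size(list, wwids, n), GFP_KERNEL);
    if (!list)
            return -ENOMEM;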
LUNs failed\n"); /* - * Tack the controller itself onto the end of the logical device list. + * Tack the controller itself onto the end of the logical device list + * by adding a list entry that is all zeros. */ logdev_data = *logdev_list; @@ -1464,6 +1510,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, if (rc) goto error; + device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); + if (!device->raid_io_stats) { + rc = -ENOMEM; + goto error; + } + device->raid_map = raid_map; return 0; @@ -1587,6 +1639,7 @@ no_buffer: #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 +#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, @@ -1635,6 +1688,8 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & PQI_DEVICE_NCQ_PRIO_SUPPORTED); + device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); + return 0; } @@ -1680,7 +1735,7 @@ out: /* * Prevent adding drive to OS for some corner cases such as a drive - * undergoing a sanitize operation. Some OSes will continue to poll + * undergoing a sanitize (erase) operation. Some OSes will continue to poll * the drive until the sanitize completes, which can take hours, * resulting in long bootup delays. Commands such as TUR, READ_CAP * are allowed, but READ/WRITE cause check condition. So the OS @@ -1688,73 +1743,9 @@ out: * Note: devices that have completed sanitize must be re-enabled * using the management utility. */ -static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) +static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) { - u8 scsi_status; - int rc; - enum dma_data_direction dir; - char *buffer; - int buffer_length = 64; - size_t sense_data_length; - struct scsi_sense_hdr sshdr; - struct pqi_raid_path_request request; - struct pqi_raid_error_info error_info; - bool offline = false; /* Assume keep online */ - - /* Do not check controllers. */ - if (pqi_is_hba_lunid(device->scsi3addr)) - return false; - - /* Do not check LVs. 
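[Note: pqi_get_raid_map() now allocates device->raid_io_stats with alloc_percpu(), which the I/O hot paths later bump without taking any lock. The pattern in isolation, a sketch with the field layout assumed from the counters this patch uses:]

    struct pqi_raid_io_stats {
            u64 raid_bypass_cnt;
            u64 write_stream_cnt;
    };
    struct pqi_raid_io_stats __percpu *stats;
    u64 total = 0;
    int cpu;

    stats = alloc_percpu(struct pqi_raid_io_stats);     /* NULL on failure */

    /* Hot path: lock-free increment of this CPU's private copy. */
    per_cpu_ptr(stats, raw_smp_processor_id())->raid_bypass_cnt++;

    /* Slow path (e.g. a sysfs read): fold all online CPUs' copies. */
    for_each_online_cpu(cpu)
            total += per_cpu_ptr(stats, cpu)->raid_bypass_cnt;

    free_percpu(stats);         /* NULL-safe, like kfree() */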
*/ - if (pqi_is_logical_device(device)) - return false; - - buffer = kmalloc(buffer_length, GFP_KERNEL); - if (!buffer) - return false; /* Assume not offline */ - - /* Check for SANITIZE in progress using TUR */ - rc = pqi_build_raid_path_request(ctrl_info, &request, - TEST_UNIT_READY, RAID_CTLR_LUNID, buffer, - buffer_length, 0, &dir); - if (rc) - goto out; /* Assume not offline */ - - memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number)); - - rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info); - - if (rc) - goto out; /* Assume not offline */ - - scsi_status = error_info.status; - sense_data_length = get_unaligned_le16(&error_info.sense_data_length); - if (sense_data_length == 0) - sense_data_length = - get_unaligned_le16(&error_info.response_data_length); - if (sense_data_length) { - if (sense_data_length > sizeof(error_info.data)) - sense_data_length = sizeof(error_info.data); - - /* - * Check for sanitize in progress: asc:0x04, ascq: 0x1b - */ - if (scsi_status == SAM_STAT_CHECK_CONDITION && - scsi_normalize_sense(error_info.data, - sense_data_length, &sshdr) && - sshdr.sense_key == NOT_READY && - sshdr.asc == 0x04 && - sshdr.ascq == 0x1b) { - device->device_offline = true; - offline = true; - goto out; /* Keep device offline */ - } - } - -out: - kfree(buffer); - return offline; + return device->erase_in_progress; } static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, @@ -2022,18 +2013,31 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, PQI_DEV_INFO_BUFFER_LENGTH - count, "-:-"); - if (pqi_is_logical_device(device)) + if (pqi_is_logical_device(device)) { count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %08x%08x", *((u32 *)&device->scsi3addr), *((u32 *)&device->scsi3addr[4])); - else + } else if (ctrl_info->rpl_extended_format_4_5_supported) { + if (device->device_type == SA_DEVICE_TYPE_NVME) + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx%016llx", + get_unaligned_be64(&device->wwid[0]), + get_unaligned_be64(&device->wwid[8])); + else + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx", + get_unaligned_be64(&device->wwid[0])); + } else { count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, - " %016llx%016llx", - get_unaligned_be64(&device->wwid[0]), - get_unaligned_be64(&device->wwid[8])); + " %016llx", + get_unaligned_be64(&device->wwid[0])); + } + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %s %.8s %.16s ", @@ -2109,8 +2113,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, if (existing_device->devtype == TYPE_DISK) { existing_device->raid_level = new_device->raid_level; existing_device->volume_status = new_device->volume_status; - if (ctrl_info->logical_volume_rescan_needed) - existing_device->rescan = true; memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { kfree(existing_device->raid_map); @@ -2118,6 +2120,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, /* To prevent this from being freed later. 
*/ new_device->raid_map = NULL; } + if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { + existing_device->raid_io_stats = new_device->raid_io_stats; + new_device->raid_io_stats = NULL; + } existing_device->raid_bypass_configured = new_device->raid_bypass_configured; existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; } @@ -2140,6 +2146,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, static inline void pqi_free_device(struct pqi_scsi_dev *device) { if (device) { + free_percpu(device->raid_io_stats); kfree(device->raid_map); kfree(device); } @@ -2171,6 +2178,29 @@ static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) return device->sdev != NULL; } +static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) +{ + unsigned int lun; + struct pqi_tmf_work *tmf_work; + + for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) + INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); +} + +static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device) +{ + if (pqi_device_in_remove(device)) + return false; + + if (device->sdev == NULL) + return false; + + if (!scsi_device_online(device->sdev)) + return false; + + return device->rescan; +} + static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) { @@ -2251,6 +2281,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, list_add_tail(&device->add_list_entry, &add_list); /* To prevent this device structure from being freed later. */ device->keep_device = true; + pqi_init_device_tmf_work(device); } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -2287,13 +2318,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, * queue depth, device size. */ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + /* + * Check for queue depth change. + */ if (device->sdev && device->queue_depth != device->advertised_queue_depth) { device->advertised_queue_depth = device->queue_depth; scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); - if (device->rescan) { - scsi_rescan_device(&device->sdev->sdev_gendev); - device->rescan = false; - } + } + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + /* + * Check for changes in the device, such as size. 
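[Note: pqi_init_device_tmf_work() above binds one pqi_tmf_work item per LUN to pqi_tmf_worker() at device-add time, so the abort handler can later just schedule the pre-initialized item. The underlying workqueue idiom, sketched with struct pqi_tmf_work's layout assumed from how this patch uses it:]

    static void pqi_tmf_worker(struct work_struct *work)
    {
            /* Recover the per-LUN context embedding the work item. */
            struct pqi_tmf_work *tmf_work =
                    container_of(work, struct pqi_tmf_work, work_struct);

            /* ... perform the reset using tmf_work->device, ->lun ... */
    }

    /* Once, when the device is added: */
    for (lun = 0; lun < PQI_MAX_LUNS_PER_DEVICE; lun++)
            INIT_WORK(&device->tmf_work[lun].work_struct, pqi_tmf_worker);

    /* Later, from the abort handler: */
    schedule_work(&device->tmf_work[lun].work_struct);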
+ */ + if (pqi_volume_rescan_needed(device)) { + device->rescan = false; + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + scsi_rescan_device(device->sdev); + } else { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); } } @@ -2314,8 +2355,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, } } - ctrl_info->logical_volume_rescan_needed = false; - } static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) @@ -2347,14 +2386,6 @@ static inline void pqi_mask_device(u8 *scsi3addr) scsi3addr[3] |= 0xc0; } -static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) -{ - if (pqi_is_logical_device(device)) - return false; - - return (device->path_map & (device->path_map - 1)) != 0; -} - static inline bool pqi_expose_device(struct pqi_scsi_dev *device) { return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); @@ -2498,10 +2529,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) if (!pqi_is_supported_device(device)) continue; - /* Do not present disks that the OS cannot fully probe */ - if (pqi_keep_device_offline(ctrl_info, device)) - continue; - /* Gather information about the device. */ rc = pqi_get_device_info(ctrl_info, device, id_phys); if (rc == -ENOMEM) { @@ -2524,6 +2551,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) continue; } + /* Do not present disks that the OS cannot fully probe. */ + if (pqi_keep_device_offline(device)) + continue; + pqi_assign_bus_target_lun(device); if (device->is_physical_device) { @@ -3237,6 +3268,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request) sense_data_length); } + if (pqi_cmd_priv(scmd)->this_residual && + !pqi_is_logical_device(scmd->device->hostdata) && + scsi_status == SAM_STAT_CHECK_CONDITION && + host_byte == DID_OK && + sense_data_length && + scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + sshdr.asc == 0x26 && + sshdr.ascq == 0x0) { + host_byte = DID_NO_CONNECT; + pqi_take_device_offline(scmd->device, "AIO"); + scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); + } + scmd->result = scsi_status; set_host_byte(scmd, host_byte); } @@ -3251,14 +3296,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) int residual_count; int xfer_count; bool device_offline; - struct pqi_scsi_dev *device; scmd = io_request->scmd; error_info = io_request->error_info; host_byte = DID_OK; sense_data_length = 0; device_offline = false; - device = scmd->device->hostdata; switch (error_info->service_response) { case PQI_AIO_SERV_RESPONSE_COMPLETE: @@ -3283,14 +3326,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: pqi_aio_path_disabled(io_request); - if (pqi_is_multipath_device(device)) { - pqi_device_remove_start(device); - host_byte = DID_NO_CONNECT; - scsi_status = SAM_STAT_CHECK_CONDITION; - } else { - scsi_status = SAM_STAT_GOOD; - io_request->status = -EAGAIN; - } + scsi_status = SAM_STAT_GOOD; + io_request->status = -EAGAIN; break; case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: @@ -3364,7 +3401,7 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf case SOP_TMF_REJECTED: rc = -EAGAIN; break; - case SOP_RC_INCORRECT_LOGICAL_UNIT: + case SOP_TMF_INCORRECT_LOGICAL_UNIT: rc = -ENODEV; break; default: @@ -3618,7 +3655,7 @@ static void 
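[Note: the pqi_process_raid_io_error() addition above turns one very specific check condition on a physical AIO device (ILLEGAL_REQUEST, asc 0x26) into DID_NO_CONNECT, takes the device offline, and substitutes LOGICAL UNIT FAILURE sense (0x3e/0x01) for the midlayer. The sense-decoding step it relies on, in isolation (buffer names illustrative):]

    struct scsi_sense_hdr sshdr;

    /*
     * scsi_normalize_sense() parses either fixed- or descriptor-format
     * sense data into one normalized header, returning false if the
     * buffer is empty or malformed.
     */
    if (scsi_normalize_sense(sense_buf, sense_len, &sshdr) &&
        sshdr.sense_key == ILLEGAL_REQUEST &&
        sshdr.asc == 0x26 && sshdr.ascq == 0x00) {
            /* asc/ascq 26h/00h: INVALID FIELD IN PARAMETER LIST */
    }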
pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) ctrl_info->pqi_mode_enabled = false; pqi_save_ctrl_mode(ctrl_info, SIS_MODE); rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); dev_info(&ctrl_info->pci_dev->dev, "Online Firmware Activation: %s\n", @@ -3629,7 +3666,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) "Online Firmware Activation ABORTED\n"); if (ctrl_info->soft_reset_handshake_supported) pqi_clear_soft_reset_status(ctrl_info); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); pqi_ofa_ctrl_unquiesce(ctrl_info); break; @@ -3639,7 +3676,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) dev_err(&ctrl_info->pci_dev->dev, "unexpected Online Firmware Activation reset status: 0x%x\n", reset_status); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); pqi_ofa_ctrl_unquiesce(ctrl_info); pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); @@ -3654,8 +3691,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work) ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); pqi_ctrl_ofa_start(ctrl_info); - pqi_ofa_setup_host_buffer(ctrl_info); - pqi_ofa_host_memory_update(ctrl_info); + pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested); + pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); } static void pqi_ofa_quiesce_worker(struct work_struct *work) @@ -3695,7 +3732,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, dev_info(&ctrl_info->pci_dev->dev, "received Online Firmware Activation cancel request: reason: %u\n", ctrl_info->ofa_cancel_reason); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); break; default: @@ -3708,6 +3745,21 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, return ack_event; } +static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) +{ + unsigned long flags; + struct pqi_scsi_dev *device; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) + device->rescan = true; + } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); +} + static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) { unsigned long flags; @@ -3748,7 +3800,7 @@ static void pqi_event_worker(struct work_struct *work) ack_event = true; rescan_needed = true; if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) - ctrl_info->logical_volume_rescan_needed = true; + pqi_mark_volumes_for_rescan(ctrl_info); else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) pqi_disable_raid_bypass(ctrl_info); } @@ -3774,7 +3826,8 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t) { int num_interrupts; u32 heartbeat_count; - struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); + struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t, + heartbeat_timer); pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) @@ -3817,7 +3870,7 @@ static void 
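[Note: the heartbeat hunk above moves from from_timer()/del_timer_sync() to the renamed timer_container_of()/timer_delete_sync() API; the semantics are unchanged. The pattern, sketched:]

    static void heartbeat_handler(struct timer_list *t)
    {
            /* Recover the object that embeds the timer_list. */
            struct pqi_ctrl_info *ctrl_info =
                    timer_container_of(ctrl_info, t, heartbeat_timer);

            /* ... periodic health check, then re-arm ... */
            mod_timer(&ctrl_info->heartbeat_timer, jiffies + 5 * HZ);
    }

    timer_setup(&ctrl_info->heartbeat_timer, heartbeat_handler, 0);

    /* Later, on teardown: */
    timer_delete_sync(&ctrl_info->heartbeat_timer);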
pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) { - del_timer_sync(&ctrl_info->heartbeat_timer); + timer_delete_sync(&ctrl_info->heartbeat_timer); } static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, @@ -5014,7 +5067,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) } #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ - struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS) + struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, bool enable_events) @@ -5210,7 +5263,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) ctrl_info->error_buffer_length = ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; - if (reset_devices) + if (is_kdump_kernel()) max_transfer_size = min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE_KDUMP); else @@ -5239,18 +5292,17 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) u16 num_elements_per_iq; u16 num_elements_per_oq; - if (reset_devices) { + if (is_kdump_kernel()) { num_queue_groups = 1; } else { - int num_cpus; int max_queue_groups; max_queue_groups = min(ctrl_info->max_inbound_queues / 2, ctrl_info->max_outbound_queues - 1); max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); - num_cpus = num_online_cpus(); - num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); + num_queue_groups = + blk_mq_num_online_queues(ctrl_info->max_msix_vectors); num_queue_groups = min(num_queue_groups, max_queue_groups); } @@ -5503,14 +5555,29 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request, pqi_scsi_done(scmd); } -static int pqi_raid_submit_scsi_cmd_with_io_request( - struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, +/* + * Adjust the timeout value for physical devices sent to the firmware + * by subtracting 3 seconds for timeouts greater than or equal to 8 seconds. + * + * This provides the firmware with additional time to attempt early recovery + * before the OS-level timeout occurs. + */ +#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? 
((tv) - 3) : (tv)) + +static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, - struct pqi_queue_group *queue_group) + struct pqi_queue_group *queue_group, bool io_high_prio) { int rc; + u32 timeout; size_t cdb_length; + struct pqi_io_request *io_request; struct pqi_raid_path_request *request; + struct request *rq; + + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; io_request->io_complete_callback = pqi_raid_io_complete; io_request->scmd = scmd; @@ -5521,6 +5588,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + request->command_priority = io_high_prio; put_unaligned_le16(io_request->index, &request->request_id); request->error_index = request->request_id; memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); @@ -5577,6 +5645,12 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( return SCSI_MLQUEUE_HOST_BUSY; } + if (device->is_physical_device) { + rq = scsi_cmd_to_rq(scmd); + timeout = rq->timeout / HZ; + put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout); + } + pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); return 0; @@ -5586,14 +5660,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group) { - struct pqi_io_request *io_request; + bool io_high_prio; - io_request = pqi_alloc_io_request(ctrl_info, scmd); - if (!io_request) - return SCSI_MLQUEUE_HOST_BUSY; + io_high_prio = pqi_is_io_high_priority(device, scmd); - return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, - device, scmd, queue_group); + return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); } static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) @@ -5638,44 +5709,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request, pqi_scsi_done(scmd); } -static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) -{ - bool io_high_prio; - int priority_class; - - io_high_prio = false; - - if (device->ncq_prio_enable) { - priority_class = - IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); - if (priority_class == IOPRIO_CLASS_RT) { - /* Set NCQ priority for read/write commands. 
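[Note: worked example of the ADJUST_SECS_TIMEOUT_VALUE() macro defined above, as used when building a physical-device request: a 30-second block-layer timeout goes to firmware as 27 seconds, giving it a head start on early recovery, while anything under 8 seconds passes through untouched:]

    struct request *rq = scsi_cmd_to_rq(scmd);
    u32 timeout = rq->timeout / HZ;     /* jiffies -> whole seconds */

    /* ADJUST_SECS_TIMEOUT_VALUE(30) == 27; ADJUST_SECS_TIMEOUT_VALUE(5) == 5 */
    put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);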
*/ - switch (scmd->cmnd[0]) { - case WRITE_16: - case READ_16: - case WRITE_12: - case READ_12: - case WRITE_10: - case READ_10: - case WRITE_6: - case READ_6: - io_high_prio = true; - break; - } - } - } - - return io_high_prio; -} - static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group) { bool io_high_prio; - io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd); + io_high_prio = pqi_is_io_high_priority(device, scmd); return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, scmd->cmnd, scmd->cmd_len, queue_group, NULL, @@ -5691,12 +5731,11 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, int rc; struct pqi_io_request *io_request; struct pqi_aio_path_request *request; - struct pqi_scsi_dev *device; - device = scmd->device->hostdata; io_request = pqi_alloc_io_request(ctrl_info, scmd); if (!io_request) return SCSI_MLQUEUE_HOST_BUSY; + io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = raid_bypass; @@ -5711,8 +5750,8 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, request->command_priority = io_high_prio; put_unaligned_le16(io_request->index, &request->request_id); request->error_index = request->request_id; - if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) - put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number); + if (!raid_bypass && ctrl_info->multi_lun_device_supported) + put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); if (cdb_length > sizeof(request->cdb)) cdb_length = sizeof(request->cdb); request->cdb_length = cdb_length; @@ -5912,6 +5951,7 @@ static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) { struct pqi_scsi_dev *device; + struct completion *wait; if (!scmd->device) { set_host_byte(scmd, DID_NO_CONNECT); @@ -5925,6 +5965,10 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) } atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); + + wait = (struct completion *)xchg(&scmd->host_scribble, NULL); + if (wait != PQI_NO_COMPLETION) + complete(wait); } static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, @@ -5936,7 +5980,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, int rc; struct pqi_scsi_dev *device; struct pqi_stream_data *pqi_stream_data; - struct pqi_scsi_dev_raid_map_data rmd; + struct pqi_scsi_dev_raid_map_data rmd = { 0 }; if (!ctrl_info->enable_stream_detection) return false; @@ -5978,6 +6022,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; pqi_stream_data->last_accessed = jiffies; + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++; return true; } @@ -6010,6 +6055,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm u16 hw_queue; struct pqi_queue_group *queue_group; bool raid_bypassed; + u8 lun; + + scmd->host_scribble = PQI_NO_COMPLETION; device = scmd->device->hostdata; @@ -6019,17 +6067,19 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm return 0; } - atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]); + lun = (u8)scmd->device->lun; + + atomic_inc(&device->scsi_cmds_outstanding[lun]); ctrl_info = shost_to_hba(shost); - if (pqi_ctrl_offline(ctrl_info) || 
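[Note: pqi_prep_for_scsi_done() above is the completion half of the new abort handshake. scmd->host_scribble holds one of three values: PQI_NO_COMPLETION ((void *)-1, set at queue time), a pointer to an aborter's on-stack completion, or NULL once the command has finished. xchg() claims it atomically so the waiter is woken at most once; a sketch of the idea:]

    struct completion *wait;

    /* Atomically take ownership of whatever the aborter left behind. */
    wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
    if (wait != PQI_NO_COMPLETION)
            complete(wait);     /* an abort is parked on this command: wake it */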
pqi_device_in_remove(device)) { + if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) { set_host_byte(scmd, DID_NO_CONNECT); pqi_scsi_done(scmd); return 0; } - if (pqi_ctrl_blocked(ctrl_info)) { + if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -6051,7 +6101,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { raid_bypassed = true; - atomic_inc(&device->raid_bypass_cnt); + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++; } } if (!raid_bypassed) @@ -6064,8 +6114,10 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm } out: - if (rc) - atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); + if (rc) { + scmd->host_scribble = NULL; + atomic_dec(&device->scsi_cmds_outstanding[lun]); + } return rc; } @@ -6159,7 +6211,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) } static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) + struct pqi_scsi_dev *device, u8 lun) { unsigned int i; unsigned int path; @@ -6186,11 +6238,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, continue; scsi_device = scmd->device->hostdata; - if (scsi_device != device) - continue; list_del(&io_request->request_list_entry); - set_host_byte(scmd, DID_RESET); + if (scsi_device == device && (u8)scmd->device->lun == lun) + set_host_byte(scmd, DID_RESET); + else + set_host_byte(scmd, DID_REQUEUE); pqi_free_io_request(io_request); scsi_dma_unmap(scmd); pqi_scsi_done(scmd); @@ -6286,15 +6339,13 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 -static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) +static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) { int rc; struct pqi_io_request *io_request; DECLARE_COMPLETION_ONSTACK(wait); struct pqi_task_management_request *request; - struct pqi_scsi_dev *device; - device = scmd->device->hostdata; io_request = pqi_alloc_io_request(ctrl_info, NULL); io_request->io_complete_callback = pqi_lun_reset_complete; io_request->context = &wait; @@ -6309,7 +6360,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) - request->ml_device_lun_number = (u8)scmd->device->lun; + request->ml_device_lun_number = lun; request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; if (ctrl_info->tmf_iu_timeout_supported) put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); @@ -6317,7 +6368,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, io_request); - rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait); + rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); if (rc == 0) rc = io_request->status; @@ -6331,18 +6382,16 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd #define 
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) -static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) +static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) { int reset_rc; int wait_rc; unsigned int retries; unsigned long timeout_msecs; - struct pqi_scsi_dev *device; - device = scmd->device->hostdata; for (retries = 0;;) { - reset_rc = pqi_lun_reset(ctrl_info, scmd); - if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES) + reset_rc = pqi_lun_reset(ctrl_info, device, lun); + if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) break; msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); } @@ -6350,60 +6399,63 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct sc timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; - wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs); + wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); if (wait_rc && reset_rc == 0) reset_rc = wait_rc; return reset_rc == 0 ? SUCCESS : FAILED; } -static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) +static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) { int rc; - struct pqi_scsi_dev *device; - device = scmd->device->hostdata; pqi_ctrl_block_requests(ctrl_info); pqi_ctrl_wait_until_quiesced(ctrl_info); - pqi_fail_io_queued_for_device(ctrl_info, device); + pqi_fail_io_queued_for_device(ctrl_info, device, lun); rc = pqi_wait_until_inbound_queues_empty(ctrl_info); + pqi_device_reset_start(device, lun); + pqi_ctrl_unblock_requests(ctrl_info); if (rc) rc = FAILED; else - rc = pqi_lun_reset_with_retries(ctrl_info, scmd); - pqi_ctrl_unblock_requests(ctrl_info); + rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); + pqi_device_reset_done(device, lun); return rc; } -static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) +static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) { + unsigned long flags; int rc; - struct Scsi_Host *shost; - struct pqi_ctrl_info *ctrl_info; - struct pqi_scsi_dev *device; - - shost = scmd->device->host; - ctrl_info = shost_to_hba(shost); - device = scmd->device->hostdata; mutex_lock(&ctrl_info->lun_reset_mutex); + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) { + dev_warn(&ctrl_info->pci_dev->dev, + "skipping reset of scsi %d:%d:%d:%u, device has been removed\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + mutex_unlock(&ctrl_info->lun_reset_mutex); + return 0; + } + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + dev_err(&ctrl_info->pci_dev->dev, - "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n", - shost->host_no, - device->bus, device->target, (u32)scmd->device->lun, - scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff); + "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) rc = FAILED; else - rc = pqi_device_reset(ctrl_info, scmd); + rc = pqi_device_reset(ctrl_info, device, lun); dev_err(&ctrl_info->pci_dev->dev, - "reset of scsi %d:%d:%d:%d: %s\n", - shost->host_no, device->bus, device->target, (u32)scmd->device->lun, + "reset of scsi %d:%d:%d:%u: %s\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, lun, rc == SUCCESS ? "SUCCESS" : "FAILED"); mutex_unlock(&ctrl_info->lun_reset_mutex); @@ -6411,7 +6463,78 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) return rc; } -static int pqi_slave_alloc(struct scsi_device *sdev) +static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + u8 scsi_opcode; + + shost = scmd->device->host; + ctrl_info = shost_to_hba(shost); + device = scmd->device->hostdata; + scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; + + return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); +} + +static void pqi_tmf_worker(struct work_struct *work) +{ + struct pqi_tmf_work *tmf_work; + struct scsi_cmnd *scmd; + + tmf_work = container_of(work, struct pqi_tmf_work, work_struct); + scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); + + pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); +} + +static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + struct pqi_tmf_work *tmf_work; + DECLARE_COMPLETION_ONSTACK(wait); + + shost = scmd->device->host; + ctrl_info = shost_to_hba(shost); + device = scmd->device->hostdata; + + dev_err(&ctrl_info->pci_dev->dev, + "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", + shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); + + if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { + dev_err(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", + shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); + scmd->result = DID_RESET << 16; + goto out; + } + + tmf_work = &device->tmf_work[scmd->device->lun]; + + if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { + tmf_work->ctrl_info = ctrl_info; + tmf_work->device = device; + tmf_work->lun = (u8)scmd->device->lun; + tmf_work->scsi_opcode = scmd->cmd_len > 0 ? 
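[Note: and this is the aborter's half of the handshake: cmpxchg() tries to swap the PQI_NO_COMPLETION sentinel for a pointer to an on-stack completion. If it observes NULL instead, the command already completed and there is nothing to abort. Sketched, with the TMF bookkeeping elided:]

    DECLARE_COMPLETION_ONSTACK(wait);

    /* Install ourselves as the waiter, unless the command already finished. */
    if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
            scmd->result = DID_RESET << 16;     /* completed during the race */
            return SUCCESS;
    }

    /* Kick the per-LUN TMF worker, then sleep until the completion path
     * (or the worker's device reset) calls complete() on our waiter. */
    schedule_work(&device->tmf_work[lun].work_struct);
    wait_for_completion(&wait);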
scmd->cmnd[0] : 0xff; + schedule_work(&tmf_work->work_struct); + } + + wait_for_completion(&wait); + + dev_err(&ctrl_info->pci_dev->dev, + "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", + shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); + +out: + + return SUCCESS; +} + +static int pqi_sdev_init(struct scsi_device *sdev) { struct pqi_scsi_dev *device; unsigned long flags; @@ -6467,8 +6590,11 @@ static void pqi_map_queues(struct Scsi_Host *shost) { struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); - blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - ctrl_info->pci_dev, 0); + if (!ctrl_info->disable_managed_interrupts) + blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], + &ctrl_info->pci_dev->dev, 0); + else + blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); } static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) @@ -6476,7 +6602,8 @@ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; } -static int pqi_slave_configure(struct scsi_device *sdev) +static int pqi_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { int rc = 0; struct pqi_scsi_dev *device; @@ -6492,11 +6619,13 @@ static int pqi_slave_configure(struct scsi_device *sdev) return rc; } -static void pqi_slave_destroy(struct scsi_device *sdev) +static void pqi_sdev_destroy(struct scsi_device *sdev) { struct pqi_ctrl_info *ctrl_info; struct pqi_scsi_dev *device; + struct pqi_tmf_work *tmf_work; int mutex_acquired; + unsigned int lun; unsigned long flags; ctrl_info = shost_to_hba(sdev->host); @@ -6523,8 +6652,13 @@ static void pqi_slave_destroy(struct scsi_device *sdev) mutex_unlock(&ctrl_info->scan_mutex); + for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) + cancel_work_sync(&tmf_work->work_struct); + + mutex_lock(&ctrl_info->lun_reset_mutex); pqi_dev_info(ctrl_info, "removed", device); pqi_free_device(device); + mutex_unlock(&ctrl_info->lun_reset_mutex); } static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) @@ -6532,21 +6666,21 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *ar struct pci_dev *pci_dev; u32 subsystem_vendor; u32 subsystem_device; - cciss_pci_info_struct pciinfo; + cciss_pci_info_struct pci_info; if (!arg) return -EINVAL; pci_dev = ctrl_info->pci_dev; - pciinfo.domain = pci_domain_nr(pci_dev->bus); - pciinfo.bus = pci_dev->bus->number; - pciinfo.dev_fn = pci_dev->devfn; + pci_info.domain = pci_domain_nr(pci_dev->bus); + pci_info.bus = pci_dev->bus->number; + pci_info.dev_fn = pci_dev->devfn; subsystem_vendor = pci_dev->subsystem_vendor; subsystem_device = pci_dev->subsystem_device; - pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; + pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; - if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) + if (copy_to_user(arg, &pci_info, sizeof(pci_info))) return -EFAULT; return 0; @@ -6677,17 +6811,15 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) } if (iocommand.buf_size > 0) { - kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); - if (!kernel_buffer) - return -ENOMEM; if (iocommand.Request.Type.Direction & XFER_WRITE) { - if (copy_from_user(kernel_buffer, iocommand.buf, - iocommand.buf_size)) { - rc = -EFAULT; - goto out; - } + kernel_buffer = 
memdup_user(iocommand.buf, + iocommand.buf_size); + if (IS_ERR(kernel_buffer)) + return PTR_ERR(kernel_buffer); } else { - memset(kernel_buffer, 0, iocommand.buf_size); + kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL); + if (!kernel_buffer) + return -ENOMEM; } } @@ -6823,12 +6955,6 @@ static ssize_t pqi_firmware_version_show(struct device *dev, return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); } -static ssize_t pqi_driver_version_show(struct device *dev, - struct device_attribute *attr, char *buffer) -{ - return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP); -} - static ssize_t pqi_serial_number_show(struct device *dev, struct device_attribute *attr, char *buffer) { @@ -6902,7 +7028,7 @@ static ssize_t pqi_lockup_action_store(struct device *dev, char *action_name; char action_name_buffer[32]; - strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); + strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); action_name = strstrip(action_name_buffer); for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { @@ -6997,7 +7123,8 @@ static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, return count; } -static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); +static DEVICE_STRING_ATTR_RO(driver_version, 0444, + DRIVER_VERSION BUILD_TIMESTAMP); static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); @@ -7014,7 +7141,7 @@ static DEVICE_ATTR(enable_r6_writes, 0644, pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); static struct attribute *pqi_shost_attrs[] = { - &dev_attr_driver_version.attr, + &dev_attr_driver_version.attr.attr, &dev_attr_firmware_version.attr, &dev_attr_model.attr, &dev_attr_serial_number.attr, @@ -7287,7 +7414,8 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, struct scsi_device *sdev; struct pqi_scsi_dev *device; unsigned long flags; - int raid_bypass_cnt; + u64 raid_bypass_cnt; + int cpu; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -7303,11 +7431,17 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, return -ENODEV; } - raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt); + raid_bypass_cnt = 0; + + if (device->raid_io_stats) { + for_each_online_cpu(cpu) { + raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; + } + } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); + return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt); } static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, @@ -7365,8 +7499,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, return -ENODEV; } - if (!device->ncq_prio_support || - !device->is_physical_device) { + if (!device->ncq_prio_support) { spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return -EINVAL; } @@ -7378,6 +7511,55 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, return strlen(buf); } +static ssize_t pqi_numa_node_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct scsi_device *sdev; + struct pqi_ctrl_info *ctrl_info; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); +} + +static ssize_t pqi_write_stream_cnt_show(struct device 
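[Note: the passthrough-ioctl hunk above splits buffer setup by transfer direction, replacing kmalloc()+copy_from_user() with memdup_user() for writes and kzalloc() for reads. The shape of that idiom (variable names simplified):]

    void *buf;

    if (write_direction) {
            /* Allocate and copy from userspace in one step; returns
             * an ERR_PTR() on allocation or copy failure. */
            buf = memdup_user(ubuf, len);
            if (IS_ERR(buf))
                    return PTR_ERR(buf);
    } else {
            /* Read direction: firmware fills it in, so start zeroed. */
            buf = kzalloc(len, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
    }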
*dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + u64 write_stream_cnt; + int cpu; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + write_stream_cnt = 0; + + if (device->raid_io_stats) { + for_each_online_cpu(cpu) { + write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; + } + } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt); +} + static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); @@ -7387,6 +7569,8 @@ static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); static DEVICE_ATTR(sas_ncq_prio_enable, 0644, pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); +static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); +static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL); static struct attribute *pqi_sdev_attrs[] = { &dev_attr_lunid.attr, @@ -7397,12 +7581,14 @@ static struct attribute *pqi_sdev_attrs[] = { &dev_attr_raid_level.attr, &dev_attr_raid_bypass_cnt.attr, &dev_attr_sas_ncq_prio_enable.attr, + &dev_attr_numa_node.attr, + &dev_attr_write_stream_cnt.attr, NULL }; ATTRIBUTE_GROUPS(pqi_sdev); -static struct scsi_host_template pqi_driver_template = { +static const struct scsi_host_template pqi_driver_template = { .module = THIS_MODULE, .name = DRIVER_NAME_SHORT, .proc_name = DRIVER_NAME_SHORT, @@ -7411,10 +7597,11 @@ static struct scsi_host_template pqi_driver_template = { .scan_finished = pqi_scan_finished, .this_id = -1, .eh_device_reset_handler = pqi_eh_device_reset_handler, + .eh_abort_handler = pqi_eh_abort_handler, .ioctl = pqi_ioctl, - .slave_alloc = pqi_slave_alloc, - .slave_configure = pqi_slave_configure, - .slave_destroy = pqi_slave_destroy, + .sdev_init = pqi_sdev_init, + .sdev_configure = pqi_sdev_configure, + .sdev_destroy = pqi_sdev_destroy, .map_queues = pqi_map_queues, .sdev_groups = pqi_sdev_groups, .shost_groups = pqi_shost_groups, @@ -7715,8 +7902,8 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, features_requested_iomem_addr + (le16_to_cpu(firmware_features->num_elements) * 2) + sizeof(__le16); - writew(PQI_FIRMWARE_FEATURE_MAXIMUM, - host_max_known_feature_iomem_addr); + writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); + writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); } return pqi_config_table_update(ctrl_info, @@ -7786,6 +7973,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: ctrl_info->multi_lun_device_supported = firmware_feature->enabled; break; + case PQI_FIRMWARE_FEATURE_CTRL_LOGGING: + ctrl_info->ctrl_logging_supported = firmware_feature->enabled; + break; } pqi_firmware_feature_status(ctrl_info, firmware_feature); @@ -7891,6 +8081,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = { .feature_bit = 
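[Note: in pqi_enable_firmware_features() above, a single 16-bit writew() of the host's maximum known feature number becomes two writeb() stores, low byte first. Presumably this keeps every MMIO access byte-granular regardless of the field's alignment within the config table; the little-endian layout is preserved by hand:]

    /* 16-bit little-endian value as two byte stores: low byte, then high. */
    writeb(value & 0xff, addr);
    writeb((value >> 8) & 0xff, addr + 1);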
PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, .feature_status = pqi_ctrl_update_feature_flags, }, + { + .feature_name = "Controller Data Logging", + .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING, + .feature_status = pqi_ctrl_update_feature_flags, + }, }; static void pqi_process_firmware_features( @@ -7993,6 +8188,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) ctrl_info->firmware_triage_supported = false; ctrl_info->rpl_extended_format_4_5_supported = false; ctrl_info->multi_lun_device_supported = false; + ctrl_info->ctrl_logging_supported = false; } static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) @@ -8133,17 +8329,26 @@ static void pqi_perform_lockup_action(void) } } +#define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024) +#define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS) + static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; u32 product_id; if (reset_devices) { - if (pqi_is_fw_triage_supported(ctrl_info)) { + if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) { rc = sis_wait_for_fw_triage_completion(ctrl_info); if (rc) return rc; } + if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) { + sis_notify_kdump(ctrl_info); + rc = sis_wait_for_ctrl_logging_completion(ctrl_info); + if (rc) + return rc; + } sis_soft_reset(ctrl_info); ssleep(PQI_POST_RESET_DELAY_SECS); } else { @@ -8189,7 +8394,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->product_id = (u8)product_id; ctrl_info->product_revision = (u8)(product_id >> 8); - if (reset_devices) { + if (is_kdump_kernel()) { if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) ctrl_info->max_outstanding_requests = @@ -8325,6 +8530,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; + if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) { + pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); + pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); + } + rc = pqi_get_ctrl_product_details(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -8509,8 +8719,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return rc; } - if (pqi_ofa_in_progress(ctrl_info)) + if (pqi_ofa_in_progress(ctrl_info)) { pqi_ctrl_unblock_scan(ctrl_info); + if (ctrl_info->ctrl_logging_supported) { + if (!ctrl_info->ctrl_log_memory.host_memory) + pqi_host_setup_buffer(ctrl_info, + &ctrl_info->ctrl_log_memory, + PQI_CTRL_LOG_TOTAL_SIZE, + PQI_CTRL_LOG_MIN_SIZE); + pqi_host_memory_update(ctrl_info, + &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); + } else { + if (ctrl_info->ctrl_log_memory.host_memory) + pqi_host_free_buffer(ctrl_info, + &ctrl_info->ctrl_log_memory); + } + } pqi_scan_scsi_devices(ctrl_info); @@ -8559,7 +8783,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->iomem_base = ioremap(pci_resource_start( ctrl_info->pci_dev, 0), - sizeof(struct pqi_ctrl_registers)); + pci_resource_len(ctrl_info->pci_dev, 0)); if (!ctrl_info->iomem_base) { dev_err(&ctrl_info->pci_dev->dev, "failed to map memory for controller registers\n"); @@ -8700,6 +8924,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) pqi_fail_all_outstanding_requests(ctrl_info); ctrl_info->pqi_mode_enabled = false; } + pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); pqi_unregister_scsi(ctrl_info); if 
(ctrl_info->pqi_mode_enabled) pqi_revert_to_sis_mode(ctrl_info); @@ -8725,177 +8950,188 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) pqi_ctrl_unblock_scan(ctrl_info); } -static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) +{ + ssleep(delay_secs); + + return pqi_ctrl_init_resume(ctrl_info); +} + +static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_size, u32 chunk_size) { int i; u32 sg_count; struct device *dev; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; struct pqi_sg_descriptor *mem_descriptor; dma_addr_t dma_handle; - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - sg_count = DIV_ROUND_UP(total_size, chunk_size); - if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) + if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) goto out; - ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); - if (!ctrl_info->pqi_ofa_chunk_virt_addr) + host_memory_descriptor->host_chunk_virt_address = + kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); + if (!host_memory_descriptor->host_chunk_virt_address) goto out; dev = &ctrl_info->pci_dev->dev; + host_memory = host_memory_descriptor->host_memory; for (i = 0; i < sg_count; i++) { - ctrl_info->pqi_ofa_chunk_virt_addr[i] = - dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); - if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) + host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); + if (!host_memory_descriptor->host_chunk_virt_address[i]) goto out_free_chunks; - mem_descriptor = &ofap->sg_descriptor[i]; + mem_descriptor = &host_memory->sg_descriptor[i]; put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); put_unaligned_le32(chunk_size, &mem_descriptor->length); } put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); - put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); - put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); + put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); + put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); return 0; out_free_chunks: while (--i >= 0) { - mem_descriptor = &ofap->sg_descriptor[i]; + mem_descriptor = &host_memory->sg_descriptor[i]; dma_free_coherent(dev, chunk_size, - ctrl_info->pqi_ofa_chunk_virt_addr[i], + host_memory_descriptor->host_chunk_virt_address[i], get_unaligned_le64(&mem_descriptor->address)); } - kfree(ctrl_info->pqi_ofa_chunk_virt_addr); - + kfree(host_memory_descriptor->host_chunk_virt_address); out: return -ENOMEM; } -static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) +static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_required_size, u32 min_required_size) { - u32 total_size; u32 chunk_size; u32 min_chunk_size; - if (ctrl_info->ofa_bytes_requested == 0) + if (total_required_size == 0 || min_required_size == 0) return 0; - total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); - min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); + total_required_size = PAGE_ALIGN(total_required_size); + min_required_size = PAGE_ALIGN(min_required_size); + min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS); min_chunk_size 
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_required_size, u32 min_required_size)
 {
-	u32 total_size;
 	u32 chunk_size;
 	u32 min_chunk_size;
 
-	if (ctrl_info->ofa_bytes_requested == 0)
+	if (total_required_size == 0 || min_required_size == 0)
 		return 0;
 
-	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
-	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+	total_required_size = PAGE_ALIGN(total_required_size);
+	min_required_size = PAGE_ALIGN(min_required_size);
+	min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
 	min_chunk_size = PAGE_ALIGN(min_chunk_size);
 
-	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
-		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
-			return 0;
-		chunk_size /= 2;
-		chunk_size = PAGE_ALIGN(chunk_size);
+	while (total_required_size >= min_required_size) {
+		for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+			if (pqi_host_alloc_mem(ctrl_info,
+				host_memory_descriptor, total_required_size,
+				chunk_size) == 0)
+				return 0;
+			chunk_size /= 2;
+			chunk_size = PAGE_ALIGN(chunk_size);
+		}
+		total_required_size /= 2;
+		total_required_size = PAGE_ALIGN(total_required_size);
 	}
 
 	return -ENOMEM;
 }
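pqi_host_alloc_buffer() implements a two-level geometric back-off: at a fixed total size it halves the chunk size until the per-chunk minimum would be violated, then halves the total itself and retries, so the firmware receives the largest buffer the host can actually provide. A standalone sketch of that policy, with try_alloc() standing in for pqi_host_alloc_mem() (illustrative only):

/* Two-level back-off: shrink the chunk size first, then the total size. */
#include <stdbool.h>
#include <stdio.h>

static bool try_alloc(unsigned int chunk)
{
	/* Pretend the system can only satisfy requests of 256 KiB or less. */
	return chunk <= 256 * 1024;
}

static int alloc_with_backoff(unsigned int total, unsigned int min_total,
			      unsigned int max_chunks)
{
	/* The chunk floor is derived once from the original total. */
	unsigned int min_chunk = (total + max_chunks - 1) / max_chunks;

	while (total >= min_total) {
		for (unsigned int chunk = total; chunk >= min_chunk; chunk /= 2)
			if (try_alloc(chunk))
				return 0;
		total /= 2;	/* fall back to a smaller overall buffer */
	}
	return -1;
}

int main(void)
{
	if (alloc_with_backoff(1 << 20, 1 << 16, 64) == 0)
		printf("allocation satisfied after back-off\n");
	return 0;
}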
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_size, u32 min_size)
 {
 	struct device *dev;
-	struct pqi_ofa_memory *ofap;
+	struct pqi_host_memory *host_memory;
 
 	dev = &ctrl_info->pci_dev->dev;
 
-	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
-		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
-	if (!ofap)
+	host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+		&host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+	if (!host_memory)
 		return;
 
-	ctrl_info->pqi_ofa_mem_virt_addr = ofap;
+	host_memory_descriptor->host_memory = host_memory;
 
-	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
-		dev_err(dev,
-			"failed to allocate host buffer for Online Firmware Activation\n");
-		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
-		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+	if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+		total_size, min_size) < 0) {
+		dev_err(dev, "failed to allocate firmware usable host buffer\n");
+		dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+			host_memory_descriptor->host_memory_dma_handle);
+		host_memory_descriptor->host_memory = NULL;
 		return;
 	}
-
-	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
-	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
 }
 
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor)
 {
 	unsigned int i;
 	struct device *dev;
-	struct pqi_ofa_memory *ofap;
+	struct pqi_host_memory *host_memory;
 	struct pqi_sg_descriptor *mem_descriptor;
 	unsigned int num_memory_descriptors;
 
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-	if (!ofap)
+	host_memory = host_memory_descriptor->host_memory;
+	if (!host_memory)
 		return;
 
 	dev = &ctrl_info->pci_dev->dev;
 
-	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
+	if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
 		goto out;
 
-	mem_descriptor = ofap->sg_descriptor;
-	num_memory_descriptors =
-		get_unaligned_le16(&ofap->num_memory_descriptors);
+	mem_descriptor = host_memory->sg_descriptor;
+	num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
 
 	for (i = 0; i < num_memory_descriptors; i++) {
 		dma_free_coherent(dev,
 			get_unaligned_le32(&mem_descriptor[i].length),
-			ctrl_info->pqi_ofa_chunk_virt_addr[i],
+			host_memory_descriptor->host_chunk_virt_address[i],
 			get_unaligned_le64(&mem_descriptor[i].address));
 	}
-	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+	kfree(host_memory_descriptor->host_chunk_virt_address);
 
out:
-	dma_free_coherent(dev, sizeof(*ofap), ofap,
-		ctrl_info->pqi_ofa_mem_dma_handle);
-	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+	dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+		host_memory_descriptor->host_memory_dma_handle);
+	host_memory_descriptor->host_memory = NULL;
 }
 
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u16 function_code)
 {
 	u32 buffer_length;
 	struct pqi_vendor_general_request request;
-	struct pqi_ofa_memory *ofap;
+	struct pqi_host_memory *host_memory;
 
 	memset(&request, 0, sizeof(request));
 
 	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
-	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
-		&request.header.iu_length);
-	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
-		&request.function_code);
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
-	if (ofap) {
-		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
-			get_unaligned_le16(&ofap->num_memory_descriptors) *
-			sizeof(struct pqi_sg_descriptor);
-
-		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
-			&request.data.ofa_memory_allocation.buffer_address);
-		put_unaligned_le32(buffer_length,
-			&request.data.ofa_memory_allocation.buffer_length);
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+	put_unaligned_le16(function_code, &request.function_code);
+
+	host_memory = host_memory_descriptor->host_memory;
+
+	if (host_memory) {
+		buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+		put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+		put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+		if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+		} else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+		}
 	}
 
 	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 }
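The buffer_length computation above is the standard sizing idiom for a structure that ends in a flexible array member: the offset of the trailing array plus n elements. A minimal userspace illustration (the struct layout below is illustrative, not the driver's actual pqi_host_memory layout):

/* Size a variable-length structure as header + n trailing elements. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sg_descriptor {
	uint64_t address;
	uint32_t length;
	uint32_t flags;
};

struct host_memory {
	uint16_t version;
	uint16_t num_memory_descriptors;
	uint32_t bytes_allocated;
	struct sg_descriptor sg_descriptor[];	/* flexible array member */
};

int main(void)
{
	unsigned int n = 4;
	size_t buffer_length = offsetof(struct host_memory, sg_descriptor) +
			       n * sizeof(struct sg_descriptor);

	printf("header %zu + %u descriptors of %zu = %zu bytes\n",
	       offsetof(struct host_memory, sg_descriptor), n,
	       sizeof(struct sg_descriptor), buffer_length);
	return 0;
}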
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
-{
-	ssleep(delay_secs);
-
-	return pqi_ctrl_init_resume(ctrl_info);
-}
-
 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
 	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
 	.status = SAM_STAT_CHECK_CONDITION,
@@ -8943,6 +9179,7 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
 	pqi_ctrl_wait_until_quiesced(ctrl_info);
 	pqi_fail_all_outstanding_requests(ctrl_info);
 	pqi_ctrl_unblock_requests(ctrl_info);
+	pqi_take_ctrl_devices_offline(ctrl_info);
 }
 
 static void pqi_ctrl_offline_worker(struct work_struct *work)
@@ -8953,6 +9190,52 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
 	pqi_take_ctrl_offline_deferred(ctrl_info);
 }
 
+static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
+{
+	char *string;
+
+	switch (ctrl_shutdown_reason) {
+	case PQI_IQ_NOT_DRAINED_TIMEOUT:
+		string = "inbound queue not drained timeout";
+		break;
+	case PQI_LUN_RESET_TIMEOUT:
+		string = "LUN reset timeout";
+		break;
+	case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
+		string = "I/O pending timeout after LUN reset";
+		break;
+	case PQI_NO_HEARTBEAT:
+		string = "no controller heartbeat detected";
+		break;
+	case PQI_FIRMWARE_KERNEL_NOT_UP:
+		string = "firmware kernel not ready";
+		break;
+	case PQI_OFA_RESPONSE_TIMEOUT:
+		string = "OFA response timeout";
+		break;
+	case PQI_INVALID_REQ_ID:
+		string = "invalid request ID";
+		break;
+	case PQI_UNMATCHED_REQ_ID:
+		string = "unmatched request ID";
+		break;
+	case PQI_IO_PI_OUT_OF_RANGE:
+		string = "I/O queue producer index out of range";
+		break;
+	case PQI_EVENT_PI_OUT_OF_RANGE:
+		string = "event queue producer index out of range";
+		break;
+	case PQI_UNEXPECTED_IU_TYPE:
+		string = "unexpected IU type";
+		break;
+	default:
+		string = "unknown reason";
+		break;
+	}
+
+	return string;
+}
+
 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
 	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
 {
@@ -8965,10 +9248,33 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
 	if (!pqi_disable_ctrl_shutdown)
 		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
 	pci_disable_device(ctrl_info->pci_dev);
-	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+	dev_err(&ctrl_info->pci_dev->dev,
+		"controller offline: reason code 0x%x (%s)\n",
+		ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
 	schedule_work(&ctrl_info->ctrl_offline_work);
 }
 
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	unsigned long flags;
+	struct pqi_scsi_dev *device;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+		rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list);
+		if (rc)
+			continue;
+
+		/*
+		 * The sdev pointer is set only after the OS has created the
+		 * SCSI device, so it can still be NULL here; skip LUNs that
+		 * were never exposed to the SCSI midlayer.
+		 */
+		if (device->sdev)
+			scsi_device_set_state(device->sdev, SDEV_OFFLINE);
+	}
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
 	const struct pci_device_id *id)
 {
@@ -9017,6 +9323,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
 			"failed to allocate controller info block\n");
 		return -ENOMEM;
 	}
+	ctrl_info->numa_node = node;
 
 	ctrl_info->pci_dev = pci_dev;
 
@@ -9110,7 +9417,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
 	rc = pqi_flush_cache(ctrl_info, shutdown_event);
 	if (rc)
 		dev_err(&pci_dev->dev,
-			"unable to flush controller cache\n");
+			"unable to flush controller cache during shutdown\n");
 
 	pqi_crash_if_pending_command(ctrl_info);
 	pqi_reset(ctrl_info);
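pqi_ctrl_shutdown_reason_to_string() decodes the reason code with a plain switch, which works even if the enum values are sparse or non-contiguous. Where the codes are known to be dense and zero-based, the same mapping can be written as a designated-initializer lookup table; a userspace sketch with stand-in enum values (the real codes live in smartpqi.h and are not assumed here):

/* Lookup-table alternative to a reason-code switch. */
#include <stdio.h>

enum shutdown_reason {
	REASON_IQ_NOT_DRAINED_TIMEOUT,
	REASON_LUN_RESET_TIMEOUT,
	REASON_NO_HEARTBEAT,
	REASON_MAX
};

static const char *const reason_strings[REASON_MAX] = {
	[REASON_IQ_NOT_DRAINED_TIMEOUT] = "inbound queue not drained timeout",
	[REASON_LUN_RESET_TIMEOUT]	= "LUN reset timeout",
	[REASON_NO_HEARTBEAT]		= "no controller heartbeat detected",
};

static const char *reason_to_string(enum shutdown_reason reason)
{
	/* Bounds check plus NULL check covers any holes in the table. */
	if ((unsigned int)reason >= REASON_MAX || !reason_strings[reason])
		return "unknown reason";
	return reason_strings[reason];
}

int main(void)
{
	printf("%s\n", reason_to_string(REASON_NO_HEARTBEAT));
	return 0;
}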
@@ -9320,6 +9627,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x193d, 0x0462)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			0x193d, 0x1104)
 	},
 	{
@@ -9348,6 +9659,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x193d, 0x1110)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			0x193d, 0x8460)
 	},
 	{
@@ -9356,6 +9671,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x193d, 0x8462)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			0x193d, 0xc460)
 	},
 	{
@@ -9464,6 +9783,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1bd4, 0x00a3)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x00a1)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f3a, 0x0104)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			0x19e5, 0xd227)
 	},
 	{
@@ -9792,6 +10123,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x207d, 0x4044)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x207d, 0x4054)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x207d, 0x4084)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x207d, 0x4094)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x207d, 0x4140)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x207d, 0x4240)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x207d, 0x4840)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			PCI_VENDOR_ID_ADVANTECH, 0x8312)
 	},
 	{
@@ -9928,6 +10287,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1cf2, 0x0804)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1cf2, 0x0805)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1cf2, 0x0806)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			0x1cf2, 0x5445)
 	},
 	{
@@ -9964,6 +10335,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1cf2, 0x54da)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1cf2, 0x54db)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1cf2, 0x54dc)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			0x1cf2, 0x0b27)
 	},
 	{
@@ -9984,6 +10367,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1018, 0x8238)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f3f, 0x0610)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			PCI_VENDOR_ID_LENOVO, 0x0220)
 	},
 	{
@@ -9992,10 +10383,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0222)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0223)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0224)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0225)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			PCI_VENDOR_ID_LENOVO, 0x0520)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0521)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			PCI_VENDOR_ID_LENOVO, 0x0522)
 	},
 	{
@@ -10016,6 +10427,150 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0624)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0625)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0626)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0627)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			PCI_VENDOR_ID_LENOVO, 0x0628)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1014, 0x0718)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1137, 0x02f8)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1137, 0x02f9)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1137, 0x02fa)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1137, 0x02fe)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1137, 0x02ff)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1137, 0x0300)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ded, 0x3301)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0045)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0046)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0047)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0048)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x004a)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x004b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x004c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x004f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0051)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0052)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0053)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0054)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x006b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x006c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x006d)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x006f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0070)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0071)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0072)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0086)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0087)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0088)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x0089)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			0x1e93, 0x1000)
 	},
 	{
@@ -10028,6 +10583,86 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1e93, 0x1005)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1001)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1002)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1003)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1004)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1005)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1006)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1007)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1008)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1009)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x100a)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x100b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x100e)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x100f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1010)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1011)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1043)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1044)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1f51, 0x1045)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			0x1ff9, 0x00a3)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			PCI_ANY_ID, PCI_ANY_ID)
 	},
 	{ 0 }
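Each entry above is generated by PCI_DEVICE_SUB(), which fills the vendor, device, subvendor, and subdevice fields of struct pci_device_id; the PCI core scans the table in order and stops at the first match, so the PCI_ANY_ID catch-all entry must stay last, just ahead of the zeroed sentinel that terminates the table. A simplified stand-in for that matching logic (the struct and loop below are illustrative, not the kernel's implementation):

/* In-order ID-table matching with wildcard fields, as a userspace sketch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffffffffu	/* stands in for PCI_ANY_ID */

struct pci_id {
	uint32_t vendor, device, subvendor, subdevice;
};

static const struct pci_id id_table[] = {
	{ 0x9005, 0x028f, 0x193d, 0x1104 },	/* specific subsystem ID */
	{ 0x9005, 0x028f, ANY_ID, ANY_ID },	/* catch-all: must come last */
	{ 0 }					/* sentinel terminates the scan */
};

static bool field_matches(uint32_t table_val, uint32_t dev_val)
{
	return table_val == ANY_ID || table_val == dev_val;
}

/* Returns the first matching entry, mirroring the PCI core's in-order scan. */
static const struct pci_id *match(const struct pci_id *dev)
{
	for (const struct pci_id *id = id_table; id->vendor; id++)
		if (field_matches(id->vendor, dev->vendor) &&
		    field_matches(id->device, dev->device) &&
		    field_matches(id->subvendor, dev->subvendor) &&
		    field_matches(id->subdevice, dev->subdevice))
			return id;
	return NULL;
}

int main(void)
{
	struct pci_id dev = { 0x9005, 0x028f, 0x1f51, 0x1001 };

	if (match(&dev))
		printf("device claimed via catch-all entry\n");
	return 0;
}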
